/*
 * Cinepak Video Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

/**
 * @file cinepak.c
 * Cinepak video decoder
 * by Ewald Snel <ewald@rambo.its.tudelft.nl>
 * For more information on the Cinepak algorithm, visit:
 *   http://www.csse.monash.edu.au/~timf/
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"

typedef struct {
    uint8_t y0, y1, y2, y3;
    uint8_t u, v;
} cvid_codebook_t;

#define MAX_STRIPS 32

typedef struct {
    uint16_t id;
    uint16_t x1, y1;
    uint16_t x2, y2;
    cvid_codebook_t v4_codebook[256];
    cvid_codebook_t v1_codebook[256];
} cvid_strip_t;

typedef struct CinepakContext {

    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame frame;

    unsigned char *data;
    int size;

    int width, height;

    int palette_video;
    cvid_strip_t strips[MAX_STRIPS];

} CinepakContext;
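
/* Parse a codebook chunk for one strip.  Bit 0x0400 of the chunk id selects
 * 4-element (luma-only) vs. 6-element (luma + chroma) vectors; when bit
 * 0x0100 is set, only the entries flagged in the 32-bit update bitmasks are
 * refreshed, otherwise all 256 entries are replaced. */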
static void cinepak_decode_codebook (cvid_codebook_t *codebook,
                                     int chunk_id, int size, uint8_t *data)
{
    uint8_t *eod = (data + size);
    uint32_t flag, mask;
    int      i, n;

    /* check if this chunk contains 4- or 6-element vectors */
    n    = (chunk_id & 0x0400) ? 4 : 6;
    flag = 0;
    mask = 0;

    for (i=0; i < 256; i++) {
        if ((chunk_id & 0x0100) && !(mask >>= 1)) {
            if ((data + 4) > eod)
                break;

            flag  = BE_32 (data);
            data += 4;
            mask  = 0x80000000;
        }

        if (!(chunk_id & 0x0100) || (flag & mask)) {
            if ((data + n) > eod)
                break;

            if (n == 6) {
                codebook[i].y0 = *data++;
                codebook[i].y1 = *data++;
                codebook[i].y2 = *data++;
                codebook[i].y3 = *data++;
                codebook[i].u  = 128 + *data++;
                codebook[i].v  = 128 + *data++;
            } else {
                /* this codebook type indicates either greyscale or
                 * palettized video; if palettized, U & V components will
                 * not be used so it is safe to set them to 128 for the
                 * benefit of greyscale rendering in YUV420P */
                codebook[i].y0 = *data++;
                codebook[i].y1 = *data++;
                codebook[i].y2 = *data++;
                codebook[i].y3 = *data++;
                codebook[i].u  = 128;
                codebook[i].v  = 128;
            }
        }
    }
}
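
/* Decode the image vectors of one strip.  Each 4x4 block is either skipped
 * (inter chunks), painted with a single upsampled V1 codebook entry, or
 * built from four 2x2 V4 codebook entries, as selected by the bitmasks
 * embedded in the chunk. */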
static int cinepak_decode_vectors (CinepakContext *s, cvid_strip_t *strip,
                                   int chunk_id, int size, uint8_t *data)
{
    uint8_t         *eod = (data + size);
    uint32_t         flag, mask;
    cvid_codebook_t *codebook;
    unsigned int     x, y;
    uint32_t         iy[4];
    uint32_t         iu[2];
    uint32_t         iv[2];

    flag = 0;
    mask = 0;

    for (y=strip->y1; y < strip->y2; y+=4) {

        iy[0] = strip->x1 + (y * s->frame.linesize[0]);
        iy[1] = iy[0] + s->frame.linesize[0];
        iy[2] = iy[1] + s->frame.linesize[0];
        iy[3] = iy[2] + s->frame.linesize[0];
        iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]);
        iu[1] = iu[0] + s->frame.linesize[1];
        iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]);
        iv[1] = iv[0] + s->frame.linesize[2];

        for (x=strip->x1; x < strip->x2; x+=4) {
            if ((chunk_id & 0x0100) && !(mask >>= 1)) {
                if ((data + 4) > eod)
                    return -1;

                flag  = BE_32 (data);
                data += 4;
                mask  = 0x80000000;
            }

            if (!(chunk_id & 0x0100) || (flag & mask)) {
                if (!(chunk_id & 0x0200) && !(mask >>= 1)) {
                    if ((data + 4) > eod)
                        return -1;

                    flag  = BE_32 (data);
                    data += 4;
                    mask  = 0x80000000;
                }

                if ((chunk_id & 0x0200) || (~flag & mask)) {
                    if (data >= eod)
                        return -1;

                    codebook = &strip->v1_codebook[*data++];
                    s->frame.data[0][iy[0] + 0] = codebook->y0;
                    s->frame.data[0][iy[0] + 1] = codebook->y0;
                    s->frame.data[0][iy[1] + 0] = codebook->y0;
                    s->frame.data[0][iy[1] + 1] = codebook->y0;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0]] = codebook->u;
                        s->frame.data[2][iv[0]] = codebook->v;
                    }

                    s->frame.data[0][iy[0] + 2] = codebook->y1;
                    s->frame.data[0][iy[0] + 3] = codebook->y1;
                    s->frame.data[0][iy[1] + 2] = codebook->y1;
                    s->frame.data[0][iy[1] + 3] = codebook->y1;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0] + 1] = codebook->u;
                        s->frame.data[2][iv[0] + 1] = codebook->v;
                    }

                    s->frame.data[0][iy[2] + 0] = codebook->y2;
                    s->frame.data[0][iy[2] + 1] = codebook->y2;
                    s->frame.data[0][iy[3] + 0] = codebook->y2;
                    s->frame.data[0][iy[3] + 1] = codebook->y2;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1]] = codebook->u;
                        s->frame.data[2][iv[1]] = codebook->v;
                    }

                    s->frame.data[0][iy[2] + 2] = codebook->y3;
                    s->frame.data[0][iy[2] + 3] = codebook->y3;
                    s->frame.data[0][iy[3] + 2] = codebook->y3;
                    s->frame.data[0][iy[3] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1] + 1] = codebook->u;
                        s->frame.data[2][iv[1] + 1] = codebook->v;
                    }

                } else if (flag & mask) {
                    if ((data + 4) > eod)
                        return -1;

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[0] + 0] = codebook->y0;
                    s->frame.data[0][iy[0] + 1] = codebook->y1;
                    s->frame.data[0][iy[1] + 0] = codebook->y2;
                    s->frame.data[0][iy[1] + 1] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0]] = codebook->u;
                        s->frame.data[2][iv[0]] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[0] + 2] = codebook->y0;
                    s->frame.data[0][iy[0] + 3] = codebook->y1;
                    s->frame.data[0][iy[1] + 2] = codebook->y2;
                    s->frame.data[0][iy[1] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0] + 1] = codebook->u;
                        s->frame.data[2][iv[0] + 1] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[2] + 0] = codebook->y0;
                    s->frame.data[0][iy[2] + 1] = codebook->y1;
                    s->frame.data[0][iy[3] + 0] = codebook->y2;
                    s->frame.data[0][iy[3] + 1] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1]] = codebook->u;
                        s->frame.data[2][iv[1]] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[2] + 2] = codebook->y0;
                    s->frame.data[0][iy[2] + 3] = codebook->y1;
                    s->frame.data[0][iy[3] + 2] = codebook->y2;
                    s->frame.data[0][iy[3] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1] + 1] = codebook->u;
                        s->frame.data[2][iv[1] + 1] = codebook->v;
                    }
                }
            }

            iy[0] += 4;  iy[1] += 4;
            iy[2] += 4;  iy[3] += 4;
            iu[0] += 2;  iu[1] += 2;
            iv[0] += 2;  iv[1] += 2;
        }
    }

    return 0;
}
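
/* Decode one strip: sanity-check its coordinates, then walk its chunks,
 * updating the V1/V4 codebooks and finally decoding the image vectors. */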
static int cinepak_decode_strip (CinepakContext *s,
                                 cvid_strip_t *strip, uint8_t *data, int size)
{
    uint8_t *eod = (data + size);
    int      chunk_id, chunk_size;

    /* coordinate sanity checks */
    if (strip->x1 >= s->width  || strip->x2 > s->width  ||
        strip->y1 >= s->height || strip->y2 > s->height ||
        strip->x1 >= strip->x2 || strip->y1 >= strip->y2)
        return -1;

    while ((data + 4) <= eod) {
        chunk_id   = BE_16 (&data[0]);
        chunk_size = BE_16 (&data[2]) - 4;
        if(chunk_size < 0)
            return -1;

        data += 4;
        chunk_size = ((data + chunk_size) > eod) ? (eod - data) : chunk_size;

        switch (chunk_id) {
        case 0x2000:
        case 0x2100:
        case 0x2400:
        case 0x2500:
            cinepak_decode_codebook (strip->v4_codebook, chunk_id,
                chunk_size, data);
            break;

        case 0x2200:
        case 0x2300:
        case 0x2600:
        case 0x2700:
            cinepak_decode_codebook (strip->v1_codebook, chunk_id,
                chunk_size, data);
            break;

        case 0x3000:
        case 0x3100:
        case 0x3200:
            return cinepak_decode_vectors (s, strip, chunk_id,
                chunk_size, data);
        }

        data += chunk_size;
    }

    return -1;
}
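
/* Decode a whole frame: parse the frame header (detecting the longer Sega
 * FILM/CPK header by its embedded size field), then decode each strip in
 * turn.  Strips after the first inherit the previous strip's codebooks
 * unless bit 0 of the frame flags is set. */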
static int cinepak_decode (CinepakContext *s)
{
    uint8_t  *eod = (s->data + s->size);
    int       i, result, strip_size, frame_flags, num_strips;
    int       y0 = 0;
    int       encoded_buf_size;
    /* if true, Cinepak data is from a Sega FILM/CPK file */
    int       sega_film_data = 0;

    if (s->size < 10)
        return -1;

    frame_flags = s->data[0];
    num_strips  = BE_16 (&s->data[8]);
    encoded_buf_size = ((s->data[1] << 16) | BE_16 (&s->data[2]));
    if (encoded_buf_size != s->size)
        sega_film_data = 1;
    if (sega_film_data)
        s->data += 12;
    else
        s->data += 10;

    if (num_strips > MAX_STRIPS)
        num_strips = MAX_STRIPS;

    for (i=0; i < num_strips; i++) {
        if ((s->data + 12) > eod)
            return -1;

        s->strips[i].id = BE_16 (s->data);
        s->strips[i].y1 = y0;
        s->strips[i].x1 = 0;
        s->strips[i].y2 = y0 + BE_16 (&s->data[8]);
        s->strips[i].x2 = s->avctx->width;

        strip_size = BE_16 (&s->data[2]) - 12;
        s->data   += 12;
        strip_size = ((s->data + strip_size) > eod) ? (eod - s->data) : strip_size;

        if ((i > 0) && !(frame_flags & 0x01)) {
            memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook,
                sizeof(s->strips[i].v4_codebook));

            memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook,
                sizeof(s->strips[i].v1_codebook));
        }

        result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size);

        if (result != 0)
            return result;

        s->data += strip_size;
        y0 = s->strips[i].y2;
    }

    return 0;
}
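
/* Initialize the decoder: round the dimensions up to a multiple of 4 and
 * select PAL8 output when a palette control structure is supplied (and the
 * sample depth is not the 40-bit greyscale case), YUV420P otherwise. */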
static int cinepak_decode_init(AVCodecContext *avctx)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    s->avctx = avctx;
    s->width = (avctx->width + 3) & ~3;
    s->height = (avctx->height + 3) & ~3;

    // check for paletted data
    if ((avctx->palctrl == NULL) || (avctx->bits_per_sample == 40)) {
        s->palette_video = 0;
        avctx->pix_fmt = PIX_FMT_YUV420P;
    } else {
        s->palette_video = 1;
        avctx->pix_fmt = PIX_FMT_PAL8;
    }
    avctx->has_b_frames = 0;
    dsputil_init(&s->dsp, avctx);

    s->frame.data[0] = NULL;

    return 0;
}
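
/* Decode one frame: (re)acquire the reusable output buffer, decode the
 * packet into it and, for paletted video, copy the current palette into
 * the frame. */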
static int cinepak_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                uint8_t *buf, int buf_size)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    s->data = buf;
    s->size = buf_size;

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                            FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    cinepak_decode(s);

    if (s->palette_video) {
        memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
        if (avctx->palctrl->palette_changed) {
            s->frame.palette_has_changed = 1;
            avctx->palctrl->palette_changed = 0;
        } else
            s->frame.palette_has_changed = 0;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
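
/* Release the reference frame held by the decoder. */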
static int cinepak_decode_end(AVCodecContext *avctx)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    return 0;
}
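
/* Decoder registration; fields follow this FFmpeg version's AVCodec layout
 * (name, type, id, priv_data_size, init, encode, close, decode, capabilities),
 * with the encode slot left NULL for a pure decoder. */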
AVCodec cinepak_decoder = {
    "cinepak",
    CODEC_TYPE_VIDEO,
    CODEC_ID_CINEPAK,
    sizeof(CinepakContext),
    cinepak_decode_init,
    NULL,
    cinepak_decode_end,
    cinepak_decode_frame,
    CODEC_CAP_DR1,
};