/*
 * Cinepak Video Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/**
 * @file cinepak.c
 * Cinepak video decoder
 * by Ewald Snel <ewald@rambo.its.tudelft.nl>
 * For more information on the Cinepak algorithm, visit:
 *   http://www.csse.monash.edu.au/~timf/
 */
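
/* Bitstream layout as handled by this decoder (the parsing code below is
 * the authoritative reference):
 *
 *   frame = 10-byte header (12 bytes for Sega FILM/CPK data), followed by
 *           up to MAX_STRIPS strips stacked top to bottom
 *   strip = 12-byte header, followed by a sequence of chunks
 *   chunk = 16-bit big-endian id + 16-bit big-endian size (incl. header),
 *           carrying either codebook updates (ids 0x2xxx) or the per-block
 *           vector indices (ids 0x3xxx)
 */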
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"

typedef struct {
    uint8_t  y0, y1, y2, y3;
    uint8_t  u, v;
} cvid_codebook_t;

#define MAX_STRIPS      32

typedef struct {
    uint16_t         id;
    uint16_t         x1, y1;
    uint16_t         x2, y2;
    cvid_codebook_t  v4_codebook[256];
    cvid_codebook_t  v1_codebook[256];
} cvid_strip_t;

typedef struct CinepakContext {

    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame frame;

    unsigned char *data;
    int size;

    int width, height;

    int palette_video;
    cvid_strip_t strips[MAX_STRIPS];

} CinepakContext;
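
/* Decode a codebook update chunk.  Per the logic below: bit 0x0400 of the
 * chunk id selects 4-byte (luma-only) vs. 6-byte (4 luma + 2 chroma, the
 * chroma bytes rebiased by +128) entries, and bit 0x0100 marks a selective
 * update, where each group of 32 entries is preceded by a 32-bit big-endian
 * mask telling which of them are actually replaced. */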
static void cinepak_decode_codebook (cvid_codebook_t *codebook,
                                     int chunk_id, int size, uint8_t *data)
{
    uint8_t *eod = (data + size);
    uint32_t flag, mask;
    int      i, n;

    /* check if this chunk contains 4- or 6-element vectors */
    n    = (chunk_id & 0x0400) ? 4 : 6;
    flag = 0;
    mask = 0;

    for (i=0; i < 256; i++) {
        if ((chunk_id & 0x0100) && !(mask >>= 1)) {
            if ((data + 4) > eod)
                break;

            flag  = BE_32 (data);
            data += 4;
            mask  = 0x80000000;
        }

        if (!(chunk_id & 0x0100) || (flag & mask)) {
            if ((data + n) > eod)
                break;

            if (n == 6) {
                codebook[i].y0 = *data++;
                codebook[i].y1 = *data++;
                codebook[i].y2 = *data++;
                codebook[i].y3 = *data++;
                codebook[i].u  = 128 + *data++;
                codebook[i].v  = 128 + *data++;
            } else {
                /* this codebook type indicates either greyscale or
                 * palettized video; if palettized, U & V components will
                 * not be used so it is safe to set them to 128 for the
                 * benefit of greyscale rendering in YUV420P */
                codebook[i].y0 = *data++;
                codebook[i].y1 = *data++;
                codebook[i].y2 = *data++;
                codebook[i].y3 = *data++;
                codebook[i].u  = 128;
                codebook[i].v  = 128;
            }
        }
    }
}
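
/* Decode the vector-index chunk for one strip, 4x4 pixels at a time.
 * As coded below: with chunk bit 0x0100 set, a bitmask selects which 4x4
 * blocks are updated at all (unset blocks keep the previous frame's pixels,
 * hence the reget_buffer() in cinepak_decode_frame()).  Unless bit 0x0200
 * forces V1-only coding, a further flag bit chooses per block between V1
 * (one codebook index, each luma value painted over a 2x2 quadrant) and V4
 * (four codebook indices, one full entry per 2x2 quadrant).  For palettized
 * video only plane 0 is written. */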
static int cinepak_decode_vectors (CinepakContext *s, cvid_strip_t *strip,
                                   int chunk_id, int size, uint8_t *data)
{
    uint8_t         *eod = (data + size);
    uint32_t         flag, mask;
    cvid_codebook_t *codebook;
    unsigned int     x, y;
    uint32_t         iy[4];
    uint32_t         iu[2];
    uint32_t         iv[2];

    flag = 0;
    mask = 0;

    for (y=strip->y1; y < strip->y2; y+=4) {

        iy[0] = strip->x1 + (y * s->frame.linesize[0]);
        iy[1] = iy[0] + s->frame.linesize[0];
        iy[2] = iy[1] + s->frame.linesize[0];
        iy[3] = iy[2] + s->frame.linesize[0];
        iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]);
        iu[1] = iu[0] + s->frame.linesize[1];
        iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]);
        iv[1] = iv[0] + s->frame.linesize[2];

        for (x=strip->x1; x < strip->x2; x+=4) {
            if ((chunk_id & 0x0100) && !(mask >>= 1)) {
                if ((data + 4) > eod)
                    return -1;

                flag  = BE_32 (data);
                data += 4;
                mask  = 0x80000000;
            }

            if (!(chunk_id & 0x0100) || (flag & mask)) {
                if (!(chunk_id & 0x0200) && !(mask >>= 1)) {
                    if ((data + 4) > eod)
                        return -1;

                    flag  = BE_32 (data);
                    data += 4;
                    mask  = 0x80000000;
                }

                if ((chunk_id & 0x0200) || (~flag & mask)) {
                    if (data >= eod)
                        return -1;

                    codebook = &strip->v1_codebook[*data++];
                    s->frame.data[0][iy[0] + 0] = codebook->y0;
                    s->frame.data[0][iy[0] + 1] = codebook->y0;
                    s->frame.data[0][iy[1] + 0] = codebook->y0;
                    s->frame.data[0][iy[1] + 1] = codebook->y0;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0]] = codebook->u;
                        s->frame.data[2][iv[0]] = codebook->v;
                    }

                    s->frame.data[0][iy[0] + 2] = codebook->y1;
                    s->frame.data[0][iy[0] + 3] = codebook->y1;
                    s->frame.data[0][iy[1] + 2] = codebook->y1;
                    s->frame.data[0][iy[1] + 3] = codebook->y1;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0] + 1] = codebook->u;
                        s->frame.data[2][iv[0] + 1] = codebook->v;
                    }

                    s->frame.data[0][iy[2] + 0] = codebook->y2;
                    s->frame.data[0][iy[2] + 1] = codebook->y2;
                    s->frame.data[0][iy[3] + 0] = codebook->y2;
                    s->frame.data[0][iy[3] + 1] = codebook->y2;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1]] = codebook->u;
                        s->frame.data[2][iv[1]] = codebook->v;
                    }

                    s->frame.data[0][iy[2] + 2] = codebook->y3;
                    s->frame.data[0][iy[2] + 3] = codebook->y3;
                    s->frame.data[0][iy[3] + 2] = codebook->y3;
                    s->frame.data[0][iy[3] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1] + 1] = codebook->u;
                        s->frame.data[2][iv[1] + 1] = codebook->v;
                    }

                } else if (flag & mask) {
                    if ((data + 4) > eod)
                        return -1;

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[0] + 0] = codebook->y0;
                    s->frame.data[0][iy[0] + 1] = codebook->y1;
                    s->frame.data[0][iy[1] + 0] = codebook->y2;
                    s->frame.data[0][iy[1] + 1] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0]] = codebook->u;
                        s->frame.data[2][iv[0]] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[0] + 2] = codebook->y0;
                    s->frame.data[0][iy[0] + 3] = codebook->y1;
                    s->frame.data[0][iy[1] + 2] = codebook->y2;
                    s->frame.data[0][iy[1] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0] + 1] = codebook->u;
                        s->frame.data[2][iv[0] + 1] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[2] + 0] = codebook->y0;
                    s->frame.data[0][iy[2] + 1] = codebook->y1;
                    s->frame.data[0][iy[3] + 0] = codebook->y2;
                    s->frame.data[0][iy[3] + 1] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1]] = codebook->u;
                        s->frame.data[2][iv[1]] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[2] + 2] = codebook->y0;
                    s->frame.data[0][iy[2] + 3] = codebook->y1;
                    s->frame.data[0][iy[3] + 2] = codebook->y2;
                    s->frame.data[0][iy[3] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1] + 1] = codebook->u;
                        s->frame.data[2][iv[1] + 1] = codebook->v;
                    }

                }
            }

            iy[0] += 4;  iy[1] += 4;
            iy[2] += 4;  iy[3] += 4;
            iu[0] += 2;  iu[1] += 2;
            iv[0] += 2;  iv[1] += 2;
        }
    }

    return 0;
}
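
/* Walk the chunks of one strip.  Chunk ids 0x2000/0x2100/0x2400/0x2500
 * update the V4 codebook, 0x2200/0x2300/0x2600/0x2700 the V1 codebook,
 * and 0x3000/0x3100/0x3200 carry the vector indices and end the strip.
 * Chunk sizes are clamped to the remaining strip data before use. */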
static int cinepak_decode_strip (CinepakContext *s,
                                 cvid_strip_t *strip, uint8_t *data, int size)
{
    uint8_t *eod = (data + size);
    int      chunk_id, chunk_size;

    /* coordinate sanity checks */
    if (strip->x1 >= s->width  || strip->x2 > s->width  ||
        strip->y1 >= s->height || strip->y2 > s->height ||
        strip->x1 >= strip->x2 || strip->y1 >= strip->y2)
        return -1;

    while ((data + 4) <= eod) {
        chunk_id   = BE_16 (&data[0]);
        chunk_size = BE_16 (&data[2]) - 4;
        if (chunk_size < 0)
            return -1;

        data      += 4;
        chunk_size = ((data + chunk_size) > eod) ? (eod - data) : chunk_size;

        switch (chunk_id) {

        case 0x2000:
        case 0x2100:
        case 0x2400:
        case 0x2500:
            cinepak_decode_codebook (strip->v4_codebook, chunk_id,
                chunk_size, data);
            break;

        case 0x2200:
        case 0x2300:
        case 0x2600:
        case 0x2700:
            cinepak_decode_codebook (strip->v1_codebook, chunk_id,
                chunk_size, data);
            break;

        case 0x3000:
        case 0x3100:
        case 0x3200:
            return cinepak_decode_vectors (s, strip, chunk_id,
                chunk_size, data);
        }

        data += chunk_size;
    }

    return -1;
}
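
/* Parse the frame header and loop over the strips.  As read below: byte 0
 * holds the frame flags, the 16-bit value at offset 8 the strip count, and
 * the 16-bit value at offset 2 is compared against the buffer size to spot
 * Sega FILM/CPK data, which carries a 12-byte rather than 10-byte header.
 * Each 12-byte strip header supplies the strip id, its byte size and its
 * height; strips are stacked vertically, and unless frame flag bit 0 is set
 * every strip after the first inherits the previous strip's codebooks. */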
static int cinepak_decode (CinepakContext *s)
{
    uint8_t *eod = (s->data + s->size);
    int      i, result, strip_size, frame_flags, num_strips;
    int      y0 = 0;
    int      encoded_buf_size;
    /* if true, Cinepak data is from a Sega FILM/CPK file */
    int      sega_film_data = 0;

    if (s->size < 10)
        return -1;

    frame_flags = s->data[0];
    num_strips  = BE_16 (&s->data[8]);
    encoded_buf_size = BE_16 (&s->data[2]);
    if (encoded_buf_size != s->size)
        sega_film_data = 1;
    if (sega_film_data)
        s->data += 12;
    else
        s->data += 10;

    if (num_strips > MAX_STRIPS)
        num_strips = MAX_STRIPS;

    for (i=0; i < num_strips; i++) {
        if ((s->data + 12) > eod)
            return -1;

        s->strips[i].id = BE_16 (s->data);
        s->strips[i].y1 = y0;
        s->strips[i].x1 = 0;
        s->strips[i].y2 = y0 + BE_16 (&s->data[8]);
        s->strips[i].x2 = s->avctx->width;

        strip_size = BE_16 (&s->data[2]) - 12;
        s->data   += 12;
        strip_size = ((s->data + strip_size) > eod) ? (eod - s->data) : strip_size;

        if ((i > 0) && !(frame_flags & 0x01)) {
            memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook,
                sizeof(s->strips[i].v4_codebook));

            memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook,
                sizeof(s->strips[i].v1_codebook));
        }

        result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size);

        if (result != 0)
            return result;

        s->data += strip_size;
        y0 = s->strips[i].y2;
    }

    return 0;
}
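
/* Initialize the decoder context: dimensions are rounded up to a multiple
 * of 4 (Cinepak works on 4x4 blocks), and the output format is PAL8 when
 * the demuxer supplies a palette control and bits_per_sample is not 40,
 * otherwise YUV420P. */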
static int cinepak_decode_init(AVCodecContext *avctx)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    s->avctx = avctx;
    s->width = (avctx->width + 3) & ~3;
    s->height = (avctx->height + 3) & ~3;

    // check for paletted data
    if ((avctx->palctrl == NULL) || (avctx->bits_per_sample == 40)) {
        s->palette_video = 0;
        avctx->pix_fmt = PIX_FMT_YUV420P;
    } else {
        s->palette_video = 1;
        avctx->pix_fmt = PIX_FMT_PAL8;
    }
    avctx->has_b_frames = 0;
    dsputil_init(&s->dsp, avctx);

    s->frame.data[0] = NULL;

    return 0;
}
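
/* Decode one frame.  reget_buffer() keeps the previous frame's pixels so
 * that blocks skipped by the bitstream carry over unchanged; for palettized
 * video the current palette is copied into plane 1 of the output frame. */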
static int cinepak_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                uint8_t *buf, int buf_size)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    s->data = buf;
    s->size = buf_size;

    s->frame.reference = 1;
    s->frame.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                            FF_BUFFER_HINTS_REUSABLE;
    if (avctx->reget_buffer(avctx, &s->frame)) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    cinepak_decode(s);

    if (s->palette_video) {
        memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
        if (avctx->palctrl->palette_changed) {
            s->frame.palette_has_changed = 1;
            avctx->palctrl->palette_changed = 0;
        } else
            s->frame.palette_has_changed = 0;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}
static int cinepak_decode_end(AVCodecContext *avctx)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    return 0;
}
AVCodec cinepak_decoder = {
    "cinepak",
    CODEC_TYPE_VIDEO,
    CODEC_ID_CINEPAK,
    sizeof(CinepakContext),
    cinepak_decode_init,
    NULL,
    cinepak_decode_end,
    cinepak_decode_frame,
    CODEC_CAP_DR1,
};