You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

455 lines
14KB

  1. /*
  2. * Cinepak Video Decoder
  3. * Copyright (C) 2003 the ffmpeg project
  4. *
  5. * This library is free software; you can redistribute it and/or
  6. * modify it under the terms of the GNU Lesser General Public
  7. * License as published by the Free Software Foundation; either
  8. * version 2 of the License, or (at your option) any later version.
  9. *
  10. * This library is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  13. * Lesser General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU Lesser General Public
  16. * License along with this library; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. *
  19. */
  20. /**
  21. * @file cinepak.c
  22. * Cinepak video decoder
  23. * by Ewald Snel <ewald@rambo.its.tudelft.nl>
  24. * For more information on the Cinepak algorithm, visit:
  25. * http://www.csse.monash.edu.au/~timf/
  26. */
  27. #include <stdio.h>
  28. #include <stdlib.h>
  29. #include <string.h>
  30. #include <unistd.h>
  31. #include "common.h"
  32. #include "avcodec.h"
  33. #include "dsputil.h"
#define PALETTE_COUNT 256   /* entries in an 8-bit palette */

/* One Cinepak codebook vector: four luma samples covering a 2x2 pixel
 * patch plus one shared chroma pair.  For 4-element codebooks the loader
 * leaves u/v at the neutral value 128 (see cinepak_decode_codebook). */
typedef struct {
    uint8_t y0, y1, y2, y3;  /* luma samples for the 2x2 patch */
    uint8_t u, v;            /* shared chroma; stored with +128 bias applied by the loader */
} cvid_codebook_t;
#define MAX_STRIPS 32   /* decoder-side cap on strips per frame */

/* One horizontal strip of the frame, with its own pair of codebooks. */
typedef struct {
    uint16_t id;                       /* strip chunk id read from the bitstream */
    uint16_t x1, y1;                   /* top-left corner of the strip (inclusive) */
    uint16_t x2, y2;                   /* bottom-right corner (exclusive) */
    cvid_codebook_t v4_codebook[256];  /* V4: one vector per 2x2 quarter of a block */
    cvid_codebook_t v1_codebook[256];  /* V1: one vector covers a whole 4x4 block */
} cvid_strip_t;
/* Per-instance decoder state. */
typedef struct CinepakContext {

    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame frame;        /* frame currently being decoded into */
    AVFrame prev_frame;   /* previous output; source for skipped blocks */

    unsigned char *data;  /* read cursor into the current input packet */
    int size;             /* total size of the current packet, in bytes */

    int width, height;    /* frame dimensions rounded up to a multiple of 4 */

    unsigned char palette[PALETTE_COUNT * 4];  /* reserved for palettized mode (unused: palette_video is always 0 here) */
    int palette_video;    /* nonzero for palettized streams; cinepak_decode_init always sets 0 */

    cvid_strip_t strips[MAX_STRIPS];
} CinepakContext;
  59. static void cinepak_decode_codebook (cvid_codebook_t *codebook,
  60. int chunk_id, int size, uint8_t *data)
  61. {
  62. uint8_t *eod = (data + size);
  63. uint32_t flag, mask;
  64. int i, n;
  65. /* check if this chunk contains 4- or 6-element vectors */
  66. n = (chunk_id & 0x0400) ? 4 : 6;
  67. flag = 0;
  68. mask = 0;
  69. for (i=0; i < 256; i++) {
  70. if ((chunk_id & 0x0100) && !(mask >>= 1)) {
  71. if ((data + 4) > eod)
  72. break;
  73. flag = BE_32 (data);
  74. data += 4;
  75. mask = 0x80000000;
  76. }
  77. if (!(chunk_id & 0x0100) || (flag & mask)) {
  78. if ((data + n) > eod)
  79. break;
  80. if (n == 6) {
  81. codebook[i].y0 = *data++;
  82. codebook[i].y1 = *data++;
  83. codebook[i].y2 = *data++;
  84. codebook[i].y3 = *data++;
  85. codebook[i].u = 128 + *data++;
  86. codebook[i].v = 128 + *data++;
  87. } else {
  88. /* this codebook type indicates either greyscale or
  89. * palettized video; if palettized, U & V components will
  90. * not be used so it is safe to set them to 128 for the
  91. * benefit of greyscale rendering in YUV420P */
  92. codebook[i].y0 = *data++;
  93. codebook[i].y1 = *data++;
  94. codebook[i].y2 = *data++;
  95. codebook[i].y3 = *data++;
  96. codebook[i].u = 128;
  97. codebook[i].v = 128;
  98. }
  99. }
  100. }
  101. }
  102. static int cinepak_decode_vectors (CinepakContext *s, cvid_strip_t *strip,
  103. int chunk_id, int size, uint8_t *data)
  104. {
  105. uint8_t *eod = (data + size);
  106. uint32_t flag, mask;
  107. cvid_codebook_t *codebook;
  108. unsigned int i, j, x, y;
  109. uint32_t iy[4];
  110. uint32_t iu[2];
  111. uint32_t iv[2];
  112. flag = 0;
  113. mask = 0;
  114. for (y=strip->y1; y < strip->y2; y+=4) {
  115. iy[0] = strip->x1 + (y * s->frame.linesize[0]);
  116. iy[1] = iy[0] + s->frame.linesize[0];
  117. iy[2] = iy[1] + s->frame.linesize[0];
  118. iy[3] = iy[2] + s->frame.linesize[0];
  119. iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]);
  120. iu[1] = iu[0] + s->frame.linesize[1];
  121. iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]);
  122. iv[1] = iv[0] + s->frame.linesize[2];
  123. for (x=strip->x1; x < strip->x2; x+=4) {
  124. if ((chunk_id & 0x0100) && !(mask >>= 1)) {
  125. if ((data + 4) > eod)
  126. return -1;
  127. flag = BE_32 (data);
  128. data += 4;
  129. mask = 0x80000000;
  130. }
  131. if (!(chunk_id & 0x0100) || (flag & mask)) {
  132. if (!(chunk_id & 0x0200) && !(mask >>= 1)) {
  133. if ((data + 4) > eod)
  134. return -1;
  135. flag = BE_32 (data);
  136. data += 4;
  137. mask = 0x80000000;
  138. }
  139. if ((chunk_id & 0x0200) || (~flag & mask)) {
  140. if (data >= eod)
  141. return -1;
  142. codebook = &strip->v1_codebook[*data++];
  143. s->frame.data[0][iy[0] + 0] = codebook->y0;
  144. s->frame.data[0][iy[0] + 1] = codebook->y0;
  145. s->frame.data[0][iy[1] + 0] = codebook->y0;
  146. s->frame.data[0][iy[1] + 1] = codebook->y0;
  147. if (!s->palette_video) {
  148. s->frame.data[1][iu[0]] = codebook->u;
  149. s->frame.data[2][iv[0]] = codebook->v;
  150. }
  151. s->frame.data[0][iy[0] + 2] = codebook->y0;
  152. s->frame.data[0][iy[0] + 3] = codebook->y0;
  153. s->frame.data[0][iy[1] + 2] = codebook->y0;
  154. s->frame.data[0][iy[1] + 3] = codebook->y0;
  155. if (!s->palette_video) {
  156. s->frame.data[1][iu[0] + 1] = codebook->u;
  157. s->frame.data[2][iv[0] + 1] = codebook->v;
  158. }
  159. s->frame.data[0][iy[2] + 0] = codebook->y0;
  160. s->frame.data[0][iy[2] + 1] = codebook->y0;
  161. s->frame.data[0][iy[3] + 0] = codebook->y0;
  162. s->frame.data[0][iy[3] + 1] = codebook->y0;
  163. if (!s->palette_video) {
  164. s->frame.data[1][iu[1]] = codebook->u;
  165. s->frame.data[2][iv[1]] = codebook->v;
  166. }
  167. s->frame.data[0][iy[2] + 2] = codebook->y0;
  168. s->frame.data[0][iy[2] + 3] = codebook->y0;
  169. s->frame.data[0][iy[3] + 2] = codebook->y0;
  170. s->frame.data[0][iy[3] + 3] = codebook->y0;
  171. if (!s->palette_video) {
  172. s->frame.data[1][iu[1] + 1] = codebook->u;
  173. s->frame.data[2][iv[1] + 1] = codebook->v;
  174. }
  175. } else if (flag & mask) {
  176. if ((data + 4) > eod)
  177. return -1;
  178. codebook = &strip->v4_codebook[*data++];
  179. s->frame.data[0][iy[0] + 0] = codebook->y0;
  180. s->frame.data[0][iy[0] + 1] = codebook->y1;
  181. s->frame.data[0][iy[1] + 0] = codebook->y2;
  182. s->frame.data[0][iy[1] + 1] = codebook->y3;
  183. if (!s->palette_video) {
  184. s->frame.data[1][iu[0]] = codebook->u;
  185. s->frame.data[2][iv[0]] = codebook->v;
  186. }
  187. codebook = &strip->v4_codebook[*data++];
  188. s->frame.data[0][iy[0] + 2] = codebook->y0;
  189. s->frame.data[0][iy[0] + 3] = codebook->y1;
  190. s->frame.data[0][iy[1] + 2] = codebook->y2;
  191. s->frame.data[0][iy[1] + 3] = codebook->y3;
  192. if (!s->palette_video) {
  193. s->frame.data[1][iu[0] + 1] = codebook->u;
  194. s->frame.data[2][iv[0] + 1] = codebook->v;
  195. }
  196. codebook = &strip->v4_codebook[*data++];
  197. s->frame.data[0][iy[2] + 0] = codebook->y0;
  198. s->frame.data[0][iy[2] + 1] = codebook->y1;
  199. s->frame.data[0][iy[3] + 0] = codebook->y2;
  200. s->frame.data[0][iy[3] + 1] = codebook->y3;
  201. if (!s->palette_video) {
  202. s->frame.data[1][iu[1]] = codebook->u;
  203. s->frame.data[2][iv[1]] = codebook->v;
  204. }
  205. codebook = &strip->v4_codebook[*data++];
  206. s->frame.data[0][iy[2] + 2] = codebook->y0;
  207. s->frame.data[0][iy[2] + 3] = codebook->y1;
  208. s->frame.data[0][iy[3] + 2] = codebook->y2;
  209. s->frame.data[0][iy[3] + 3] = codebook->y3;
  210. if (!s->palette_video) {
  211. s->frame.data[1][iu[1] + 1] = codebook->u;
  212. s->frame.data[2][iv[1] + 1] = codebook->v;
  213. }
  214. }
  215. } else {
  216. /* copy from the previous frame */
  217. for (i = 0; i < 4; i++) {
  218. for (j = 0; j < 4; j++) {
  219. s->frame.data[0][iy[i] + j] =
  220. s->prev_frame.data[0][iy[i] + j];
  221. }
  222. }
  223. for (i = 0; i < 2; i++) {
  224. for (j = 0; j < 2; j++) {
  225. s->frame.data[1][iu[i] + j] =
  226. s->prev_frame.data[1][iu[i] + j];
  227. s->frame.data[2][iv[i] + j] =
  228. s->prev_frame.data[2][iv[i] + j];
  229. }
  230. }
  231. }
  232. iy[0] += 4; iy[1] += 4;
  233. iy[2] += 4; iy[3] += 4;
  234. iu[0] += 2; iu[1] += 2;
  235. iv[0] += 2; iv[1] += 2;
  236. }
  237. }
  238. return 0;
  239. }
  240. static int cinepak_decode_strip (CinepakContext *s,
  241. cvid_strip_t *strip, uint8_t *data, int size)
  242. {
  243. uint8_t *eod = (data + size);
  244. int chunk_id, chunk_size;
  245. /* coordinate sanity checks */
  246. if (strip->x1 >= s->width || strip->x2 > s->width ||
  247. strip->y1 >= s->height || strip->y2 > s->height ||
  248. strip->x1 >= strip->x2 || strip->y1 >= strip->y2)
  249. return -1;
  250. while ((data + 4) <= eod) {
  251. chunk_id = BE_16 (&data[0]);
  252. chunk_size = BE_16 (&data[2]) - 4;
  253. data += 4;
  254. chunk_size = ((data + chunk_size) > eod) ? (eod - data) : chunk_size;
  255. switch (chunk_id) {
  256. case 0x2000:
  257. case 0x2100:
  258. case 0x2400:
  259. case 0x2500:
  260. cinepak_decode_codebook (strip->v4_codebook, chunk_id,
  261. chunk_size, data);
  262. break;
  263. case 0x2200:
  264. case 0x2300:
  265. case 0x2600:
  266. case 0x2700:
  267. cinepak_decode_codebook (strip->v1_codebook, chunk_id,
  268. chunk_size, data);
  269. break;
  270. case 0x3000:
  271. case 0x3100:
  272. case 0x3200:
  273. return cinepak_decode_vectors (s, strip, chunk_id,
  274. chunk_size, data);
  275. }
  276. data += chunk_size;
  277. }
  278. return -1;
  279. }
  280. static int cinepak_decode (CinepakContext *s)
  281. {
  282. uint8_t *eod = (s->data + s->size);
  283. int i, result, strip_size, frame_flags, num_strips;
  284. int y0 = 0;
  285. if (s->size < 10)
  286. return -1;
  287. frame_flags = s->data[0];
  288. num_strips = BE_16 (&s->data[8]);
  289. s->data += 10;
  290. if (num_strips > MAX_STRIPS)
  291. num_strips = MAX_STRIPS;
  292. for (i=0; i < num_strips; i++) {
  293. if ((s->data + 12) > eod)
  294. return -1;
  295. s->strips[i].id = BE_16 (s->data);
  296. s->strips[i].y1 = y0;
  297. s->strips[i].x1 = 0;
  298. s->strips[i].y2 = y0 + BE_16 (&s->data[8]);
  299. s->strips[i].x2 = s->avctx->width;
  300. strip_size = BE_16 (&s->data[2]) - 12;
  301. s->data += 12;
  302. strip_size = ((s->data + strip_size) > eod) ? (eod - s->data) : strip_size;
  303. if ((i > 0) && !(frame_flags & 0x01)) {
  304. memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook,
  305. sizeof(s->strips[i].v4_codebook));
  306. memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook,
  307. sizeof(s->strips[i].v1_codebook));
  308. }
  309. result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size);
  310. if (result != 0)
  311. return result;
  312. s->data += strip_size;
  313. y0 = s->strips[i].y2;
  314. }
  315. return 0;
  316. }
  317. static int cinepak_decode_init(AVCodecContext *avctx)
  318. {
  319. CinepakContext *s = (CinepakContext *)avctx->priv_data;
  320. /*
  321. int i;
  322. unsigned char r, g, b;
  323. unsigned char *raw_palette;
  324. unsigned int *palette32;
  325. */
  326. s->avctx = avctx;
  327. s->width = (avctx->width + 3) & ~3;
  328. s->height = (avctx->height + 3) & ~3;
  329. // check for paletted data
  330. s->palette_video = 0;
  331. avctx->pix_fmt = PIX_FMT_YUV420P;
  332. avctx->has_b_frames = 0;
  333. dsputil_init(&s->dsp, avctx);
  334. s->frame.data[0] = s->prev_frame.data[0] = NULL;
  335. return 0;
  336. }
  337. static int cinepak_decode_frame(AVCodecContext *avctx,
  338. void *data, int *data_size,
  339. uint8_t *buf, int buf_size)
  340. {
  341. CinepakContext *s = (CinepakContext *)avctx->priv_data;
  342. s->data = buf;
  343. s->size = buf_size;
  344. if (avctx->get_buffer(avctx, &s->frame)) {
  345. av_log(avctx, AV_LOG_ERROR, " Cinepak: get_buffer() failed\n");
  346. return -1;
  347. }
  348. cinepak_decode(s);
  349. if (s->prev_frame.data[0])
  350. avctx->release_buffer(avctx, &s->prev_frame);
  351. /* shuffle frames */
  352. s->prev_frame = s->frame;
  353. *data_size = sizeof(AVFrame);
  354. *(AVFrame*)data = s->frame;
  355. /* report that the buffer was completely consumed */
  356. return buf_size;
  357. }
  358. static int cinepak_decode_end(AVCodecContext *avctx)
  359. {
  360. CinepakContext *s = (CinepakContext *)avctx->priv_data;
  361. if (s->prev_frame.data[0])
  362. avctx->release_buffer(avctx, &s->prev_frame);
  363. return 0;
  364. }
/* Public decoder descriptor registered with libavcodec.
 * NOTE(review): positional initializer — field order must match this
 * libavcodec version's AVCodec layout; verify against avcodec.h. */
AVCodec cinepak_decoder = {
    "cinepak",              /* name */
    CODEC_TYPE_VIDEO,       /* type */
    CODEC_ID_CINEPAK,       /* id */
    sizeof(CinepakContext), /* priv_data_size */
    cinepak_decode_init,    /* init */
    NULL,                   /* encode (decoder only) */
    cinepak_decode_end,     /* close */
    cinepak_decode_frame,   /* decode */
    CODEC_CAP_DR1,          /* capabilities: direct rendering */
};