/*
 * Cinepak Video Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

/**
 * @file cinepak.c
 * Cinepak video decoder
 * by Ewald Snel <ewald@rambo.its.tudelft.nl>
 * For more information on the Cinepak algorithm, visit:
 *   http://www.csse.monash.edu.au/~timf/
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"

#define PALETTE_COUNT 256

#define BE_16(x)  ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
#define BE_32(x)  ((((uint8_t*)(x))[0] << 24) | \
                   (((uint8_t*)(x))[1] << 16) | \
                   (((uint8_t*)(x))[2] << 8)  | \
                    ((uint8_t*)(x))[3])
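
/* Note: BE_16() and BE_32() assemble big-endian values one byte at a time,
 * so they work on any host byte order and never perform unaligned loads. */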

typedef struct {
    uint8_t  y0, y1, y2, y3;
    uint8_t  u, v;
} cvid_codebook_t;
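
/* A codebook entry holds four luma samples and one chroma pair.  A V1 entry
 * paints an entire 4x4 block (each value upsampled to a 2x2 area), while a
 * V4 entry paints a single 2x2 quadrant. */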

#define MAX_STRIPS      32

typedef struct {
    uint16_t          id;
    uint16_t          x1, y1;
    uint16_t          x2, y2;
    cvid_codebook_t   v4_codebook[256];
    cvid_codebook_t   v1_codebook[256];
} cvid_strip_t;

typedef struct CinepakContext {

    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame frame;
    AVFrame prev_frame;

    unsigned char *data;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];
    int palette_video;
    cvid_strip_t strips[MAX_STRIPS];

} CinepakContext;
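
/* prev_frame is kept between decode calls so that blocks the bitstream marks
 * as unchanged can be copied straight from the previous picture. */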

static void cinepak_decode_codebook (cvid_codebook_t *codebook,
                                     int chunk_id, int size, uint8_t *data)
{
    uint8_t *eod = (data + size);
    uint32_t flag, mask;
    int      i, n;

    /* check if this chunk contains 4- or 6-element vectors */
    n    = (chunk_id & 0x0400) ? 4 : 6;
    flag = 0;
    mask = 0;
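
    /* When bit 0x0100 of the chunk id is set, the codebook is updated
     * selectively: a big-endian 32-bit mask precedes each group of 32
     * entries and only entries whose bit is set are replaced. */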
    for (i=0; i < 256; i++) {
        if ((chunk_id & 0x0100) && !(mask >>= 1)) {
            if ((data + 4) > eod)
                break;

            flag  = BE_32 (data);
            data += 4;
            mask  = 0x80000000;
        }

        if (!(chunk_id & 0x0100) || (flag & mask)) {
            if ((data + n) > eod)
                break;

            if (n == 6) {
                codebook[i].y0 = *data++;
                codebook[i].y1 = *data++;
                codebook[i].y2 = *data++;
                codebook[i].y3 = *data++;
                codebook[i].u  = 128 + *data++;
                codebook[i].v  = 128 + *data++;
            } else {
                /* this codebook type indicates either greyscale or
                 * palettized video; if palettized, U & V components will
                 * not be used so it is safe to set them to 128 for the
                 * benefit of greyscale rendering in YUV420P */
                codebook[i].y0 = *data++;
                codebook[i].y1 = *data++;
                codebook[i].y2 = *data++;
                codebook[i].y3 = *data++;
                codebook[i].u  = 128;
                codebook[i].v  = 128;
            }
        }
    }
}
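
/* Decode the image vectors of one strip.  Each 4x4 block is either copied
 * from the previous frame (skip), coded with one V1 codebook index (y0..y3
 * each filling a 2x2 quadrant), or coded with four V4 codebook indices (one
 * per quadrant), as selected by the bitmask words embedded in the chunk. */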
static int cinepak_decode_vectors (CinepakContext *s, cvid_strip_t *strip,
                                   int chunk_id, int size, uint8_t *data)
{
    uint8_t *eod = (data + size);
    uint32_t flag, mask;
    cvid_codebook_t *codebook;
    unsigned int i, j, x, y;
    uint32_t iy[4];
    uint32_t iu[2];
    uint32_t iv[2];

    flag = 0;
    mask = 0;

    for (y=strip->y1; y < strip->y2; y+=4) {

        iy[0] = strip->x1 + (y * s->frame.linesize[0]);
        iy[1] = iy[0] + s->frame.linesize[0];
        iy[2] = iy[1] + s->frame.linesize[0];
        iy[3] = iy[2] + s->frame.linesize[0];
        iu[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[1]);
        iu[1] = iu[0] + s->frame.linesize[1];
        iv[0] = (strip->x1/2) + ((y/2) * s->frame.linesize[2]);
        iv[1] = iv[0] + s->frame.linesize[2];

        for (x=strip->x1; x < strip->x2; x+=4) {
            if ((chunk_id & 0x0100) && !(mask >>= 1)) {
                if ((data + 4) > eod)
                    return -1;

                flag  = BE_32 (data);
                data += 4;
                mask  = 0x80000000;
            }

            if (!(chunk_id & 0x0100) || (flag & mask)) {
                if (!(chunk_id & 0x0200) && !(mask >>= 1)) {
                    if ((data + 4) > eod)
                        return -1;

                    flag  = BE_32 (data);
                    data += 4;
                    mask  = 0x80000000;
                }

                if ((chunk_id & 0x0200) || (~flag & mask)) {
                    if (data >= eod)
                        return -1;

                    codebook = &strip->v1_codebook[*data++];
                    s->frame.data[0][iy[0] + 0] = codebook->y0;
                    s->frame.data[0][iy[0] + 1] = codebook->y0;
                    s->frame.data[0][iy[1] + 0] = codebook->y0;
                    s->frame.data[0][iy[1] + 1] = codebook->y0;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0]] = codebook->u;
                        s->frame.data[2][iv[0]] = codebook->v;
                    }

                    s->frame.data[0][iy[0] + 2] = codebook->y1;
                    s->frame.data[0][iy[0] + 3] = codebook->y1;
                    s->frame.data[0][iy[1] + 2] = codebook->y1;
                    s->frame.data[0][iy[1] + 3] = codebook->y1;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0] + 1] = codebook->u;
                        s->frame.data[2][iv[0] + 1] = codebook->v;
                    }

                    s->frame.data[0][iy[2] + 0] = codebook->y2;
                    s->frame.data[0][iy[2] + 1] = codebook->y2;
                    s->frame.data[0][iy[3] + 0] = codebook->y2;
                    s->frame.data[0][iy[3] + 1] = codebook->y2;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1]] = codebook->u;
                        s->frame.data[2][iv[1]] = codebook->v;
                    }

                    s->frame.data[0][iy[2] + 2] = codebook->y3;
                    s->frame.data[0][iy[2] + 3] = codebook->y3;
                    s->frame.data[0][iy[3] + 2] = codebook->y3;
                    s->frame.data[0][iy[3] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1] + 1] = codebook->u;
                        s->frame.data[2][iv[1] + 1] = codebook->v;
                    }

                } else if (flag & mask) {
                    if ((data + 4) > eod)
                        return -1;

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[0] + 0] = codebook->y0;
                    s->frame.data[0][iy[0] + 1] = codebook->y1;
                    s->frame.data[0][iy[1] + 0] = codebook->y2;
                    s->frame.data[0][iy[1] + 1] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0]] = codebook->u;
                        s->frame.data[2][iv[0]] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[0] + 2] = codebook->y0;
                    s->frame.data[0][iy[0] + 3] = codebook->y1;
                    s->frame.data[0][iy[1] + 2] = codebook->y2;
                    s->frame.data[0][iy[1] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[0] + 1] = codebook->u;
                        s->frame.data[2][iv[0] + 1] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[2] + 0] = codebook->y0;
                    s->frame.data[0][iy[2] + 1] = codebook->y1;
                    s->frame.data[0][iy[3] + 0] = codebook->y2;
                    s->frame.data[0][iy[3] + 1] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1]] = codebook->u;
                        s->frame.data[2][iv[1]] = codebook->v;
                    }

                    codebook = &strip->v4_codebook[*data++];
                    s->frame.data[0][iy[2] + 2] = codebook->y0;
                    s->frame.data[0][iy[2] + 3] = codebook->y1;
                    s->frame.data[0][iy[3] + 2] = codebook->y2;
                    s->frame.data[0][iy[3] + 3] = codebook->y3;
                    if (!s->palette_video) {
                        s->frame.data[1][iu[1] + 1] = codebook->u;
                        s->frame.data[2][iv[1] + 1] = codebook->v;
                    }

                }
            } else {
                /* copy from the previous frame */
                for (i = 0; i < 4; i++) {
                    for (j = 0; j < 4; j++) {
                        s->frame.data[0][iy[i] + j] =
                            s->prev_frame.data[0][iy[i] + j];
                    }
                }
                for (i = 0; i < 2; i++) {
                    for (j = 0; j < 2; j++) {
                        s->frame.data[1][iu[i] + j] =
                            s->prev_frame.data[1][iu[i] + j];
                        s->frame.data[2][iv[i] + j] =
                            s->prev_frame.data[2][iv[i] + j];
                    }
                }
            }

            iy[0] += 4;  iy[1] += 4;
            iy[2] += 4;  iy[3] += 4;
            iu[0] += 2;  iu[1] += 2;
            iv[0] += 2;  iv[1] += 2;
        }
    }

    return 0;
}
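
/* Walk the chunks of one strip.  Every chunk starts with a 16-bit id and a
 * 16-bit size; the 0x2x00 chunks update the strip's V4 or V1 codebooks and a
 * 0x3x00 chunk carries the image vectors and ends the strip. */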
static int cinepak_decode_strip (CinepakContext *s,
                                 cvid_strip_t *strip, uint8_t *data, int size)
{
    uint8_t *eod = (data + size);
    int      chunk_id, chunk_size;

    /* coordinate sanity checks */
    if (strip->x1 >= s->avctx->width  || strip->x2 > s->avctx->width  ||
        strip->y1 >= s->avctx->height || strip->y2 > s->avctx->height ||
        strip->x1 >= strip->x2        || strip->y1 >= strip->y2)
        return -1;

    while ((data + 4) <= eod) {
        chunk_id   = BE_16 (&data[0]);
        chunk_size = BE_16 (&data[2]) - 4;
        data      += 4;

        chunk_size = ((data + chunk_size) > eod) ? (eod - data) : chunk_size;

        switch (chunk_id) {
        case 0x2000:
        case 0x2100:
        case 0x2400:
        case 0x2500:
            cinepak_decode_codebook (strip->v4_codebook, chunk_id,
                chunk_size, data);
            break;
        case 0x2200:
        case 0x2300:
        case 0x2600:
        case 0x2700:
            cinepak_decode_codebook (strip->v1_codebook, chunk_id,
                chunk_size, data);
            break;
        case 0x3000:
        case 0x3100:
        case 0x3200:
            return cinepak_decode_vectors (s, strip, chunk_id,
                chunk_size, data);
        }

        data += chunk_size;
    }

    return -1;
}
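
/* Parse one frame: a 10-byte header (flags in byte 0, strip count at byte
 * offset 8) is followed by the strips, each introduced by a 12-byte strip
 * header giving its id, size and height. */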
static int cinepak_decode (CinepakContext *s)
{
    uint8_t *eod = (s->data + s->size);
    int      i, result, strip_size, frame_flags, num_strips;
    int      y0 = 0;

    if (s->size < 10)
        return -1;

    frame_flags = s->data[0];
    num_strips  = BE_16 (&s->data[8]);
    s->data    += 10;

    if (num_strips > MAX_STRIPS)
        num_strips = MAX_STRIPS;

    for (i=0; i < num_strips; i++) {
        if ((s->data + 12) > eod)
            return -1;

        s->strips[i].id = BE_16 (s->data);
        s->strips[i].y1 = y0;
        s->strips[i].x1 = 0;
        s->strips[i].y2 = y0 + BE_16 (&s->data[8]);
        s->strips[i].x2 = s->avctx->width;

        strip_size = BE_16 (&s->data[2]) - 12;
        s->data   += 12;
        strip_size = ((s->data + strip_size) > eod) ? (eod - s->data) : strip_size;

        if ((i > 0) && !(frame_flags & 0x01)) {
            memcpy (s->strips[i].v4_codebook, s->strips[i-1].v4_codebook,
                sizeof(s->strips[i].v4_codebook));

            memcpy (s->strips[i].v1_codebook, s->strips[i-1].v1_codebook,
                sizeof(s->strips[i].v1_codebook));
        }

        result = cinepak_decode_strip (s, &s->strips[i], s->data, strip_size);

        if (result != 0)
            return result;

        s->data += strip_size;
        y0 = s->strips[i].y2;
    }
    return 0;
}
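
/* This version of the decoder always outputs YUV420P: palette_video is
 * hard-wired to 0 in cinepak_decode_init() and the commented-out palette
 * variables there are unused. */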
static int cinepak_decode_init(AVCodecContext *avctx)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;
/*
    int i;
    unsigned char r, g, b;
    unsigned char *raw_palette;
    unsigned int *palette32;
*/

    s->avctx = avctx;

    // check for paletted data
    s->palette_video = 0;

    avctx->pix_fmt = PIX_FMT_YUV420P;
    avctx->has_b_frames = 0;
    dsputil_init(&s->dsp, avctx);

    s->frame.data[0] = s->prev_frame.data[0] = NULL;

    return 0;
}

static int cinepak_decode_frame(AVCodecContext *avctx,
                                void *data, int *data_size,
                                uint8_t *buf, int buf_size)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    s->data = buf;
    s->size = buf_size;

    if (avctx->get_buffer(avctx, &s->frame)) {
        printf (" Cinepak: get_buffer() failed\n");
        return -1;
    }

    cinepak_decode(s);

    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);

    /* shuffle frames */
    s->prev_frame = s->frame;

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->frame;

    /* report that the buffer was completely consumed */
    return buf_size;
}

static int cinepak_decode_end(AVCodecContext *avctx)
{
    CinepakContext *s = (CinepakContext *)avctx->priv_data;

    if (s->prev_frame.data[0])
        avctx->release_buffer(avctx, &s->prev_frame);

    return 0;
}

AVCodec cinepak_decoder = {
    "cinepak",
    CODEC_TYPE_VIDEO,
    CODEC_ID_CINEPAK,
    sizeof(CinepakContext),
    cinepak_decode_init,
    NULL,
    cinepak_decode_end,
    cinepak_decode_frame,
    CODEC_CAP_DR1,
};
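
/* Presumably (not part of this file) cinepak_decoder is registered with the
 * rest of libavcodec via register_avcodec(&cinepak_decoder), after which it
 * can be located with avcodec_find_decoder(CODEC_ID_CINEPAK). */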