/*
 * Indeo Video v3 compatible decoder
 * Copyright (c) 2009 - 2011 Maxim Poliakovski
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * This is a decoder for Intel Indeo Video v3.
 * It is based on vector quantization, run-length coding and motion compensation.
 * Known container formats: .avi and .mov
 * Known FOURCCs: 'IV31', 'IV32'
 *
 * @see http://wiki.multimedia.cx/index.php?title=Indeo_3
 */

#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "dsputil.h"
#include "bytestream.h"
#include "get_bits.h"

#include "indeo3data.h"

/* RLE opcodes. */
enum {
    RLE_ESC_F9 = 249, ///< same as RLE_ESC_FA + do the same with next block
    RLE_ESC_FA = 250, ///< INTRA: skip block, INTER: copy data from reference
    RLE_ESC_FB = 251, ///< apply null delta to N blocks / skip N blocks
    RLE_ESC_FC = 252, ///< same as RLE_ESC_FD + do the same with next block
    RLE_ESC_FD = 253, ///< apply null delta to all remaining lines of this block
    RLE_ESC_FE = 254, ///< apply null delta to all lines up to the 3rd line
    RLE_ESC_FF = 255  ///< apply null delta to all lines up to the 2nd line
};

/* Some constants for parsing frame bitstream flags. */
#define BS_8BIT_PEL  (1 << 1) ///< 8bit pixel bitdepth indicator
#define BS_KEYFRAME  (1 << 2) ///< intra frame indicator
#define BS_MV_Y_HALF (1 << 4) ///< vertical mv halfpel resolution indicator
#define BS_MV_X_HALF (1 << 5) ///< horizontal mv halfpel resolution indicator
#define BS_NONREF    (1 << 8) ///< nonref (discardable) frame indicator
#define BS_BUFFER           9 ///< indicates which of two frame buffers should be used
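
/*
 * For example, a frame_flags value of 0x0204 (a value chosen purely for
 * illustration) has BS_KEYFRAME set and bit 9 set, i.e. an intra frame that
 * should be decoded into frame buffer 1; decode_frame() below derives the
 * buffer with buf_sel = (frame_flags >> BS_BUFFER) & 1.
 */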

typedef struct Plane {
    uint8_t  *buffers[2];
    uint8_t  *pixels[2]; ///< pointer to the actual pixel data of the buffers above
    uint32_t  width;
    uint32_t  height;
    uint32_t  pitch;
} Plane;

#define CELL_STACK_MAX 20

typedef struct Cell {
    int16_t       xpos;   ///< cell coordinates in 4x4 blocks
    int16_t       ypos;
    int16_t       width;  ///< cell width in 4x4 blocks
    int16_t       height; ///< cell height in 4x4 blocks
    uint8_t       tree;   ///< tree id: 0 - MC tree, 1 - VQ tree
    const int8_t *mv_ptr; ///< ptr to the motion vector if any
} Cell;

typedef struct Indeo3DecodeContext {
    AVCodecContext *avctx;
    AVFrame         frame;
    DSPContext      dsp;

    GetBitContext   gb;
    int             need_resync;
    int             skip_bits;
    const uint8_t  *next_cell_data;
    const uint8_t  *last_byte;
    const int8_t   *mc_vectors;
    unsigned        num_vectors; ///< number of motion vectors in mc_vectors

    int16_t         width, height;
    uint32_t        frame_num;   ///< current frame number (zero-based)
    uint32_t        data_size;   ///< size of the frame data in bytes
    uint16_t        frame_flags; ///< frame properties
    uint8_t         cb_offset;   ///< needed for selecting VQ tables
    uint8_t         buf_sel;     ///< active frame buffer: 0 - primary, 1 - secondary
    const uint8_t  *y_data_ptr;
    const uint8_t  *v_data_ptr;
    const uint8_t  *u_data_ptr;
    int32_t         y_data_size;
    int32_t         v_data_size;
    int32_t         u_data_size;
    const uint8_t  *alt_quant;   ///< secondary VQ table set for the modes 1 and 4
    Plane           planes[3];
} Indeo3DecodeContext;

static uint8_t requant_tab[8][128];

/*
 * Build the static requantization table.
 * This table is used to remap pixel values according to a specific
 * quant index and thus avoid overflows while adding deltas.
 */
static av_cold void build_requant_tab(void)
{
    static int8_t offsets[8] = { 1, 1, 2, -3, -3, 3, 4, 4 };
    static int8_t deltas [8] = { 0, 1, 0,  4,  4, 1, 0, 1 };

    int i, j, step;

    for (i = 0; i < 8; i++) {
        step = i + 2;
        for (j = 0; j < 128; j++)
            requant_tab[i][j] = (j + offsets[i]) / step * step + deltas[i];
    }

    /* Some of the last elements calculated above will have values >= 128. */
    /* Since pixel values must never exceed 127, set those entries to */
    /* non-overflowing values according to the quantization step of the */
    /* respective section. */
    requant_tab[0][127] = 126;
    requant_tab[1][119] = 118;
    requant_tab[1][120] = 118;
    requant_tab[2][126] = 124;
    requant_tab[2][127] = 124;
    requant_tab[6][124] = 120;
    requant_tab[6][125] = 120;
    requant_tab[6][126] = 120;
    requant_tab[6][127] = 120;

    /* Patch for compatibility with Intel's binary decoders */
    requant_tab[1][7] = 10;
    requant_tab[4][8] = 10;
}
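
/*
 * A worked example of the formula above (step = i + 2):
 * requant_tab[0][5] = (5 + 1) / 2 * 2 + 0 = 6, and
 * requant_tab[1][7] = (7 + 1) / 3 * 3 + 1 = 7, which the compatibility patch
 * then overrides to 10.
 */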

static av_cold int allocate_frame_buffers(Indeo3DecodeContext *ctx,
                                          AVCodecContext *avctx,
                                          int luma_width, int luma_height)
{
    int p, chroma_width, chroma_height;
    int luma_pitch, chroma_pitch, luma_size, chroma_size;

    if (luma_width < 16 || luma_width > 640 ||
        luma_height < 16 || luma_height > 480 ||
        luma_width & 3 || luma_height & 3) {
        av_log(avctx, AV_LOG_ERROR, "Invalid picture dimensions: %d x %d!\n",
               luma_width, luma_height);
        return AVERROR_INVALIDDATA;
    }

    ctx->width  = luma_width;
    ctx->height = luma_height;

    chroma_width  = FFALIGN(luma_width  >> 2, 4);
    chroma_height = FFALIGN(luma_height >> 2, 4);
    luma_pitch    = FFALIGN(luma_width,   16);
    chroma_pitch  = FFALIGN(chroma_width, 16);

    /* Calculate the size of the luminance plane; */
    /* add one extra line for INTRA prediction. */
    luma_size = luma_pitch * (luma_height + 1);

    /* Calculate the size of the chrominance planes; */
    /* add one extra line for INTRA prediction. */
    chroma_size = chroma_pitch * (chroma_height + 1);

    /* allocate frame buffers */
    for (p = 0; p < 3; p++) {
        ctx->planes[p].pitch  = !p ? luma_pitch  : chroma_pitch;
        ctx->planes[p].width  = !p ? luma_width  : chroma_width;
        ctx->planes[p].height = !p ? luma_height : chroma_height;

        ctx->planes[p].buffers[0] = av_malloc(!p ? luma_size : chroma_size);
        ctx->planes[p].buffers[1] = av_malloc(!p ? luma_size : chroma_size);

        /* fill the INTRA prediction lines with the middle pixel value = 64 */
        memset(ctx->planes[p].buffers[0], 0x40, ctx->planes[p].pitch);
        memset(ctx->planes[p].buffers[1], 0x40, ctx->planes[p].pitch);

        /* set buffer pointers = buf_ptr + pitch and thus skip the INTRA prediction line */
        ctx->planes[p].pixels[0] = ctx->planes[p].buffers[0] + ctx->planes[p].pitch;
        ctx->planes[p].pixels[1] = ctx->planes[p].buffers[1] + ctx->planes[p].pitch;
    }

    return 0;
}

static av_cold void free_frame_buffers(Indeo3DecodeContext *ctx)
{
    int p;

    ctx->width = ctx->height = 0;

    for (p = 0; p < 3; p++) {
        av_freep(&ctx->planes[p].buffers[0]);
        av_freep(&ctx->planes[p].buffers[1]);
    }
}

/**
 * Copy pixels of the cell(x + mv_x, y + mv_y) from the previous frame into
 * the cell(x, y) in the current frame.
 *
 * @param ctx   pointer to the decoder context
 * @param plane pointer to the plane descriptor
 * @param cell  pointer to the cell descriptor
 */
static void copy_cell(Indeo3DecodeContext *ctx, Plane *plane, Cell *cell)
{
    int      h, w, mv_x, mv_y, offset, offset_dst;
    uint8_t *src, *dst;

    /* setup output and reference pointers */
    offset_dst = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2);
    dst        = plane->pixels[ctx->buf_sel] + offset_dst;
    if (cell->mv_ptr) {
        mv_y = cell->mv_ptr[0];
        mv_x = cell->mv_ptr[1];
    } else
        mv_x = mv_y = 0;
    offset = offset_dst + mv_y * plane->pitch + mv_x;
    src    = plane->pixels[ctx->buf_sel ^ 1] + offset;

    h = cell->height << 2;

    for (w = cell->width; w > 0;) {
        /* copy using 16xH blocks */
        if (!((cell->xpos << 2) & 15) && w >= 4) {
            for (; w >= 4; src += 16, dst += 16, w -= 4)
                ctx->dsp.put_no_rnd_pixels_tab[0][0](dst, src, plane->pitch, h);
        }

        /* copy using 8xH blocks */
        if (!((cell->xpos << 2) & 7) && w >= 2) {
            ctx->dsp.put_no_rnd_pixels_tab[1][0](dst, src, plane->pitch, h);
            w   -= 2;
            src += 8;
            dst += 8;
        }

        if (w >= 1) {
            copy_block4(dst, src, plane->pitch, plane->pitch, h);
            w--;
            src += 4;
            dst += 4;
        }
    }
}

/* Average 4/8 pixels at once without rounding using SWAR */
#define AVG_32(dst, src, ref) \
    AV_WN32A(dst, ((AV_RN32A(src) + AV_RN32A(ref)) >> 1) & 0x7F7F7F7FUL)

#define AVG_64(dst, src, ref) \
    AV_WN64A(dst, ((AV_RN64A(src) + AV_RN64A(ref)) >> 1) & 0x7F7F7F7F7F7F7F7FULL)
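
/*
 * These macros rely on pixel values staying below 128: the per-byte sums can
 * never carry into the neighbouring byte, the shift then halves every byte at
 * once, and the 0x7F mask clears the bit each byte receives from the byte
 * above it. E.g. for two packed byte sums 0x0508 (8 and 5):
 * 0x0508 >> 1 = 0x0284, and 0x0284 & 0x7F7F = 0x0204, i.e. 4 and 2.
 */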

/*
 * Replicate each even pixel as follows:
 * ABCDEFGH -> AACCEEGG
 */
static inline uint64_t replicate64(uint64_t a) {
#if HAVE_BIGENDIAN
    a &= 0xFF00FF00FF00FF00ULL;
    a |= a >> 8;
#else
    a &= 0x00FF00FF00FF00FFULL;
    a |= a << 8;
#endif
    return a;
}

static inline uint32_t replicate32(uint32_t a) {
#if HAVE_BIGENDIAN
    a &= 0xFF00FF00UL;
    a |= a >> 8;
#else
    a &= 0x00FF00FFUL;
    a |= a << 8;
#endif
    return a;
}
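
/*
 * For instance, on a little-endian machine eight pixels stored as the bytes
 * 10 20 30 40 50 60 70 80 come out of replicate64() as 10 10 30 30 50 50 70 70:
 * the mask keeps the even-positioned pixels and the shift copies each of them
 * over its odd-positioned neighbour.
 */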

/* Fill n lines with 64bit pixel value pix */
static inline void fill_64(uint8_t *dst, const uint64_t pix, int32_t n,
                           int32_t row_offset)
{
    for (; n > 0; dst += row_offset, n--)
        AV_WN64A(dst, pix);
}

/* Error codes for cell decoding. */
enum {
    IV3_NOERR       = 0,
    IV3_BAD_RLE     = 1,
    IV3_BAD_DATA    = 2,
    IV3_BAD_COUNTER = 3,
    IV3_UNSUPPORTED = 4,
    IV3_OUT_OF_DATA = 5
};

#define BUFFER_PRECHECK \
    if (*data_ptr >= last_ptr) \
        return IV3_OUT_OF_DATA; \

#define RLE_BLOCK_COPY \
    if (cell->mv_ptr || !skip_flag) \
        copy_block4(dst, ref, row_offset, row_offset, 4 << v_zoom)

#define RLE_BLOCK_COPY_8 \
    pix64 = AV_RN64A(ref);\
    if (is_first_row) { /* special prediction case: top line of a cell */\
        pix64 = replicate64(pix64);\
        fill_64(dst + row_offset, pix64, 7, row_offset);\
        AVG_64(dst, ref, dst + row_offset);\
    } else \
        fill_64(dst, pix64, 8, row_offset)

#define RLE_LINES_COPY \
    copy_block4(dst, ref, row_offset, row_offset, num_lines << v_zoom)

#define RLE_LINES_COPY_M10 \
    pix64 = AV_RN64A(ref);\
    if (is_top_of_cell) {\
        pix64 = replicate64(pix64);\
        fill_64(dst + row_offset, pix64, (num_lines << 1) - 1, row_offset);\
        AVG_64(dst, ref, dst + row_offset);\
    } else \
        fill_64(dst, pix64, num_lines << 1, row_offset)

#define APPLY_DELTA_4 \
    AV_WN16A(dst + line_offset    , AV_RN16A(ref    ) + delta_tab->deltas[dyad1]);\
    AV_WN16A(dst + line_offset + 2, AV_RN16A(ref + 2) + delta_tab->deltas[dyad2]);\
    if (mode >= 3) {\
        if (is_top_of_cell && !cell->ypos) {\
            AV_COPY32(dst, dst + row_offset);\
        } else {\
            AVG_32(dst, ref, dst + row_offset);\
        }\
    }

#define APPLY_DELTA_8 \
    /* apply two 32-bit VQ deltas to next even line */\
    if (is_top_of_cell) { \
        AV_WN32A(dst + row_offset    , \
                 replicate32(AV_RN32A(ref    )) + delta_tab->deltas_m10[dyad1]);\
        AV_WN32A(dst + row_offset + 4, \
                 replicate32(AV_RN32A(ref + 4)) + delta_tab->deltas_m10[dyad2]);\
    } else { \
        AV_WN32A(dst + row_offset    , \
                 AV_RN32A(ref    ) + delta_tab->deltas_m10[dyad1]);\
        AV_WN32A(dst + row_offset + 4, \
                 AV_RN32A(ref + 4) + delta_tab->deltas_m10[dyad2]);\
    } \
    /* odd lines are not coded but rather interpolated/replicated */\
    /* first line of the cell on the top of image? - replicate */\
    /* otherwise - interpolate */\
    if (is_top_of_cell && !cell->ypos) {\
        AV_COPY64(dst, dst + row_offset);\
    } else \
        AVG_64(dst, ref, dst + row_offset);

#define APPLY_DELTA_1011_INTER \
    if (mode == 10) { \
        AV_WN32A(dst                 , \
                 AV_RN32A(dst                 ) + delta_tab->deltas_m10[dyad1]);\
        AV_WN32A(dst + 4             , \
                 AV_RN32A(dst + 4             ) + delta_tab->deltas_m10[dyad2]);\
        AV_WN32A(dst + row_offset    , \
                 AV_RN32A(dst + row_offset    ) + delta_tab->deltas_m10[dyad1]);\
        AV_WN32A(dst + row_offset + 4, \
                 AV_RN32A(dst + row_offset + 4) + delta_tab->deltas_m10[dyad2]);\
    } else { \
        AV_WN16A(dst                 , \
                 AV_RN16A(dst                 ) + delta_tab->deltas[dyad1]);\
        AV_WN16A(dst + 2             , \
                 AV_RN16A(dst + 2             ) + delta_tab->deltas[dyad2]);\
        AV_WN16A(dst + row_offset    , \
                 AV_RN16A(dst + row_offset    ) + delta_tab->deltas[dyad1]);\
        AV_WN16A(dst + row_offset + 2, \
                 AV_RN16A(dst + row_offset + 2) + delta_tab->deltas[dyad2]);\
    }
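
/*
 * A sketch of how a VQ code byte below 248 selects deltas in
 * decode_cell_data() below (the actual num_dyads/quad_exp values come from
 * the vq_tab entry and are only assumed here for illustration): a code
 * smaller than num_dyads is used directly as dyad2 and the next data byte
 * supplies dyad1, while a larger code packs two indices at once, e.g. with
 * num_dyads = 16 and quad_exp = 8 a code of 50 gives 50 - 16 = 34, hence
 * dyad1 = 34 / 8 = 4 and dyad2 = 34 % 8 = 2.
 */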
static int decode_cell_data(Cell *cell, uint8_t *block, uint8_t *ref_block,
                            int pitch, int h_zoom, int v_zoom, int mode,
                            const vqEntry *delta[2], int swap_quads[2],
                            const uint8_t **data_ptr, const uint8_t *last_ptr)
{
    int            x, y, line, num_lines;
    int            rle_blocks = 0;
    uint8_t        code, *dst, *ref;
    const vqEntry *delta_tab;
    unsigned int   dyad1, dyad2;
    uint64_t       pix64;
    int            skip_flag = 0, is_top_of_cell, is_first_row = 1;
    int            row_offset, blk_row_offset, line_offset;

    row_offset     = pitch;
    blk_row_offset = (row_offset << (2 + v_zoom)) - (cell->width << 2);
    line_offset    = v_zoom ? row_offset : 0;

    for (y = 0; y + v_zoom < cell->height; is_first_row = 0, y += 1 + v_zoom) {
        for (x = 0; x + h_zoom < cell->width; x += 1 + h_zoom) {
            ref = ref_block;
            dst = block;

            if (rle_blocks > 0) {
                if (mode <= 4) {
                    RLE_BLOCK_COPY;
                } else if (mode == 10 && !cell->mv_ptr) {
                    RLE_BLOCK_COPY_8;
                }
                rle_blocks--;
            } else {
                for (line = 0; line < 4;) {
                    num_lines = 1;
                    is_top_of_cell = is_first_row && !line;

                    /* select primary VQ table for odd, secondary for even lines */
                    if (mode <= 4)
                        delta_tab = delta[line & 1];
                    else
                        delta_tab = delta[1];
                    BUFFER_PRECHECK;
                    code = bytestream_get_byte(data_ptr);
                    if (code < 248) {
                        if (code < delta_tab->num_dyads) {
                            BUFFER_PRECHECK;
                            dyad1 = bytestream_get_byte(data_ptr);
                            dyad2 = code;
                            if (dyad1 >= delta_tab->num_dyads || dyad1 >= 248)
                                return IV3_BAD_DATA;
                        } else {
                            /* process QUADS */
                            code -= delta_tab->num_dyads;
                            dyad1 = code / delta_tab->quad_exp;
                            dyad2 = code % delta_tab->quad_exp;
                            if (swap_quads[line & 1])
                                FFSWAP(unsigned int, dyad1, dyad2);
                        }
                        if (mode <= 4) {
                            APPLY_DELTA_4;
                        } else if (mode == 10 && !cell->mv_ptr) {
                            APPLY_DELTA_8;
                        } else {
                            APPLY_DELTA_1011_INTER;
                        }
                    } else {
                        /* process RLE codes */
                        switch (code) {
                        case RLE_ESC_FC:
                            skip_flag  = 0;
                            rle_blocks = 1;
                            code       = 253;
                            /* FALLTHROUGH */
                        case RLE_ESC_FF:
                        case RLE_ESC_FE:
                        case RLE_ESC_FD:
                            num_lines = 257 - code - line;
                            if (num_lines <= 0)
                                return IV3_BAD_RLE;
                            if (mode <= 4) {
                                RLE_LINES_COPY;
                            } else if (mode == 10 && !cell->mv_ptr) {
                                RLE_LINES_COPY_M10;
                            }
                            break;
                        case RLE_ESC_FB:
                            BUFFER_PRECHECK;
                            code = bytestream_get_byte(data_ptr);
                            rle_blocks = (code & 0x1F) - 1; /* set block counter */
                            if (code >= 64 || rle_blocks < 0)
                                return IV3_BAD_COUNTER;
                            skip_flag = code & 0x20;
                            num_lines = 4 - line; /* enforce next block processing */
                            if (mode >= 10 || (cell->mv_ptr || !skip_flag)) {
                                if (mode <= 4) {
                                    RLE_LINES_COPY;
                                } else if (mode == 10 && !cell->mv_ptr) {
                                    RLE_LINES_COPY_M10;
                                }
                            }
                            break;
                        case RLE_ESC_F9:
                            skip_flag  = 1;
                            rle_blocks = 1;
                            /* FALLTHROUGH */
                        case RLE_ESC_FA:
                            if (line)
                                return IV3_BAD_RLE;
                            num_lines = 4; /* enforce next block processing */
                            if (cell->mv_ptr) {
                                if (mode <= 4) {
                                    RLE_LINES_COPY;
                                } else if (mode == 10 && !cell->mv_ptr) {
                                    RLE_LINES_COPY_M10;
                                }
                            }
                            break;
                        default:
                            return IV3_UNSUPPORTED;
                        }
                    }

                    line += num_lines;
                    ref  += row_offset * (num_lines << v_zoom);
                    dst  += row_offset * (num_lines << v_zoom);
                }
            }

            /* move to next horizontal block */
            block     += 4 << h_zoom;
            ref_block += 4 << h_zoom;
        }

        /* move to next line of blocks */
        ref_block += blk_row_offset;
        block     += blk_row_offset;
    }
    return IV3_NOERR;
}

/**
 * Decode a vector-quantized cell.
 * It consists of several routines, each of which handles one or more "modes"
 * with which a cell can be encoded.
 *
 * @param ctx      pointer to the decoder context
 * @param avctx    pointer to the AVCodecContext
 * @param plane    pointer to the plane descriptor
 * @param cell     pointer to the cell descriptor
 * @param data_ptr pointer to the compressed data
 * @param last_ptr pointer to the last byte to catch reads past the end of the buffer
 * @return number of consumed bytes or a negative number in case of error
 */
static int decode_cell(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                       Plane *plane, Cell *cell, const uint8_t *data_ptr,
                       const uint8_t *last_ptr)
{
    int            x, mv_x, mv_y, mode, vq_index, prim_indx, second_indx;
    int            zoom_fac;
    int            offset, error = 0, swap_quads[2];
    uint8_t        code, *block, *ref_block = 0;
    const vqEntry *delta[2];
    const uint8_t *data_start = data_ptr;

    /* get coding mode and VQ table index from the VQ descriptor byte */
    code     = *data_ptr++;
    mode     = code >> 4;
    vq_index = code & 0xF;

    /* setup output and reference pointers */
    offset = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2);
    block  = plane->pixels[ctx->buf_sel] + offset;

    if (cell->mv_ptr) {
        mv_y = cell->mv_ptr[0];
        mv_x = cell->mv_ptr[1];
        if (   mv_x + 4*cell->xpos < 0
            || mv_y + 4*cell->ypos < 0
            || mv_x + 4*cell->xpos + 4*cell->width  > plane->width
            || mv_y + 4*cell->ypos + 4*cell->height > plane->height) {
            av_log(avctx, AV_LOG_ERROR, "motion vector %d %d outside reference\n",
                   mv_x + 4*cell->xpos, mv_y + 4*cell->ypos);
            return AVERROR_INVALIDDATA;
        }
    }

    if (!cell->mv_ptr) {
        /* use previous line as reference for INTRA cells */
        ref_block = block - plane->pitch;
    } else if (mode >= 10) {
        /* for mode 10 and 11 INTER first copy the predicted cell into the current one */
        /* so we don't need to do data copying for each RLE code later */
        copy_cell(ctx, plane, cell);
    } else {
        /* set the pointer to the reference pixels for modes 0-4 INTER */
        mv_y      = cell->mv_ptr[0];
        mv_x      = cell->mv_ptr[1];
        offset   += mv_y * plane->pitch + mv_x;
        ref_block = plane->pixels[ctx->buf_sel ^ 1] + offset;
    }

    /* select VQ tables as follows: */
    /* modes 0 and 3 use only the primary table for all lines in a block */
    /* while modes 1 and 4 switch between primary and secondary tables on alternate lines */
    if (mode == 1 || mode == 4) {
        code        = ctx->alt_quant[vq_index];
        prim_indx   = (code >> 4)  + ctx->cb_offset;
        second_indx = (code & 0xF) + ctx->cb_offset;
    } else {
        vq_index += ctx->cb_offset;
        prim_indx = second_indx = vq_index;
    }

    if (prim_indx >= 24 || second_indx >= 24) {
        av_log(avctx, AV_LOG_ERROR, "Invalid VQ table indexes! Primary: %d, secondary: %d!\n",
               prim_indx, second_indx);
        return AVERROR_INVALIDDATA;
    }

    delta[0] = &vq_tab[second_indx];
    delta[1] = &vq_tab[prim_indx];
    swap_quads[0] = second_indx >= 16;
    swap_quads[1] = prim_indx   >= 16;

    /* requantize the prediction if VQ index of this cell differs from VQ index */
    /* of the predicted cell in order to avoid overflows. */
    if (vq_index >= 8 && ref_block) {
        for (x = 0; x < cell->width << 2; x++)
            ref_block[x] = requant_tab[vq_index & 7][ref_block[x] & 127];
    }

    error = IV3_NOERR;

    switch (mode) {
    case 0: /*------------------ MODES 0 & 1 (4x4 block processing) --------------------*/
    case 1:
    case 3: /*------------------ MODES 3 & 4 (4x8 block processing) --------------------*/
    case 4:
        if (mode >= 3 && cell->mv_ptr) {
            av_log(avctx, AV_LOG_ERROR, "Attempt to apply Mode 3/4 to an INTER cell!\n");
            return AVERROR_INVALIDDATA;
        }

        zoom_fac = mode >= 3;
        error = decode_cell_data(cell, block, ref_block, plane->pitch, 0, zoom_fac,
                                 mode, delta, swap_quads, &data_ptr, last_ptr);
        break;
    case 10: /*-------------------- MODE 10 (8x8 block processing) ---------------------*/
    case 11: /*----------------- MODE 11 (4x8 INTER block processing) ------------------*/
        if (mode == 10 && !cell->mv_ptr) { /* MODE 10 INTRA processing */
            error = decode_cell_data(cell, block, ref_block, plane->pitch, 1, 1,
                                     mode, delta, swap_quads, &data_ptr, last_ptr);
        } else { /* mode 10 and 11 INTER processing */
            if (mode == 11 && !cell->mv_ptr) {
                av_log(avctx, AV_LOG_ERROR, "Attempt to use Mode 11 for an INTRA cell!\n");
                return AVERROR_INVALIDDATA;
            }

            zoom_fac = mode == 10;
            error = decode_cell_data(cell, block, ref_block, plane->pitch,
                                     zoom_fac, 1, mode, delta, swap_quads,
                                     &data_ptr, last_ptr);
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported coding mode: %d\n", mode);
        return AVERROR_INVALIDDATA;
    } // switch mode

    switch (error) {
    case IV3_BAD_RLE:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: RLE code %X is not allowed at the current line\n",
               mode, data_ptr[-1]);
        return AVERROR_INVALIDDATA;
    case IV3_BAD_DATA:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: invalid VQ data\n", mode);
        return AVERROR_INVALIDDATA;
    case IV3_BAD_COUNTER:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: RLE-FB invalid counter: %d\n", mode, code);
        return AVERROR_INVALIDDATA;
    case IV3_UNSUPPORTED:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: unsupported RLE code: %X\n", mode, data_ptr[-1]);
        return AVERROR_INVALIDDATA;
    case IV3_OUT_OF_DATA:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: attempt to read past end of buffer\n", mode);
        return AVERROR_INVALIDDATA;
    }

    return data_ptr - data_start; /* report number of bytes consumed from the input buffer */
}

/* Binary tree codes. */
enum {
    H_SPLIT    = 0,
    V_SPLIT    = 1,
    INTRA_NULL = 2,
    INTER_DATA = 3
};

#define SPLIT_CELL(size, new_size) (new_size) = ((size) > 2) ? ((((size) + 2) >> 2) << 1) : 1
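
/*
 * For example, splitting a parent cell 7 blocks tall gives
 * SPLIT_CELL(7, h): h = ((7 + 2) >> 2) << 1 = 4, so the child cell takes
 * 4 block rows and the parent keeps the remaining 3; a size of 2 or less
 * always splits off 1.
 */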

#define UPDATE_BITPOS(n) \
    ctx->skip_bits  += (n); \
    ctx->need_resync = 1

#define RESYNC_BITSTREAM \
    if (ctx->need_resync && !(get_bits_count(&ctx->gb) & 7)) { \
        skip_bits_long(&ctx->gb, ctx->skip_bits);              \
        ctx->skip_bits   = 0;                                  \
        ctx->need_resync = 0;                                  \
    }

#define CHECK_CELL \
    if (curr_cell.xpos + curr_cell.width  > (plane->width  >> 2) ||                \
        curr_cell.ypos + curr_cell.height > (plane->height >> 2)) {                \
        av_log(avctx, AV_LOG_ERROR, "Invalid cell: x=%d, y=%d, w=%d, h=%d\n",      \
               curr_cell.xpos, curr_cell.ypos, curr_cell.width, curr_cell.height); \
        return AVERROR_INVALIDDATA;                                                \
    }

static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                         Plane *plane, int code, Cell *ref_cell,
                         const int depth, const int strip_width)
{
    Cell curr_cell;
    int  bytes_used;
    int  mv_x, mv_y;

    if (depth <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Stack overflow (corrupted binary tree)!\n");
        return AVERROR_INVALIDDATA; // unwind recursion
    }

    curr_cell = *ref_cell; // clone parent cell
    if (code == H_SPLIT) {
        SPLIT_CELL(ref_cell->height, curr_cell.height);
        ref_cell->ypos   += curr_cell.height;
        ref_cell->height -= curr_cell.height;
        if (ref_cell->height <= 0 || curr_cell.height <= 0)
            return AVERROR_INVALIDDATA;
    } else if (code == V_SPLIT) {
        if (curr_cell.width > strip_width) {
            /* split strip */
            curr_cell.width = (curr_cell.width <= (strip_width << 1) ? 1 : 2) * strip_width;
        } else
            SPLIT_CELL(ref_cell->width, curr_cell.width);
        ref_cell->xpos  += curr_cell.width;
        ref_cell->width -= curr_cell.width;
        if (ref_cell->width <= 0 || curr_cell.width <= 0)
            return AVERROR_INVALIDDATA;
    }

    while (get_bits_left(&ctx->gb) >= 2) { /* loop until return */
        RESYNC_BITSTREAM;
        switch (code = get_bits(&ctx->gb, 2)) {
        case H_SPLIT:
        case V_SPLIT:
            if (parse_bintree(ctx, avctx, plane, code, &curr_cell, depth - 1, strip_width))
                return AVERROR_INVALIDDATA;
            break;
        case INTRA_NULL:
            if (!curr_cell.tree) { /* MC tree INTRA code */
                curr_cell.mv_ptr = 0; /* mark the current strip as INTRA */
                curr_cell.tree   = 1; /* enter the VQ tree */
            } else { /* VQ tree NULL code */
                RESYNC_BITSTREAM;
                code = get_bits(&ctx->gb, 2);
                if (code >= 2) {
                    av_log(avctx, AV_LOG_ERROR, "Invalid VQ_NULL code: %d\n", code);
                    return AVERROR_INVALIDDATA;
                }
                if (code == 1)
                    av_log(avctx, AV_LOG_ERROR, "SkipCell procedure not implemented yet!\n");

                CHECK_CELL
                if (!curr_cell.mv_ptr)
                    return AVERROR_INVALIDDATA;

                mv_y = curr_cell.mv_ptr[0];
                mv_x = curr_cell.mv_ptr[1];
                if (   mv_x + 4*curr_cell.xpos < 0
                    || mv_y + 4*curr_cell.ypos < 0
                    || mv_x + 4*curr_cell.xpos + 4*curr_cell.width  > plane->width
                    || mv_y + 4*curr_cell.ypos + 4*curr_cell.height > plane->height) {
                    av_log(avctx, AV_LOG_ERROR, "motion vector %d %d outside reference\n",
                           mv_x + 4*curr_cell.xpos, mv_y + 4*curr_cell.ypos);
                    return AVERROR_INVALIDDATA;
                }
                copy_cell(ctx, plane, &curr_cell);
                return 0;
            }
            break;
        case INTER_DATA:
            if (!curr_cell.tree) { /* MC tree INTER code */
                unsigned mv_idx;
                /* get motion vector index and setup the pointer to the mv set */
                if (!ctx->need_resync)
                    ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3];
                if (ctx->next_cell_data >= ctx->last_byte) {
                    av_log(avctx, AV_LOG_ERROR, "motion vector out of array\n");
                    return AVERROR_INVALIDDATA;
                }
                mv_idx = *(ctx->next_cell_data++);
                if (mv_idx >= ctx->num_vectors) {
                    av_log(avctx, AV_LOG_ERROR, "motion vector index out of range\n");
                    return AVERROR_INVALIDDATA;
                }
                curr_cell.mv_ptr = &ctx->mc_vectors[mv_idx << 1];
                curr_cell.tree   = 1; /* enter the VQ tree */
                UPDATE_BITPOS(8);
            } else { /* VQ tree DATA code */
                if (!ctx->need_resync)
                    ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3];

                CHECK_CELL
                bytes_used = decode_cell(ctx, avctx, plane, &curr_cell,
                                         ctx->next_cell_data, ctx->last_byte);
                if (bytes_used < 0)
                    return AVERROR_INVALIDDATA;

                UPDATE_BITPOS(bytes_used << 3);
                ctx->next_cell_data += bytes_used;
                return 0;
            }
            break;
        }
    } // while

    return AVERROR_INVALIDDATA;
}

static int decode_plane(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                        Plane *plane, const uint8_t *data, int32_t data_size,
                        int32_t strip_width)
{
    Cell     curr_cell;
    unsigned num_vectors;

    /* Each plane's data starts with a mc_vector_count field, */
    /* followed by an optional array of motion vectors and the VQ data. */
    num_vectors = bytestream_get_le32(&data); data_size -= 4;
    if (num_vectors > 256) {
        av_log(ctx->avctx, AV_LOG_ERROR,
               "Read invalid number of motion vectors %d\n", num_vectors);
        return AVERROR_INVALIDDATA;
    }
    if (num_vectors * 2 > data_size)
        return AVERROR_INVALIDDATA;

    ctx->num_vectors = num_vectors;
    ctx->mc_vectors  = num_vectors ? data : 0;

    /* init the bitreader */
    init_get_bits(&ctx->gb, &data[num_vectors * 2], (data_size - num_vectors * 2) << 3);
    ctx->skip_bits   = 0;
    ctx->need_resync = 0;

    ctx->last_byte = data + data_size;

    /* initialize the 1st cell and set its dimensions to whole plane */
    curr_cell.xpos   = curr_cell.ypos = 0;
    curr_cell.width  = plane->width  >> 2;
    curr_cell.height = plane->height >> 2;
    curr_cell.tree   = 0; // we are in the MC tree now
    curr_cell.mv_ptr = 0; // no motion vector = INTRA cell

    return parse_bintree(ctx, avctx, plane, INTRA_NULL, &curr_cell, CELL_STACK_MAX, strip_width);
}

#define OS_HDR_ID MKBETAG('F', 'R', 'M', 'H')

static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                                const uint8_t *buf, int buf_size)
{
    const uint8_t *buf_ptr = buf, *bs_hdr;
    uint32_t       frame_num, word2, check_sum, data_size;
    uint32_t       y_offset, u_offset, v_offset, starts[3], ends[3];
    uint16_t       height, width;
    int            i, j;

    /* parse and check the OS header */
    frame_num = bytestream_get_le32(&buf_ptr);
    word2     = bytestream_get_le32(&buf_ptr);
    check_sum = bytestream_get_le32(&buf_ptr);
    data_size = bytestream_get_le32(&buf_ptr);

    if ((frame_num ^ word2 ^ data_size ^ OS_HDR_ID) != check_sum) {
        av_log(avctx, AV_LOG_ERROR, "OS header checksum mismatch!\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse the bitstream header */
    bs_hdr    = buf_ptr;
    buf_size -= 16;

    if (bytestream_get_le16(&buf_ptr) != 32) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported codec version!\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->frame_num   = frame_num;
    ctx->frame_flags = bytestream_get_le16(&buf_ptr);
    ctx->data_size   = (bytestream_get_le32(&buf_ptr) + 7) >> 3;
    ctx->cb_offset   = *buf_ptr++;

    if (ctx->data_size == 16)
        return 4;
    if (ctx->data_size > buf_size)
        ctx->data_size = buf_size;

    buf_ptr += 3; // skip reserved byte and checksum

    /* check frame dimensions */
    height = bytestream_get_le16(&buf_ptr);
    width  = bytestream_get_le16(&buf_ptr);
    if (av_image_check_size(width, height, 0, avctx))
        return AVERROR_INVALIDDATA;

    if (width != ctx->width || height != ctx->height) {
        int res;

        av_dlog(avctx, "Frame dimensions changed!\n");

        free_frame_buffers(ctx);
        if ((res = allocate_frame_buffers(ctx, avctx, width, height)) < 0)
            return res;
        avcodec_set_dimensions(avctx, width, height);
    }

    y_offset = bytestream_get_le32(&buf_ptr);
    v_offset = bytestream_get_le32(&buf_ptr);
    u_offset = bytestream_get_le32(&buf_ptr);

    /* Unfortunately there is no fixed order of the planes in the buffer, */
    /* so we use this sorting pass to determine the size of each plane's data. */
    starts[0] = y_offset;
    starts[1] = v_offset;
    starts[2] = u_offset;

    for (j = 0; j < 3; j++) {
        ends[j] = ctx->data_size;
        for (i = 2; i >= 0; i--)
            if (starts[i] < ends[j] && starts[i] > starts[j])
                ends[j] = starts[i];
    }

    ctx->y_data_size = ends[0] - starts[0];
    ctx->v_data_size = ends[1] - starts[1];
    ctx->u_data_size = ends[2] - starts[2];
    if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
        FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
        av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->y_data_ptr = bs_hdr + y_offset;
    ctx->v_data_ptr = bs_hdr + v_offset;
    ctx->u_data_ptr = bs_hdr + u_offset;
    ctx->alt_quant  = buf_ptr + sizeof(uint32_t);

    if (ctx->data_size == 16) {
        av_log(avctx, AV_LOG_DEBUG, "Sync frame encountered!\n");
        return 16;
    }

    if (ctx->frame_flags & BS_8BIT_PEL) {
        av_log_ask_for_sample(avctx, "8-bit pixel format\n");
        return AVERROR_PATCHWELCOME;
    }

    if (ctx->frame_flags & BS_MV_X_HALF || ctx->frame_flags & BS_MV_Y_HALF) {
        av_log_ask_for_sample(avctx, "halfpel motion vectors\n");
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}

/**
 * Convert and output the current plane.
 * All pixel values will be upsampled by shifting them left by one bit
 * (i.e. doubled).
 *
 * @param[in]  plane      pointer to the descriptor of the plane being processed
 * @param[in]  buf_sel    indicates which frame buffer the input data is stored in
 * @param[out] dst        pointer to the buffer receiving converted pixels
 * @param[in]  dst_pitch  pitch for moving to the next y line
 */
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst, int dst_pitch)
{
    int            x, y;
    const uint8_t *src   = plane->pixels[buf_sel];
    uint32_t       pitch = plane->pitch;

    for (y = 0; y < plane->height; y++) {
        /* convert four pixels at once using SWAR */
        for (x = 0; x < plane->width >> 2; x++) {
            AV_WN32A(dst, (AV_RN32A(src) & 0x7F7F7F7F) << 1);
            src += 4;
            dst += 4;
        }

        for (x <<= 2; x < plane->width; x++)
            *dst++ = *src++ << 1;

        src += pitch     - plane->width;
        dst += dst_pitch - plane->width;
    }
}
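
/*
 * The SWAR loop above doubles four 7-bit pixels per 32-bit store: e.g. the
 * packed word 0x40201008 (pixels 64, 32, 16, 8) masked with 0x7F7F7F7F and
 * shifted left by one becomes 0x80402010 (128, 64, 32, 16); the mask ensures
 * the shift cannot carry between bytes.
 */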

static av_cold int decode_init(AVCodecContext *avctx)
{
    Indeo3DecodeContext *ctx = avctx->priv_data;

    ctx->avctx     = avctx;
    avctx->pix_fmt = PIX_FMT_YUV410P;
    avcodec_get_frame_defaults(&ctx->frame);

    build_requant_tab();

    ff_dsputil_init(&ctx->dsp, avctx);

    return allocate_frame_buffers(ctx, avctx, avctx->width, avctx->height);
}

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    Indeo3DecodeContext *ctx      = avctx->priv_data;
    const uint8_t       *buf      = avpkt->data;
    int                  buf_size = avpkt->size;
    int                  res;

    res = decode_frame_headers(ctx, avctx, buf, buf_size);
    if (res < 0)
        return res;

    /* skip sync (null) frames */
    if (res) {
        // we have processed 16 bytes but no data was decoded
        *data_size = 0;
        return buf_size;
    }

    /* skip droppable INTER frames if requested */
    if (ctx->frame_flags & BS_NONREF &&
        (avctx->skip_frame >= AVDISCARD_NONREF))
        return 0;

    /* skip INTER frames if requested */
    if (!(ctx->frame_flags & BS_KEYFRAME) && avctx->skip_frame >= AVDISCARD_NONKEY)
        return 0;

    /* use BS_BUFFER flag for buffer switching */
    ctx->buf_sel = (ctx->frame_flags >> BS_BUFFER) & 1;

    /* decode luma plane */
    if ((res = decode_plane(ctx, avctx, ctx->planes, ctx->y_data_ptr, ctx->y_data_size, 40)))
        return res;

    /* decode chroma planes */
    if ((res = decode_plane(ctx, avctx, &ctx->planes[1], ctx->u_data_ptr, ctx->u_data_size, 10)))
        return res;

    if ((res = decode_plane(ctx, avctx, &ctx->planes[2], ctx->v_data_ptr, ctx->v_data_size, 10)))
        return res;

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    ctx->frame.reference = 0;
    if ((res = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
        av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    }

    output_plane(&ctx->planes[0], ctx->buf_sel, ctx->frame.data[0], ctx->frame.linesize[0]);
    output_plane(&ctx->planes[1], ctx->buf_sel, ctx->frame.data[1], ctx->frame.linesize[1]);
    output_plane(&ctx->planes[2], ctx->buf_sel, ctx->frame.data[2], ctx->frame.linesize[2]);

    *data_size      = sizeof(AVFrame);
    *(AVFrame*)data = ctx->frame;

    return buf_size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    Indeo3DecodeContext *ctx = avctx->priv_data;

    free_frame_buffers(avctx->priv_data);

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    return 0;
}

AVCodec ff_indeo3_decoder = {
    .name           = "indeo3",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_INDEO3,
    .priv_data_size = sizeof(Indeo3DecodeContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .long_name      = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),
};