/*
 * Indeo Video v3 compatible decoder
 * Copyright (c) 2009 - 2011 Maxim Poliakovski
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * This is a decoder for Intel Indeo Video v3.
 * It is based on vector quantization, run-length coding and motion compensation.
 * Known container formats: .avi and .mov
 * Known FOURCCs: 'IV31', 'IV32'
 *
 * @see http://wiki.multimedia.cx/index.php?title=Indeo_3
 */

#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "dsputil.h"
#include "bytestream.h"
#include "get_bits.h"
#include "indeo3data.h"

/* RLE opcodes. */
enum {
    RLE_ESC_F9 = 249, ///< same as RLE_ESC_FA + do the same with next block
    RLE_ESC_FA = 250, ///< INTRA: skip block, INTER: copy data from reference
    RLE_ESC_FB = 251, ///< apply null delta to N blocks / skip N blocks
    RLE_ESC_FC = 252, ///< same as RLE_ESC_FD + do the same with next block
    RLE_ESC_FD = 253, ///< apply null delta to all remaining lines of this block
    RLE_ESC_FE = 254, ///< apply null delta to all lines up to the 3rd line
    RLE_ESC_FF = 255  ///< apply null delta to all lines up to the 2nd line
};

/* Some constants for parsing frame bitstream flags. */
#define BS_8BIT_PEL  (1 << 1) ///< 8bit pixel bitdepth indicator
#define BS_KEYFRAME  (1 << 2) ///< intra frame indicator
#define BS_MV_Y_HALF (1 << 4) ///< vertical mv halfpel resolution indicator
#define BS_MV_X_HALF (1 << 5) ///< horizontal mv halfpel resolution indicator
#define BS_NONREF    (1 << 8) ///< nonref (discardable) frame indicator
#define BS_BUFFER    9        ///< indicates which of two frame buffers should be used

typedef struct Plane {
    uint8_t  *buffers[2];
    uint8_t  *pixels[2]; ///< pointer to the actual pixel data of the buffers above
    uint32_t width;
    uint32_t height;
    uint32_t pitch;
} Plane;

#define CELL_STACK_MAX 20

typedef struct Cell {
    int16_t xpos;   ///< cell coordinates in 4x4 blocks
    int16_t ypos;
    int16_t width;  ///< cell width in 4x4 blocks
    int16_t height; ///< cell height in 4x4 blocks
    uint8_t tree;   ///< tree id: 0 - MC tree, 1 - VQ tree
    const int8_t *mv_ptr; ///< ptr to the motion vector if any
} Cell;

typedef struct Indeo3DecodeContext {
    AVCodecContext *avctx;
    AVFrame        frame;
    DSPContext     dsp;

    GetBitContext  gb;
    int            need_resync;
    int            skip_bits;
    const uint8_t  *next_cell_data;
    const uint8_t  *last_byte;
    const int8_t   *mc_vectors;
    unsigned       num_vectors;  ///< number of motion vectors in mc_vectors

    int16_t        width, height;
    uint32_t       frame_num;    ///< current frame number (zero-based)
    uint32_t       data_size;    ///< size of the frame data in bytes
    uint16_t       frame_flags;  ///< frame properties
    uint8_t        cb_offset;    ///< needed for selecting VQ tables
    uint8_t        buf_sel;      ///< active frame buffer: 0 - primary, 1 - secondary
    const uint8_t  *y_data_ptr;
    const uint8_t  *v_data_ptr;
    const uint8_t  *u_data_ptr;
    int32_t        y_data_size;
    int32_t        v_data_size;
    int32_t        u_data_size;
    const uint8_t  *alt_quant;   ///< secondary VQ table set for the modes 1 and 4
    Plane          planes[3];
} Indeo3DecodeContext;


static uint8_t requant_tab[8][128];

/*
 *  Build the static requantization table.
 *  This table is used to remap pixel values according to a specific
 *  quant index and thus avoid overflows while adding deltas.
 */
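
/* For example, with quant index 0 the step is 2 (offset 1, delta 0), so an
 * input value of 5 maps to (5 + 1) / 2 * 2 + 0 = 6, i.e. the prediction is
 * snapped onto the quantization grid of the current cell before deltas are
 * applied. */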
static av_cold void build_requant_tab(void)
{
    static int8_t offsets[8] = { 1, 1, 2, -3, -3, 3, 4, 4 };
    static int8_t deltas [8] = { 0, 1, 0,  4,  4, 1, 0, 1 };

    int i, j, step;

    for (i = 0; i < 8; i++) {
        step = i + 2;
        for (j = 0; j < 128; j++)
            requant_tab[i][j] = (j + offsets[i]) / step * step + deltas[i];
    }

    /* Some of the entries calculated above will have values >= 128.
     * Pixel values must never exceed 127, so clamp those entries to
     * non-overflowing values consistent with the quantization step
     * of the respective section. */
    requant_tab[0][127] = 126;
    requant_tab[1][119] = 118;
    requant_tab[1][120] = 118;
    requant_tab[2][126] = 124;
    requant_tab[2][127] = 124;
    requant_tab[6][124] = 120;
    requant_tab[6][125] = 120;
    requant_tab[6][126] = 120;
    requant_tab[6][127] = 120;

    /* Patch for compatibility with Intel's binary decoders. */
    requant_tab[1][7] = 10;
    requant_tab[4][8] = 10;
}

static av_cold int allocate_frame_buffers(Indeo3DecodeContext *ctx,
                                          AVCodecContext *avctx)
{
    int p, luma_width, luma_height, chroma_width, chroma_height;
    int luma_pitch, chroma_pitch, luma_size, chroma_size;

    luma_width  = ctx->width;
    luma_height = ctx->height;

    if (luma_width  < 16 || luma_width  > 640 ||
        luma_height < 16 || luma_height > 480 ||
        luma_width & 3 || luma_height & 3) {
        av_log(avctx, AV_LOG_ERROR, "Invalid picture dimensions: %d x %d!\n",
               luma_width, luma_height);
        return AVERROR_INVALIDDATA;
    }

    chroma_width  = FFALIGN(luma_width  >> 2, 4);
    chroma_height = FFALIGN(luma_height >> 2, 4);
    luma_pitch    = FFALIGN(luma_width,   16);
    chroma_pitch  = FFALIGN(chroma_width, 16);

    /* Calculate the size of the luminance plane.
     * Add one extra line for INTRA prediction. */
    luma_size = luma_pitch * (luma_height + 1);

    /* Calculate the size of the chrominance planes.
     * Add one extra line for INTRA prediction. */
    chroma_size = chroma_pitch * (chroma_height + 1);

    /* allocate frame buffers */
    for (p = 0; p < 3; p++) {
        ctx->planes[p].pitch  = !p ? luma_pitch  : chroma_pitch;
        ctx->planes[p].width  = !p ? luma_width  : chroma_width;
        ctx->planes[p].height = !p ? luma_height : chroma_height;

        ctx->planes[p].buffers[0] = av_malloc(!p ? luma_size : chroma_size);
        ctx->planes[p].buffers[1] = av_malloc(!p ? luma_size : chroma_size);

        /* fill the INTRA prediction lines with the middle pixel value = 64 */
        memset(ctx->planes[p].buffers[0], 0x40, ctx->planes[p].pitch);
        memset(ctx->planes[p].buffers[1], 0x40, ctx->planes[p].pitch);

        /* set buffer pointers = buf_ptr + pitch and thus skip the INTRA prediction line */
        ctx->planes[p].pixels[0] = ctx->planes[p].buffers[0] + ctx->planes[p].pitch;
        ctx->planes[p].pixels[1] = ctx->planes[p].buffers[1] + ctx->planes[p].pitch;
        memset(ctx->planes[p].pixels[0], 0, ctx->planes[p].pitch * ctx->planes[p].height);
        memset(ctx->planes[p].pixels[1], 0, ctx->planes[p].pitch * ctx->planes[p].height);
    }

    return 0;
}

static av_cold void free_frame_buffers(Indeo3DecodeContext *ctx)
{
    int p;

    for (p = 0; p < 3; p++) {
        av_freep(&ctx->planes[p].buffers[0]);
        av_freep(&ctx->planes[p].buffers[1]);
        ctx->planes[p].pixels[0] = ctx->planes[p].pixels[1] = 0;
    }
}

/**
 *  Copy pixels of the cell(x + mv_x, y + mv_y) from the previous frame into
 *  the cell(x, y) in the current frame.
 *
 *  @param ctx   pointer to the decoder context
 *  @param plane pointer to the plane descriptor
 *  @param cell  pointer to the cell descriptor
 */
static void copy_cell(Indeo3DecodeContext *ctx, Plane *plane, Cell *cell)
{
    int     h, w, mv_x, mv_y, offset, offset_dst;
    uint8_t *src, *dst;

    /* setup output and reference pointers */
    offset_dst = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2);
    dst        = plane->pixels[ctx->buf_sel] + offset_dst;
    mv_y       = cell->mv_ptr[0];
    mv_x       = cell->mv_ptr[1];
    offset     = offset_dst + mv_y * plane->pitch + mv_x;
    src        = plane->pixels[ctx->buf_sel ^ 1] + offset;

    h = cell->height << 2;

    for (w = cell->width; w > 0;) {
        /* copy using 16xH blocks */
        if (!((cell->xpos << 2) & 15) && w >= 4) {
            for (; w >= 4; src += 16, dst += 16, w -= 4)
                ctx->dsp.put_no_rnd_pixels_tab[0][0](dst, src, plane->pitch, h);
        }

        /* copy using 8xH blocks */
        if (!((cell->xpos << 2) & 7) && w >= 2) {
            ctx->dsp.put_no_rnd_pixels_tab[1][0](dst, src, plane->pitch, h);
            w -= 2;
            src += 8;
            dst += 8;
        }

        if (w >= 1) {
            copy_block4(dst, src, plane->pitch, plane->pitch, h);
            w--;
            src += 4;
            dst += 4;
        }
    }
}

/* Average 4/8 pixels at once without rounding using SWAR */
#define AVG_32(dst, src, ref) \
    AV_WN32A(dst, ((AV_RN32A(src) + AV_RN32A(ref)) >> 1) & 0x7F7F7F7FUL)

#define AVG_64(dst, src, ref) \
    AV_WN64A(dst, ((AV_RN64A(src) + AV_RN64A(ref)) >> 1) & 0x7F7F7F7F7F7F7F7FULL)
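
/* Note: this packed averaging is only safe because Indeo 3 pixel values never
 * exceed 0x7F, so each per-byte sum fits in 8 bits and cannot carry into the
 * neighbouring byte; the final mask clears the bit that the shift moves in
 * from the byte above. */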

/*
 * Replicate each even pixel as follows:
 * ABCDEFGH -> AACCEEGG
 */
static inline uint64_t replicate64(uint64_t a) {
#if HAVE_BIGENDIAN
    a &= 0xFF00FF00FF00FF00ULL;
    a |= a >> 8;
#else
    a &= 0x00FF00FF00FF00FFULL;
    a |= a << 8;
#endif
    return a;
}
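
/* 32-bit variant of replicate64() operating on four packed pixels. */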
static inline uint32_t replicate32(uint32_t a) {
#if HAVE_BIGENDIAN
    a &= 0xFF00FF00UL;
    a |= a >> 8;
#else
    a &= 0x00FF00FFUL;
    a |= a << 8;
#endif
    return a;
}

/* Fill n lines with the 64-bit pixel value pix */
static inline void fill_64(uint8_t *dst, const uint64_t pix, int32_t n,
                           int32_t row_offset)
{
    for (; n > 0; dst += row_offset, n--)
        AV_WN64A(dst, pix);
}

/* Error codes for cell decoding. */
enum {
    IV3_NOERR       = 0,
    IV3_BAD_RLE     = 1,
    IV3_BAD_DATA    = 2,
    IV3_BAD_COUNTER = 3,
    IV3_UNSUPPORTED = 4,
    IV3_OUT_OF_DATA = 5
};

#define BUFFER_PRECHECK \
if (*data_ptr >= last_ptr) \
    return IV3_OUT_OF_DATA; \

#define RLE_BLOCK_COPY \
    if (cell->mv_ptr || !skip_flag) \
        copy_block4(dst, ref, row_offset, row_offset, 4 << v_zoom)

#define RLE_BLOCK_COPY_8 \
    pix64 = AV_RN64A(ref);\
    if (is_first_row) { /* special prediction case: top line of a cell */\
        pix64 = replicate64(pix64);\
        fill_64(dst + row_offset, pix64, 7, row_offset);\
        AVG_64(dst, ref, dst + row_offset);\
    } else \
        fill_64(dst, pix64, 8, row_offset)

#define RLE_LINES_COPY \
    copy_block4(dst, ref, row_offset, row_offset, num_lines << v_zoom)

#define RLE_LINES_COPY_M10 \
    pix64 = AV_RN64A(ref);\
    if (is_top_of_cell) {\
        pix64 = replicate64(pix64);\
        fill_64(dst + row_offset, pix64, (num_lines << 1) - 1, row_offset);\
        AVG_64(dst, ref, dst + row_offset);\
    } else \
        fill_64(dst, pix64, num_lines << 1, row_offset)

#define APPLY_DELTA_4 \
    AV_WN16A(dst + line_offset    , \
             (AV_RN16A(ref    ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
    AV_WN16A(dst + line_offset + 2, \
             (AV_RN16A(ref + 2) + delta_tab->deltas[dyad2]) & 0x7F7F);\
    if (mode >= 3) {\
        if (is_top_of_cell && !cell->ypos) {\
            AV_COPY32(dst, dst + row_offset);\
        } else {\
            AVG_32(dst, ref, dst + row_offset);\
        }\
    }

#define APPLY_DELTA_8 \
    /* apply two 32-bit VQ deltas to next even line */\
    if (is_top_of_cell) { \
        AV_WN32A(dst + row_offset    , \
                 (replicate32(AV_RN32A(ref    )) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
        AV_WN32A(dst + row_offset + 4, \
                 (replicate32(AV_RN32A(ref + 4)) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
    } else { \
        AV_WN32A(dst + row_offset    , \
                 (AV_RN32A(ref    ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
        AV_WN32A(dst + row_offset + 4, \
                 (AV_RN32A(ref + 4) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
    } \
    /* odd lines are not coded but rather interpolated/replicated */\
    /* first line of the cell on the top of image? - replicate */\
    /* otherwise - interpolate */\
    if (is_top_of_cell && !cell->ypos) {\
        AV_COPY64(dst, dst + row_offset);\
    } else \
        AVG_64(dst, ref, dst + row_offset);

#define APPLY_DELTA_1011_INTER \
    if (mode == 10) { \
        AV_WN32A(dst                 , \
                 (AV_RN32A(dst                 ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
        AV_WN32A(dst + 4             , \
                 (AV_RN32A(dst + 4             ) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
        AV_WN32A(dst + row_offset    , \
                 (AV_RN32A(dst + row_offset    ) + delta_tab->deltas_m10[dyad1]) & 0x7F7F7F7F);\
        AV_WN32A(dst + row_offset + 4, \
                 (AV_RN32A(dst + row_offset + 4) + delta_tab->deltas_m10[dyad2]) & 0x7F7F7F7F);\
    } else { \
        AV_WN16A(dst                 , \
                 (AV_RN16A(dst                 ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
        AV_WN16A(dst + 2             , \
                 (AV_RN16A(dst + 2             ) + delta_tab->deltas[dyad2]) & 0x7F7F);\
        AV_WN16A(dst + row_offset    , \
                 (AV_RN16A(dst + row_offset    ) + delta_tab->deltas[dyad1]) & 0x7F7F);\
        AV_WN16A(dst + row_offset + 2, \
                 (AV_RN16A(dst + row_offset + 2) + delta_tab->deltas[dyad2]) & 0x7F7F);\
    }
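
/**
 *  Decode the VQ- and RLE-coded pixel data of a single cell.
 *
 *  The cell is traversed as a grid of blocks (4x4, doubled horizontally
 *  and/or vertically according to h_zoom/v_zoom). Code bytes below 248
 *  select VQ deltas (dyads or quads) from the given tables, while the
 *  RLE_ESC_xx escape codes trigger line/block skipping and copying.
 *
 *  @return IV3_NOERR on success or one of the IV3_* error codes
 */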
static int decode_cell_data(Cell *cell, uint8_t *block, uint8_t *ref_block,
                            int pitch, int h_zoom, int v_zoom, int mode,
                            const vqEntry *delta[2], int swap_quads[2],
                            const uint8_t **data_ptr, const uint8_t *last_ptr)
{
    int           x, y, line, num_lines;
    int           rle_blocks = 0;
    uint8_t       code, *dst, *ref;
    const vqEntry *delta_tab;
    unsigned int  dyad1, dyad2;
    uint64_t      pix64;
    int           skip_flag = 0, is_top_of_cell, is_first_row = 1;
    int           row_offset, blk_row_offset, line_offset;

    row_offset     = pitch;
    blk_row_offset = (row_offset << (2 + v_zoom)) - (cell->width << 2);
    line_offset    = v_zoom ? row_offset : 0;

    if (cell->height & v_zoom || cell->width & h_zoom)
        return IV3_BAD_DATA;

    for (y = 0; y < cell->height; is_first_row = 0, y += 1 + v_zoom) {
        for (x = 0; x < cell->width; x += 1 + h_zoom) {
            ref = ref_block;
            dst = block;

            if (rle_blocks > 0) {
                if (mode <= 4) {
                    RLE_BLOCK_COPY;
                } else if (mode == 10 && !cell->mv_ptr) {
                    RLE_BLOCK_COPY_8;
                }
                rle_blocks--;
            } else {
                for (line = 0; line < 4;) {
                    num_lines = 1;
                    is_top_of_cell = is_first_row && !line;

                    /* select primary VQ table for odd, secondary for even lines */
                    if (mode <= 4)
                        delta_tab = delta[line & 1];
                    else
                        delta_tab = delta[1];
                    BUFFER_PRECHECK;
                    code = bytestream_get_byte(data_ptr);
                    if (code < 248) {
                        if (code < delta_tab->num_dyads) {
                            BUFFER_PRECHECK;
                            dyad1 = bytestream_get_byte(data_ptr);
                            dyad2 = code;
                            if (dyad1 >= delta_tab->num_dyads || dyad1 >= 248)
                                return IV3_BAD_DATA;
                        } else {
                            /* process QUADS */
                            code -= delta_tab->num_dyads;
                            dyad1 = code / delta_tab->quad_exp;
                            dyad2 = code % delta_tab->quad_exp;
                            if (swap_quads[line & 1])
                                FFSWAP(unsigned int, dyad1, dyad2);
                        }
                        if (mode <= 4) {
                            APPLY_DELTA_4;
                        } else if (mode == 10 && !cell->mv_ptr) {
                            APPLY_DELTA_8;
                        } else {
                            APPLY_DELTA_1011_INTER;
                        }
                    } else {
                        /* process RLE codes */
                        switch (code) {
                        case RLE_ESC_FC:
                            skip_flag  = 0;
                            rle_blocks = 1;
                            code       = 253;
                            /* FALLTHROUGH */
                        case RLE_ESC_FF:
                        case RLE_ESC_FE:
                        case RLE_ESC_FD:
                            num_lines = 257 - code - line;
                            if (num_lines <= 0)
                                return IV3_BAD_RLE;
                            if (mode <= 4) {
                                RLE_LINES_COPY;
                            } else if (mode == 10 && !cell->mv_ptr) {
                                RLE_LINES_COPY_M10;
                            }
                            break;
                        case RLE_ESC_FB:
                            BUFFER_PRECHECK;
                            code = bytestream_get_byte(data_ptr);
                            rle_blocks = (code & 0x1F) - 1; /* set block counter */
                            if (code >= 64 || rle_blocks < 0)
                                return IV3_BAD_COUNTER;
                            skip_flag = code & 0x20;
                            num_lines = 4 - line; /* enforce next block processing */
                            if (mode >= 10 || (cell->mv_ptr || !skip_flag)) {
                                if (mode <= 4) {
                                    RLE_LINES_COPY;
                                } else if (mode == 10 && !cell->mv_ptr) {
                                    RLE_LINES_COPY_M10;
                                }
                            }
                            break;
                        case RLE_ESC_F9:
                            skip_flag  = 1;
                            rle_blocks = 1;
                            /* FALLTHROUGH */
                        case RLE_ESC_FA:
                            if (line)
                                return IV3_BAD_RLE;
                            num_lines = 4; /* enforce next block processing */
                            if (cell->mv_ptr) {
                                if (mode <= 4) {
                                    RLE_LINES_COPY;
                                } else if (mode == 10 && !cell->mv_ptr) {
                                    RLE_LINES_COPY_M10;
                                }
                            }
                            break;
                        default:
                            return IV3_UNSUPPORTED;
                        }
                    }

                    line += num_lines;
                    ref  += row_offset * (num_lines << v_zoom);
                    dst  += row_offset * (num_lines << v_zoom);
                }
            }

            /* move to next horizontal block */
            block     += 4 << h_zoom;
            ref_block += 4 << h_zoom;
        }

        /* move to next line of blocks */
        ref_block += blk_row_offset;
        block     += blk_row_offset;
    }

    return IV3_NOERR;
}

/**
 *  Decode a vector-quantized cell.
 *  It consists of several routines, each of which handles one or more "modes"
 *  with which a cell can be encoded.
 *
 *  @param ctx      pointer to the decoder context
 *  @param avctx    ptr to the AVCodecContext
 *  @param plane    pointer to the plane descriptor
 *  @param cell     pointer to the cell descriptor
 *  @param data_ptr pointer to the compressed data
 *  @param last_ptr pointer to the last byte to catch reads past end of buffer
 *  @return         number of consumed bytes or negative number in case of error
 */
static int decode_cell(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                       Plane *plane, Cell *cell, const uint8_t *data_ptr,
                       const uint8_t *last_ptr)
{
    int           x, mv_x, mv_y, mode, vq_index, prim_indx, second_indx;
    int           zoom_fac;
    int           offset, error = 0, swap_quads[2];
    uint8_t       code, *block, *ref_block = 0;
    const vqEntry *delta[2];
    const uint8_t *data_start = data_ptr;

    /* get coding mode and VQ table index from the VQ descriptor byte */
    code     = *data_ptr++;
    mode     = code >> 4;
    vq_index = code & 0xF;

    /* setup output and reference pointers */
    offset = (cell->ypos << 2) * plane->pitch + (cell->xpos << 2);
    block  = plane->pixels[ctx->buf_sel] + offset;
    if (!cell->mv_ptr) {
        /* use previous line as reference for INTRA cells */
        ref_block = block - plane->pitch;
    } else if (mode >= 10) {
        /* for modes 10 and 11 INTER, first copy the predicted cell into the
         * current one, so we don't need to do data copying for each RLE code later */
        copy_cell(ctx, plane, cell);
    } else {
        /* set the pointer to the reference pixels for modes 0-4 INTER */
        mv_y      = cell->mv_ptr[0];
        mv_x      = cell->mv_ptr[1];
        offset   += mv_y * plane->pitch + mv_x;
        ref_block = plane->pixels[ctx->buf_sel ^ 1] + offset;
    }

    /* select VQ tables as follows:
     * modes 0 and 3 use only the primary table for all lines in a block,
     * while modes 1 and 4 switch between primary and secondary tables on alternate lines */
    if (mode == 1 || mode == 4) {
        code        = ctx->alt_quant[vq_index];
        prim_indx   = (code >> 4)  + ctx->cb_offset;
        second_indx = (code & 0xF) + ctx->cb_offset;
    } else {
        vq_index += ctx->cb_offset;
        prim_indx = second_indx = vq_index;
    }

    if (prim_indx >= 24 || second_indx >= 24) {
        av_log(avctx, AV_LOG_ERROR, "Invalid VQ table indexes! Primary: %d, secondary: %d!\n",
               prim_indx, second_indx);
        return AVERROR_INVALIDDATA;
    }

    delta[0] = &vq_tab[second_indx];
    delta[1] = &vq_tab[prim_indx];
    swap_quads[0] = second_indx >= 16;
    swap_quads[1] = prim_indx   >= 16;

    /* requantize the prediction if the VQ index of this cell differs from the
     * VQ index of the predicted cell in order to avoid overflows. */
    if (vq_index >= 8 && ref_block) {
        for (x = 0; x < cell->width << 2; x++)
            ref_block[x] = requant_tab[vq_index & 7][ref_block[x]];
    }

    error = IV3_NOERR;

    switch (mode) {
    case 0: /*------------------ MODES 0 & 1 (4x4 block processing) --------------------*/
    case 1:
    case 3: /*------------------ MODES 3 & 4 (4x8 block processing) --------------------*/
    case 4:
        if (mode >= 3 && cell->mv_ptr) {
            av_log(avctx, AV_LOG_ERROR, "Attempt to apply Mode 3/4 to an INTER cell!\n");
            return AVERROR_INVALIDDATA;
        }

        zoom_fac = mode >= 3;
        error = decode_cell_data(cell, block, ref_block, plane->pitch, 0, zoom_fac,
                                 mode, delta, swap_quads, &data_ptr, last_ptr);
        break;
    case 10: /*-------------------- MODE 10 (8x8 block processing) ---------------------*/
    case 11: /*----------------- MODE 11 (4x8 INTER block processing) ------------------*/
        if (mode == 10 && !cell->mv_ptr) { /* MODE 10 INTRA processing */
            error = decode_cell_data(cell, block, ref_block, plane->pitch, 1, 1,
                                     mode, delta, swap_quads, &data_ptr, last_ptr);
        } else { /* modes 10 and 11 INTER processing */
            if (mode == 11 && !cell->mv_ptr) {
                av_log(avctx, AV_LOG_ERROR, "Attempt to use Mode 11 for an INTRA cell!\n");
                return AVERROR_INVALIDDATA;
            }

            zoom_fac = mode == 10;
            error = decode_cell_data(cell, block, ref_block, plane->pitch,
                                     zoom_fac, 1, mode, delta, swap_quads,
                                     &data_ptr, last_ptr);
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported coding mode: %d\n", mode);
        return AVERROR_INVALIDDATA;
    } //switch mode

    switch (error) {
    case IV3_BAD_RLE:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: RLE code %X is not allowed at the current line\n",
               mode, data_ptr[-1]);
        return AVERROR_INVALIDDATA;
    case IV3_BAD_DATA:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: invalid VQ data\n", mode);
        return AVERROR_INVALIDDATA;
    case IV3_BAD_COUNTER:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: RLE-FB invalid counter: %d\n", mode, code);
        return AVERROR_INVALIDDATA;
    case IV3_UNSUPPORTED:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: unsupported RLE code: %X\n", mode, data_ptr[-1]);
        return AVERROR_INVALIDDATA;
    case IV3_OUT_OF_DATA:
        av_log(avctx, AV_LOG_ERROR, "Mode %d: attempt to read past end of buffer\n", mode);
        return AVERROR_INVALIDDATA;
    }

    return data_ptr - data_start; /* report number of bytes consumed from the input buffer */
}

/* Binary tree codes. */
enum {
    H_SPLIT    = 0,
    V_SPLIT    = 1,
    INTRA_NULL = 2,
    INTER_DATA = 3
};
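
/* Split off roughly half of the parent cell, rounded to an even number of
 * 4x4 blocks (e.g. a size of 10 blocks is split into 6 + 4). */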
#define SPLIT_CELL(size, new_size) (new_size) = ((size) > 2) ? ((((size) + 2) >> 2) << 1) : 1

#define UPDATE_BITPOS(n) \
    ctx->skip_bits  += (n); \
    ctx->need_resync = 1

#define RESYNC_BITSTREAM \
    if (ctx->need_resync && !(get_bits_count(&ctx->gb) & 7)) { \
        skip_bits_long(&ctx->gb, ctx->skip_bits);              \
        ctx->skip_bits   = 0;                                  \
        ctx->need_resync = 0;                                  \
    }

#define CHECK_CELL \
    if (curr_cell.xpos + curr_cell.width > (plane->width >> 2) ||                  \
        curr_cell.ypos + curr_cell.height > (plane->height >> 2)) {                \
        av_log(avctx, AV_LOG_ERROR, "Invalid cell: x=%d, y=%d, w=%d, h=%d\n",      \
               curr_cell.xpos, curr_cell.ypos, curr_cell.width, curr_cell.height); \
        return AVERROR_INVALIDDATA;                                                \
    }
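
/**
 *  Parse the binary tree describing how a plane is split into cells and
 *  decode each leaf cell. MC-tree codes assign motion vectors or mark a cell
 *  as INTRA, VQ-tree codes carry the actual pixel data; the function recurses
 *  on H_SPLIT/V_SPLIT codes and keeps the byte-oriented cell data in sync
 *  with the bit reader via UPDATE_BITPOS/RESYNC_BITSTREAM.
 *
 *  @return 0 on success, a negative AVERROR value otherwise
 */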
static int parse_bintree(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                         Plane *plane, int code, Cell *ref_cell,
                         const int depth, const int strip_width)
{
    Cell curr_cell;
    int  bytes_used;

    if (depth <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Stack overflow (corrupted binary tree)!\n");
        return AVERROR_INVALIDDATA; // unwind recursion
    }

    curr_cell = *ref_cell; // clone parent cell
    if (code == H_SPLIT) {
        SPLIT_CELL(ref_cell->height, curr_cell.height);
        ref_cell->ypos   += curr_cell.height;
        ref_cell->height -= curr_cell.height;
        if (ref_cell->height <= 0 || curr_cell.height <= 0)
            return AVERROR_INVALIDDATA;
    } else if (code == V_SPLIT) {
        if (curr_cell.width > strip_width) {
            /* split strip */
            curr_cell.width = (curr_cell.width <= (strip_width << 1) ? 1 : 2) * strip_width;
        } else
            SPLIT_CELL(ref_cell->width, curr_cell.width);
        ref_cell->xpos  += curr_cell.width;
        ref_cell->width -= curr_cell.width;
        if (ref_cell->width <= 0 || curr_cell.width <= 0)
            return AVERROR_INVALIDDATA;
    }

    while (1) { /* loop until return */
        RESYNC_BITSTREAM;
        switch (code = get_bits(&ctx->gb, 2)) {
        case H_SPLIT:
        case V_SPLIT:
            if (parse_bintree(ctx, avctx, plane, code, &curr_cell, depth - 1, strip_width))
                return AVERROR_INVALIDDATA;
            break;
        case INTRA_NULL:
            if (!curr_cell.tree) { /* MC tree INTRA code */
                curr_cell.mv_ptr = 0; /* mark the current strip as INTRA */
                curr_cell.tree   = 1; /* enter the VQ tree */
            } else { /* VQ tree NULL code */
                RESYNC_BITSTREAM;
                code = get_bits(&ctx->gb, 2);
                if (code >= 2) {
                    av_log(avctx, AV_LOG_ERROR, "Invalid VQ_NULL code: %d\n", code);
                    return AVERROR_INVALIDDATA;
                }
                if (code == 1)
                    av_log(avctx, AV_LOG_ERROR, "SkipCell procedure not implemented yet!\n");

                CHECK_CELL
                if (!curr_cell.mv_ptr)
                    return AVERROR_INVALIDDATA;
                copy_cell(ctx, plane, &curr_cell);
                return 0;
            }
            break;
        case INTER_DATA:
            if (!curr_cell.tree) { /* MC tree INTER code */
                unsigned mv_idx;
                /* get motion vector index and setup the pointer to the mv set */
                if (!ctx->need_resync)
                    ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3];
                mv_idx = *(ctx->next_cell_data++);
                if (mv_idx >= ctx->num_vectors) {
                    av_log(avctx, AV_LOG_ERROR, "motion vector index out of range\n");
                    return AVERROR_INVALIDDATA;
                }
                curr_cell.mv_ptr = &ctx->mc_vectors[mv_idx << 1];
                curr_cell.tree   = 1; /* enter the VQ tree */
                UPDATE_BITPOS(8);
            } else { /* VQ tree DATA code */
                if (!ctx->need_resync)
                    ctx->next_cell_data = &ctx->gb.buffer[(get_bits_count(&ctx->gb) + 7) >> 3];
                CHECK_CELL
                bytes_used = decode_cell(ctx, avctx, plane, &curr_cell,
                                         ctx->next_cell_data, ctx->last_byte);
                if (bytes_used < 0)
                    return AVERROR_INVALIDDATA;
                UPDATE_BITPOS(bytes_used << 3);
                ctx->next_cell_data += bytes_used;
                return 0;
            }
            break;
        }
    } //while

    return 0;
}
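
/**
 *  Decode one plane of a frame.
 *  The plane data consists of a motion vector count, an optional array of
 *  motion vectors and the bitstream of the binary cell tree, which is parsed
 *  starting from a single cell covering the whole plane.
 *
 *  @return 0 on success, a negative AVERROR value otherwise
 */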
static int decode_plane(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                        Plane *plane, const uint8_t *data, int32_t data_size,
                        int32_t strip_width)
{
    Cell     curr_cell;
    unsigned num_vectors;

    /* the plane data starts with a mc_vector_count field, */
    /* an optional array of motion vectors followed by the vq data */
    num_vectors = bytestream_get_le32(&data);
    if (num_vectors > 256) {
        av_log(ctx->avctx, AV_LOG_ERROR,
               "Read invalid number of motion vectors %d\n", num_vectors);
        return AVERROR_INVALIDDATA;
    }
    if (num_vectors * 2 >= data_size)
        return AVERROR_INVALIDDATA;

    ctx->num_vectors = num_vectors;
    ctx->mc_vectors  = num_vectors ? data : 0;

    /* init the bitreader */
    init_get_bits(&ctx->gb, &data[num_vectors * 2], (data_size - num_vectors * 2) << 3);
    ctx->skip_bits   = 0;
    ctx->need_resync = 0;

    ctx->last_byte = data + data_size - 1;

    /* initialize the 1st cell and set its dimensions to whole plane */
    curr_cell.xpos   = curr_cell.ypos = 0;
    curr_cell.width  = plane->width  >> 2;
    curr_cell.height = plane->height >> 2;
    curr_cell.tree   = 0; // we are in the MC tree now
    curr_cell.mv_ptr = 0; // no motion vector = INTRA cell

    return parse_bintree(ctx, avctx, plane, INTRA_NULL, &curr_cell, CELL_STACK_MAX, strip_width);
}

#define OS_HDR_ID MKBETAG('F', 'R', 'M', 'H')
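
/**
 *  Parse the OS header and the bitstream header of a frame and store the
 *  frame parameters (dimensions, flags, plane offsets and sizes) in the
 *  decoder context.
 *
 *  @return 0 on success, a positive value for sync (null) frames that carry
 *          no picture data, or a negative AVERROR value on error
 */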
static int decode_frame_headers(Indeo3DecodeContext *ctx, AVCodecContext *avctx,
                                const uint8_t *buf, int buf_size)
{
    const uint8_t *buf_ptr = buf, *bs_hdr;
    uint32_t frame_num, word2, check_sum, data_size;
    uint32_t y_offset, u_offset, v_offset, starts[3], ends[3];
    uint16_t height, width;
    int      i, j;

    /* parse and check the OS header */
    frame_num = bytestream_get_le32(&buf_ptr);
    word2     = bytestream_get_le32(&buf_ptr);
    check_sum = bytestream_get_le32(&buf_ptr);
    data_size = bytestream_get_le32(&buf_ptr);

    if ((frame_num ^ word2 ^ data_size ^ OS_HDR_ID) != check_sum) {
        av_log(avctx, AV_LOG_ERROR, "OS header checksum mismatch!\n");
        return AVERROR_INVALIDDATA;
    }

    /* parse the bitstream header */
    bs_hdr = buf_ptr;

    if (bytestream_get_le16(&buf_ptr) != 32) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported codec version!\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->frame_num   = frame_num;
    ctx->frame_flags = bytestream_get_le16(&buf_ptr);
    ctx->data_size   = (bytestream_get_le32(&buf_ptr) + 7) >> 3;
    ctx->cb_offset   = *buf_ptr++;

    if (ctx->data_size == 16)
        return 4;
    if (ctx->data_size > buf_size)
        ctx->data_size = buf_size;

    buf_ptr += 3; // skip reserved byte and checksum

    /* check frame dimensions */
    height = bytestream_get_le16(&buf_ptr);
    width  = bytestream_get_le16(&buf_ptr);
    if (av_image_check_size(width, height, 0, avctx))
        return AVERROR_INVALIDDATA;

    if (width != ctx->width || height != ctx->height) {
        int res;

        av_dlog(avctx, "Frame dimensions changed!\n");

        if (width  < 16 || width  > 640 ||
            height < 16 || height > 480 ||
            width & 3 || height & 3) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid picture dimensions: %d x %d!\n", width, height);
            return AVERROR_INVALIDDATA;
        }

        ctx->width  = width;
        ctx->height = height;

        free_frame_buffers(ctx);
        if ((res = allocate_frame_buffers(ctx, avctx)) < 0)
            return res;

        avcodec_set_dimensions(avctx, width, height);
    }

    y_offset = bytestream_get_le32(&buf_ptr);
    v_offset = bytestream_get_le32(&buf_ptr);
    u_offset = bytestream_get_le32(&buf_ptr);

    /* unfortunately there is no common order of planes in the buffer, */
    /* so we use this sorting algo for determining the plane data sizes */
    starts[0] = y_offset;
    starts[1] = v_offset;
    starts[2] = u_offset;

    for (j = 0; j < 3; j++) {
        ends[j] = ctx->data_size;
        for (i = 2; i >= 0; i--)
            if (starts[i] < ends[j] && starts[i] > starts[j])
                ends[j] = starts[i];
    }

    ctx->y_data_size = ends[0] - starts[0];
    ctx->v_data_size = ends[1] - starts[1];
    ctx->u_data_size = ends[2] - starts[2];
    if (FFMAX3(y_offset, v_offset, u_offset) >= ctx->data_size - 16 ||
        FFMIN3(ctx->y_data_size, ctx->v_data_size, ctx->u_data_size) <= 0) {
        av_log(avctx, AV_LOG_ERROR, "One of the y/u/v offsets is invalid\n");
        return AVERROR_INVALIDDATA;
    }

    ctx->y_data_ptr = bs_hdr + y_offset;
    ctx->v_data_ptr = bs_hdr + v_offset;
    ctx->u_data_ptr = bs_hdr + u_offset;
    ctx->alt_quant  = buf_ptr + sizeof(uint32_t);

    if (ctx->data_size == 16) {
        av_log(avctx, AV_LOG_DEBUG, "Sync frame encountered!\n");
        return 16;
    }

    if (ctx->frame_flags & BS_8BIT_PEL) {
        av_log_ask_for_sample(avctx, "8-bit pixel format\n");
        return AVERROR_PATCHWELCOME;
    }

    if (ctx->frame_flags & BS_MV_X_HALF || ctx->frame_flags & BS_MV_Y_HALF) {
        av_log_ask_for_sample(avctx, "halfpel motion vectors\n");
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}

/**
 *  Convert and output the current plane.
 *  All pixel values will be upsampled by shifting left by one bit.
 *
 *  @param[in]  plane      pointer to the descriptor of the plane being processed
 *  @param[in]  buf_sel    indicates which frame buffer the input data is stored in
 *  @param[out] dst        pointer to the buffer receiving converted pixels
 *  @param[in]  dst_pitch  pitch for moving to the next y line
 *  @param[in]  dst_height output plane height
 */
static void output_plane(const Plane *plane, int buf_sel, uint8_t *dst,
                         int dst_pitch, int dst_height)
{
    int x, y;
    const uint8_t *src = plane->pixels[buf_sel];
    uint32_t pitch     = plane->pitch;

    dst_height = FFMIN(dst_height, plane->height);
    for (y = 0; y < dst_height; y++) {
        /* convert four pixels at once using SWAR */
        for (x = 0; x < plane->width >> 2; x++) {
            AV_WN32A(dst, (AV_RN32A(src) & 0x7F7F7F7F) << 1);
            src += 4;
            dst += 4;
        }

        for (x <<= 2; x < plane->width; x++)
            *dst++ = *src++ << 1;

        src += pitch     - plane->width;
        dst += dst_pitch - plane->width;
    }
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    Indeo3DecodeContext *ctx = avctx->priv_data;

    ctx->avctx  = avctx;
    ctx->width  = avctx->width;
    ctx->height = avctx->height;
    avctx->pix_fmt = AV_PIX_FMT_YUV410P;

    build_requant_tab();

    ff_dsputil_init(&ctx->dsp, avctx);

    allocate_frame_buffers(ctx, avctx);

    return 0;
}

static int decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                        AVPacket *avpkt)
{
    Indeo3DecodeContext *ctx = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    int res;

    res = decode_frame_headers(ctx, avctx, buf, buf_size);
    if (res < 0)
        return res;

    /* skip sync (null) frames */
    if (res) {
        // we have processed 16 bytes but no data was decoded
        *data_size = 0;
        return buf_size;
    }

    /* skip droppable INTER frames if requested */
    if (ctx->frame_flags & BS_NONREF &&
        (avctx->skip_frame >= AVDISCARD_NONREF))
        return 0;

    /* skip INTER frames if requested */
    if (!(ctx->frame_flags & BS_KEYFRAME) && avctx->skip_frame >= AVDISCARD_NONKEY)
        return 0;

    /* use BS_BUFFER flag for buffer switching */
    ctx->buf_sel = (ctx->frame_flags >> BS_BUFFER) & 1;

    /* decode luma plane */
    if ((res = decode_plane(ctx, avctx, ctx->planes, ctx->y_data_ptr, ctx->y_data_size, 40)))
        return res;

    /* decode chroma planes */
    if ((res = decode_plane(ctx, avctx, &ctx->planes[1], ctx->u_data_ptr, ctx->u_data_size, 10)))
        return res;

    if ((res = decode_plane(ctx, avctx, &ctx->planes[2], ctx->v_data_ptr, ctx->v_data_size, 10)))
        return res;

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    ctx->frame.reference = 0;
    if ((res = avctx->get_buffer(avctx, &ctx->frame)) < 0) {
        av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return res;
    }

    output_plane(&ctx->planes[0], ctx->buf_sel,
                 ctx->frame.data[0], ctx->frame.linesize[0],
                 avctx->height);
    output_plane(&ctx->planes[1], ctx->buf_sel,
                 ctx->frame.data[1], ctx->frame.linesize[1],
                 (avctx->height + 3) >> 2);
    output_plane(&ctx->planes[2], ctx->buf_sel,
                 ctx->frame.data[2], ctx->frame.linesize[2],
                 (avctx->height + 3) >> 2);

    *data_size      = sizeof(AVFrame);
    *(AVFrame*)data = ctx->frame;

    return buf_size;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    Indeo3DecodeContext *ctx = avctx->priv_data;

    free_frame_buffers(avctx->priv_data);

    if (ctx->frame.data[0])
        avctx->release_buffer(avctx, &ctx->frame);

    return 0;
}

AVCodec ff_indeo3_decoder = {
    .name           = "indeo3",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_INDEO3,
    .priv_data_size = sizeof(Indeo3DecodeContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Intel Indeo 3"),
};