  1. /*
  2. * Copyright (C) 2003-2004 the ffmpeg project
  3. *
  4. * This library is free software; you can redistribute it and/or
  5. * modify it under the terms of the GNU Lesser General Public
  6. * License as published by the Free Software Foundation; either
  7. * version 2 of the License, or (at your option) any later version.
  8. *
  9. * This library is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  12. * Lesser General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU Lesser General Public
  15. * License along with this library; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  17. *
  18. * VP3 Video Decoder by Mike Melanson (melanson@pcisys.net)
  19. * For more information about the VP3 coding process, visit:
  20. * http://www.pcisys.net/~melanson/codecs/
  21. *
  22. * Theora decoder by Alex Beregszaszi
  23. *
  24. */
  25. /**
  26. * @file vp3.c
  27. * On2 VP3 Video Decoder
  28. */
  29. #include <stdio.h>
  30. #include <stdlib.h>
  31. #include <string.h>
  32. #include <unistd.h>
  33. #include "common.h"
  34. #include "avcodec.h"
  35. #include "dsputil.h"
  36. #include "mpegvideo.h"
  37. #include "vp3data.h"
  38. #define FRAGMENT_PIXELS 8
  39. /*
  40. * Debugging Variables
  41. *
  42. * Define one or more of the following compile-time variables to 1 to obtain
  43. * elaborate information about certain aspects of the decoding process.
  44. *
  45. * KEYFRAMES_ONLY: set this to 1 to only see keyframes (VP3 slideshow mode)
  46. * DEBUG_VP3: high-level decoding flow
  47. * DEBUG_INIT: initialization parameters
48. * DEBUG_DEQUANTIZERS: display how the dequantization tables are built
  49. * DEBUG_BLOCK_CODING: unpacking the superblock/macroblock/fragment coding
  50. * DEBUG_MODES: unpacking the coding modes for individual fragments
  51. * DEBUG_VECTORS: display the motion vectors
  52. * DEBUG_TOKEN: display exhaustive information about each DCT token
  53. * DEBUG_VLC: display the VLCs as they are extracted from the stream
  54. * DEBUG_DC_PRED: display the process of reversing DC prediction
  55. * DEBUG_IDCT: show every detail of the IDCT process
  56. */
  57. #define KEYFRAMES_ONLY 0
  58. #define DEBUG_VP3 0
  59. #define DEBUG_INIT 0
  60. #define DEBUG_DEQUANTIZERS 0
  61. #define DEBUG_BLOCK_CODING 0
  62. #define DEBUG_MODES 0
  63. #define DEBUG_VECTORS 0
  64. #define DEBUG_TOKEN 0
  65. #define DEBUG_VLC 0
  66. #define DEBUG_DC_PRED 0
  67. #define DEBUG_IDCT 0
  68. #if DEBUG_VP3
  69. #define debug_vp3 printf
  70. #else
  71. static inline void debug_vp3(const char *format, ...) { }
  72. #endif
  73. #if DEBUG_INIT
  74. #define debug_init printf
  75. #else
  76. static inline void debug_init(const char *format, ...) { }
  77. #endif
  78. #if DEBUG_DEQUANTIZERS
  79. #define debug_dequantizers printf
  80. #else
  81. static inline void debug_dequantizers(const char *format, ...) { }
  82. #endif
  83. #if DEBUG_BLOCK_CODING
  84. #define debug_block_coding printf
  85. #else
  86. static inline void debug_block_coding(const char *format, ...) { }
  87. #endif
  88. #if DEBUG_MODES
  89. #define debug_modes printf
  90. #else
  91. static inline void debug_modes(const char *format, ...) { }
  92. #endif
  93. #if DEBUG_VECTORS
  94. #define debug_vectors printf
  95. #else
  96. static inline void debug_vectors(const char *format, ...) { }
  97. #endif
  98. #if DEBUG_TOKEN
  99. #define debug_token printf
  100. #else
  101. static inline void debug_token(const char *format, ...) { }
  102. #endif
  103. #if DEBUG_VLC
  104. #define debug_vlc printf
  105. #else
  106. static inline void debug_vlc(const char *format, ...) { }
  107. #endif
  108. #if DEBUG_DC_PRED
  109. #define debug_dc_pred printf
  110. #else
  111. static inline void debug_dc_pred(const char *format, ...) { }
  112. #endif
  113. #if DEBUG_IDCT
  114. #define debug_idct printf
  115. #else
  116. static inline void debug_idct(const char *format, ...) { }
  117. #endif
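/* A fragment is an 8x8 block of samples within a single plane (see
 * FRAGMENT_PIXELS above); each fragment carries its own DCT coefficients,
 * coding mode, and motion vector. */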
  118. typedef struct Vp3Fragment {
  119. DCTELEM coeffs[64];
  120. int coding_method;
  121. int coeff_count;
  122. int last_coeff;
  123. int motion_x;
  124. int motion_y;
  125. /* address of first pixel taking into account which plane the fragment
  126. * lives on as well as the plane stride */
  127. int first_pixel;
  128. /* this is the macroblock that the fragment belongs to */
  129. int macroblock;
  130. } Vp3Fragment;
  131. #define SB_NOT_CODED 0
  132. #define SB_PARTIALLY_CODED 1
  133. #define SB_FULLY_CODED 2
  134. #define MODE_INTER_NO_MV 0
  135. #define MODE_INTRA 1
  136. #define MODE_INTER_PLUS_MV 2
  137. #define MODE_INTER_LAST_MV 3
  138. #define MODE_INTER_PRIOR_LAST 4
  139. #define MODE_USING_GOLDEN 5
  140. #define MODE_GOLDEN_MV 6
  141. #define MODE_INTER_FOURMV 7
  142. #define CODING_MODE_COUNT 8
  143. /* special internal mode */
  144. #define MODE_COPY 8
  145. /* There are 6 preset schemes, plus a free-form scheme */
  146. static int ModeAlphabet[7][CODING_MODE_COUNT] =
  147. {
  148. /* this is the custom scheme */
  149. { 0, 0, 0, 0, 0, 0, 0, 0 },
  150. /* scheme 1: Last motion vector dominates */
  151. { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
  152. MODE_INTER_PLUS_MV, MODE_INTER_NO_MV,
  153. MODE_INTRA, MODE_USING_GOLDEN,
  154. MODE_GOLDEN_MV, MODE_INTER_FOURMV },
  155. /* scheme 2 */
  156. { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
  157. MODE_INTER_NO_MV, MODE_INTER_PLUS_MV,
  158. MODE_INTRA, MODE_USING_GOLDEN,
  159. MODE_GOLDEN_MV, MODE_INTER_FOURMV },
  160. /* scheme 3 */
  161. { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
  162. MODE_INTER_PRIOR_LAST, MODE_INTER_NO_MV,
  163. MODE_INTRA, MODE_USING_GOLDEN,
  164. MODE_GOLDEN_MV, MODE_INTER_FOURMV },
  165. /* scheme 4 */
  166. { MODE_INTER_LAST_MV, MODE_INTER_PLUS_MV,
  167. MODE_INTER_NO_MV, MODE_INTER_PRIOR_LAST,
  168. MODE_INTRA, MODE_USING_GOLDEN,
  169. MODE_GOLDEN_MV, MODE_INTER_FOURMV },
  170. /* scheme 5: No motion vector dominates */
  171. { MODE_INTER_NO_MV, MODE_INTER_LAST_MV,
  172. MODE_INTER_PRIOR_LAST, MODE_INTER_PLUS_MV,
  173. MODE_INTRA, MODE_USING_GOLDEN,
  174. MODE_GOLDEN_MV, MODE_INTER_FOURMV },
  175. /* scheme 6 */
  176. { MODE_INTER_NO_MV, MODE_USING_GOLDEN,
  177. MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
  178. MODE_INTER_PLUS_MV, MODE_INTRA,
  179. MODE_GOLDEN_MV, MODE_INTER_FOURMV },
  180. };
  181. #define MIN_DEQUANT_VAL 2
  182. typedef struct Vp3DecodeContext {
  183. AVCodecContext *avctx;
  184. int theora, theora_tables;
  185. int version;
  186. int width, height;
  187. AVFrame golden_frame;
  188. AVFrame last_frame;
  189. AVFrame current_frame;
  190. int keyframe;
  191. DSPContext dsp;
  192. int flipped_image;
  193. int quality_index;
  194. int last_quality_index;
  195. int superblock_count;
  196. int superblock_width;
  197. int superblock_height;
  198. int y_superblock_width;
  199. int y_superblock_height;
  200. int c_superblock_width;
  201. int c_superblock_height;
  202. int u_superblock_start;
  203. int v_superblock_start;
  204. unsigned char *superblock_coding;
  205. int macroblock_count;
  206. int macroblock_width;
  207. int macroblock_height;
  208. int fragment_count;
  209. int fragment_width;
  210. int fragment_height;
  211. Vp3Fragment *all_fragments;
  212. int u_fragment_start;
  213. int v_fragment_start;
  214. /* tables */
  215. uint16_t coded_dc_scale_factor[64];
  216. uint32_t coded_ac_scale_factor[64];
  217. uint16_t coded_intra_y_dequant[64];
  218. uint16_t coded_intra_c_dequant[64];
  219. uint16_t coded_inter_dequant[64];
  220. /* this is a list of indices into the all_fragments array indicating
  221. * which of the fragments are coded */
  222. int *coded_fragment_list;
  223. int coded_fragment_list_index;
  224. int pixel_addresses_inited;
  225. VLC dc_vlc[16];
  226. VLC ac_vlc_1[16];
  227. VLC ac_vlc_2[16];
  228. VLC ac_vlc_3[16];
  229. VLC ac_vlc_4[16];
  230. /* these arrays need to be on 16-byte boundaries since SSE2 operations
  231. * index into them */
  232. int16_t __align16 intra_y_dequant[64];
  233. int16_t __align16 intra_c_dequant[64];
  234. int16_t __align16 inter_dequant[64];
  235. /* This table contains superblock_count * 16 entries. Each set of 16
  236. * numbers corresponds to the fragment indices 0..15 of the superblock.
  237. * An entry will be -1 to indicate that no entry corresponds to that
  238. * index. */
  239. int *superblock_fragments;
  240. /* This table contains superblock_count * 4 entries. Each set of 4
  241. * numbers corresponds to the macroblock indices 0..3 of the superblock.
  242. * An entry will be -1 to indicate that no entry corresponds to that
  243. * index. */
  244. int *superblock_macroblocks;
  245. /* This table contains macroblock_count * 6 entries. Each set of 6
  246. * numbers corresponds to the fragment indices 0..5 which comprise
  247. * the macroblock (4 Y fragments and 2 C fragments). */
  248. int *macroblock_fragments;
  249. /* This is an array that indicates how a particular macroblock
  250. * is coded. */
  251. unsigned char *macroblock_coding;
  252. int first_coded_y_fragment;
  253. int first_coded_c_fragment;
  254. int last_coded_y_fragment;
  255. int last_coded_c_fragment;
  256. uint8_t edge_emu_buffer[9*2048]; //FIXME dynamic alloc
  257. uint8_t qscale_table[2048]; //FIXME dynamic alloc (width+15)/16
  258. } Vp3DecodeContext;
  259. static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb);
  260. static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb);
  261. /************************************************************************
  262. * VP3 specific functions
  263. ************************************************************************/
  264. /*
265. * This function sets up all of the various block mappings:
  266. * superblocks <-> fragments, macroblocks <-> fragments,
  267. * superblocks <-> macroblocks
  268. *
269. * Returns 0 if successful; returns 1 if *anything* went wrong.
  270. */
  271. static int init_block_mapping(Vp3DecodeContext *s)
  272. {
  273. int i, j;
  274. signed int hilbert_walk_y[16];
  275. signed int hilbert_walk_c[16];
  276. signed int hilbert_walk_mb[4];
  277. int current_fragment = 0;
  278. int current_width = 0;
  279. int current_height = 0;
  280. int right_edge = 0;
  281. int bottom_edge = 0;
  282. int superblock_row_inc = 0;
  283. int *hilbert = NULL;
  284. int mapping_index = 0;
  285. int current_macroblock;
  286. int c_fragment;
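/* travel_width/travel_height hold the per-step x/y deltas of the 16-step
 * traversal of a superblock's fragments; travel_width_mb/travel_height_mb
 * are the analogous deltas for the 4 macroblocks in a superblock. They are
 * applied alongside the hilbert_walk_* index deltas below so the current
 * position can be checked against the right/bottom edges. */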
  287. signed char travel_width[16] = {
  288. 1, 1, 0, -1,
  289. 0, 0, 1, 0,
  290. 1, 0, 1, 0,
  291. 0, -1, 0, 1
  292. };
  293. signed char travel_height[16] = {
  294. 0, 0, 1, 0,
  295. 1, 1, 0, -1,
  296. 0, 1, 0, -1,
  297. -1, 0, -1, 0
  298. };
  299. signed char travel_width_mb[4] = {
  300. 1, 0, 1, 0
  301. };
  302. signed char travel_height_mb[4] = {
  303. 0, 1, 0, -1
  304. };
  305. debug_vp3(" vp3: initialize block mapping tables\n");
  306. /* figure out hilbert pattern per these frame dimensions */
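/* Each hilbert_walk_* entry is a delta added to the running fragment (or
 * macroblock) index at each of the 16 (or 4) steps of the walk: +1/-1 step
 * right/left within a row, while +/-fragment_width (fragment_width / 2 for
 * the half-width C planes, macroblock_width for macroblocks) steps down/up
 * one row. */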
  307. hilbert_walk_y[0] = 1;
  308. hilbert_walk_y[1] = 1;
  309. hilbert_walk_y[2] = s->fragment_width;
  310. hilbert_walk_y[3] = -1;
  311. hilbert_walk_y[4] = s->fragment_width;
  312. hilbert_walk_y[5] = s->fragment_width;
  313. hilbert_walk_y[6] = 1;
  314. hilbert_walk_y[7] = -s->fragment_width;
  315. hilbert_walk_y[8] = 1;
  316. hilbert_walk_y[9] = s->fragment_width;
  317. hilbert_walk_y[10] = 1;
  318. hilbert_walk_y[11] = -s->fragment_width;
  319. hilbert_walk_y[12] = -s->fragment_width;
  320. hilbert_walk_y[13] = -1;
  321. hilbert_walk_y[14] = -s->fragment_width;
  322. hilbert_walk_y[15] = 1;
  323. hilbert_walk_c[0] = 1;
  324. hilbert_walk_c[1] = 1;
  325. hilbert_walk_c[2] = s->fragment_width / 2;
  326. hilbert_walk_c[3] = -1;
  327. hilbert_walk_c[4] = s->fragment_width / 2;
  328. hilbert_walk_c[5] = s->fragment_width / 2;
  329. hilbert_walk_c[6] = 1;
  330. hilbert_walk_c[7] = -s->fragment_width / 2;
  331. hilbert_walk_c[8] = 1;
  332. hilbert_walk_c[9] = s->fragment_width / 2;
  333. hilbert_walk_c[10] = 1;
  334. hilbert_walk_c[11] = -s->fragment_width / 2;
  335. hilbert_walk_c[12] = -s->fragment_width / 2;
  336. hilbert_walk_c[13] = -1;
  337. hilbert_walk_c[14] = -s->fragment_width / 2;
  338. hilbert_walk_c[15] = 1;
  339. hilbert_walk_mb[0] = 1;
  340. hilbert_walk_mb[1] = s->macroblock_width;
  341. hilbert_walk_mb[2] = 1;
  342. hilbert_walk_mb[3] = -s->macroblock_width;
  343. /* iterate through each superblock (all planes) and map the fragments */
  344. for (i = 0; i < s->superblock_count; i++) {
  345. debug_init(" superblock %d (u starts @ %d, v starts @ %d)\n",
  346. i, s->u_superblock_start, s->v_superblock_start);
  347. /* time to re-assign the limits? */
  348. if (i == 0) {
  349. /* start of Y superblocks */
  350. right_edge = s->fragment_width;
  351. bottom_edge = s->fragment_height;
  352. current_width = -1;
  353. current_height = 0;
  354. superblock_row_inc = 3 * s->fragment_width -
  355. (s->y_superblock_width * 4 - s->fragment_width);
  356. hilbert = hilbert_walk_y;
  357. /* the first operation for this variable is to advance by 1 */
  358. current_fragment = -1;
  359. } else if (i == s->u_superblock_start) {
  360. /* start of U superblocks */
  361. right_edge = s->fragment_width / 2;
  362. bottom_edge = s->fragment_height / 2;
  363. current_width = -1;
  364. current_height = 0;
  365. superblock_row_inc = 3 * (s->fragment_width / 2) -
  366. (s->c_superblock_width * 4 - s->fragment_width / 2);
  367. hilbert = hilbert_walk_c;
  368. /* the first operation for this variable is to advance by 1 */
  369. current_fragment = s->u_fragment_start - 1;
  370. } else if (i == s->v_superblock_start) {
  371. /* start of V superblocks */
  372. right_edge = s->fragment_width / 2;
  373. bottom_edge = s->fragment_height / 2;
  374. current_width = -1;
  375. current_height = 0;
  376. superblock_row_inc = 3 * (s->fragment_width / 2) -
  377. (s->c_superblock_width * 4 - s->fragment_width / 2);
  378. hilbert = hilbert_walk_c;
  379. /* the first operation for this variable is to advance by 1 */
  380. current_fragment = s->v_fragment_start - 1;
  381. }
  382. if (current_width >= right_edge - 1) {
  383. /* reset width and move to next superblock row */
  384. current_width = -1;
  385. current_height += 4;
  386. /* fragment is now at the start of a new superblock row */
  387. current_fragment += superblock_row_inc;
  388. }
  389. /* iterate through all 16 fragments in a superblock */
  390. for (j = 0; j < 16; j++) {
  391. current_fragment += hilbert[j];
  392. current_width += travel_width[j];
  393. current_height += travel_height[j];
  394. /* check if the fragment is in bounds */
  395. if ((current_width < right_edge) &&
  396. (current_height < bottom_edge)) {
  397. s->superblock_fragments[mapping_index] = current_fragment;
  398. debug_init(" mapping fragment %d to superblock %d, position %d (%d/%d x %d/%d)\n",
  399. s->superblock_fragments[mapping_index], i, j,
  400. current_width, right_edge, current_height, bottom_edge);
  401. } else {
  402. s->superblock_fragments[mapping_index] = -1;
  403. debug_init(" superblock %d, position %d has no fragment (%d/%d x %d/%d)\n",
  404. i, j,
  405. current_width, right_edge, current_height, bottom_edge);
  406. }
  407. mapping_index++;
  408. }
  409. }
  410. /* initialize the superblock <-> macroblock mapping; iterate through
  411. * all of the Y plane superblocks to build this mapping */
  412. right_edge = s->macroblock_width;
  413. bottom_edge = s->macroblock_height;
  414. current_width = -1;
  415. current_height = 0;
  416. superblock_row_inc = s->macroblock_width -
417. (s->y_superblock_width * 2 - s->macroblock_width);
  418. hilbert = hilbert_walk_mb;
  419. mapping_index = 0;
  420. current_macroblock = -1;
  421. for (i = 0; i < s->u_superblock_start; i++) {
  422. if (current_width >= right_edge - 1) {
  423. /* reset width and move to next superblock row */
  424. current_width = -1;
  425. current_height += 2;
  426. /* macroblock is now at the start of a new superblock row */
  427. current_macroblock += superblock_row_inc;
  428. }
  429. /* iterate through each potential macroblock in the superblock */
  430. for (j = 0; j < 4; j++) {
  431. current_macroblock += hilbert_walk_mb[j];
  432. current_width += travel_width_mb[j];
  433. current_height += travel_height_mb[j];
  434. /* check if the macroblock is in bounds */
  435. if ((current_width < right_edge) &&
  436. (current_height < bottom_edge)) {
  437. s->superblock_macroblocks[mapping_index] = current_macroblock;
  438. debug_init(" mapping macroblock %d to superblock %d, position %d (%d/%d x %d/%d)\n",
  439. s->superblock_macroblocks[mapping_index], i, j,
  440. current_width, right_edge, current_height, bottom_edge);
  441. } else {
  442. s->superblock_macroblocks[mapping_index] = -1;
  443. debug_init(" superblock %d, position %d has no macroblock (%d/%d x %d/%d)\n",
  444. i, j,
  445. current_width, right_edge, current_height, bottom_edge);
  446. }
  447. mapping_index++;
  448. }
  449. }
  450. /* initialize the macroblock <-> fragment mapping */
  451. current_fragment = 0;
  452. current_macroblock = 0;
  453. mapping_index = 0;
  454. for (i = 0; i < s->fragment_height; i += 2) {
  455. for (j = 0; j < s->fragment_width; j += 2) {
  456. debug_init(" macroblock %d contains fragments: ", current_macroblock);
  457. s->all_fragments[current_fragment].macroblock = current_macroblock;
  458. s->macroblock_fragments[mapping_index++] = current_fragment;
  459. debug_init("%d ", current_fragment);
  460. if (j + 1 < s->fragment_width) {
  461. s->all_fragments[current_fragment + 1].macroblock = current_macroblock;
  462. s->macroblock_fragments[mapping_index++] = current_fragment + 1;
  463. debug_init("%d ", current_fragment + 1);
  464. } else
  465. s->macroblock_fragments[mapping_index++] = -1;
  466. if (i + 1 < s->fragment_height) {
  467. s->all_fragments[current_fragment + s->fragment_width].macroblock =
  468. current_macroblock;
  469. s->macroblock_fragments[mapping_index++] =
  470. current_fragment + s->fragment_width;
  471. debug_init("%d ", current_fragment + s->fragment_width);
  472. } else
  473. s->macroblock_fragments[mapping_index++] = -1;
  474. if ((j + 1 < s->fragment_width) && (i + 1 < s->fragment_height)) {
  475. s->all_fragments[current_fragment + s->fragment_width + 1].macroblock =
  476. current_macroblock;
  477. s->macroblock_fragments[mapping_index++] =
  478. current_fragment + s->fragment_width + 1;
  479. debug_init("%d ", current_fragment + s->fragment_width + 1);
  480. } else
  481. s->macroblock_fragments[mapping_index++] = -1;
  482. /* C planes */
  483. c_fragment = s->u_fragment_start +
  484. (i * s->fragment_width / 4) + (j / 2);
  485. s->all_fragments[c_fragment].macroblock = s->macroblock_count;
  486. s->macroblock_fragments[mapping_index++] = c_fragment;
  487. debug_init("%d ", c_fragment);
  488. c_fragment = s->v_fragment_start +
  489. (i * s->fragment_width / 4) + (j / 2);
  490. s->all_fragments[c_fragment].macroblock = s->macroblock_count;
  491. s->macroblock_fragments[mapping_index++] = c_fragment;
  492. debug_init("%d ", c_fragment);
  493. debug_init("\n");
  494. if (j + 2 <= s->fragment_width)
  495. current_fragment += 2;
  496. else
  497. current_fragment++;
  498. current_macroblock++;
  499. }
  500. current_fragment += s->fragment_width;
  501. }
  502. return 0; /* successful path out */
  503. }
  504. /*
  505. * This function unpacks a single token (which should be in the range 0..31)
  506. * and returns a zero run (number of zero coefficients in current DCT matrix
  507. * before next non-zero coefficient), the next DCT coefficient, and the
  508. * number of consecutive, non-EOB'd DCT blocks to EOB.
  509. */
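/* Illustrative example (values chosen here, not from any real stream):
 * token 7 (DCT_SHORT_ZRL_TOKEN) with extra bits 010 yields zero_run = 2,
 * coeff = 0, eob_run = 0, i.e. the caller skips 2 positions and then
 * stores a 0 coefficient; token 1 simply yields eob_run = 2. */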
  510. static void unpack_token(GetBitContext *gb, int token, int *zero_run,
  511. DCTELEM *coeff, int *eob_run)
  512. {
  513. int sign;
  514. *zero_run = 0;
  515. *eob_run = 0;
  516. *coeff = 0;
  517. debug_token(" vp3 token %d: ", token);
  518. switch (token) {
  519. case 0:
  520. debug_token("DCT_EOB_TOKEN, EOB next block\n");
  521. *eob_run = 1;
  522. break;
  523. case 1:
  524. debug_token("DCT_EOB_PAIR_TOKEN, EOB next 2 blocks\n");
  525. *eob_run = 2;
  526. break;
  527. case 2:
  528. debug_token("DCT_EOB_TRIPLE_TOKEN, EOB next 3 blocks\n");
  529. *eob_run = 3;
  530. break;
  531. case 3:
  532. debug_token("DCT_REPEAT_RUN_TOKEN, ");
  533. *eob_run = get_bits(gb, 2) + 4;
  534. debug_token("EOB the next %d blocks\n", *eob_run);
  535. break;
  536. case 4:
  537. debug_token("DCT_REPEAT_RUN2_TOKEN, ");
  538. *eob_run = get_bits(gb, 3) + 8;
  539. debug_token("EOB the next %d blocks\n", *eob_run);
  540. break;
  541. case 5:
  542. debug_token("DCT_REPEAT_RUN3_TOKEN, ");
  543. *eob_run = get_bits(gb, 4) + 16;
  544. debug_token("EOB the next %d blocks\n", *eob_run);
  545. break;
  546. case 6:
  547. debug_token("DCT_REPEAT_RUN4_TOKEN, ");
  548. *eob_run = get_bits(gb, 12);
  549. debug_token("EOB the next %d blocks\n", *eob_run);
  550. break;
  551. case 7:
  552. debug_token("DCT_SHORT_ZRL_TOKEN, ");
553. /* this token indicates a run of ((3 extra bits) + 1) zeros: zero_run
554. * receives the 3 extra bits and the coefficient itself is 0, which
555. * accounts for the final zero of the run. */
  556. *zero_run = get_bits(gb, 3);
  557. *coeff = 0;
  558. debug_token("skip the next %d positions in output matrix\n", *zero_run + 1);
  559. break;
  560. case 8:
  561. debug_token("DCT_ZRL_TOKEN, ");
562. /* this token indicates a run of ((6 extra bits) + 1) zeros: zero_run
563. * receives the 6 extra bits and the coefficient itself is 0, which
564. * accounts for the final zero of the run. */
  565. *zero_run = get_bits(gb, 6);
  566. *coeff = 0;
  567. debug_token("skip the next %d positions in output matrix\n", *zero_run + 1);
  568. break;
  569. case 9:
  570. debug_token("ONE_TOKEN, output 1\n");
  571. *coeff = 1;
  572. break;
  573. case 10:
  574. debug_token("MINUS_ONE_TOKEN, output -1\n");
  575. *coeff = -1;
  576. break;
  577. case 11:
  578. debug_token("TWO_TOKEN, output 2\n");
  579. *coeff = 2;
  580. break;
  581. case 12:
  582. debug_token("MINUS_TWO_TOKEN, output -2\n");
  583. *coeff = -2;
  584. break;
  585. case 13:
  586. case 14:
  587. case 15:
  588. case 16:
  589. debug_token("LOW_VAL_TOKENS, ");
  590. if (get_bits(gb, 1))
  591. *coeff = -(3 + (token - 13));
  592. else
  593. *coeff = 3 + (token - 13);
  594. debug_token("output %d\n", *coeff);
  595. break;
  596. case 17:
  597. debug_token("DCT_VAL_CATEGORY3, ");
  598. sign = get_bits(gb, 1);
  599. *coeff = 7 + get_bits(gb, 1);
  600. if (sign)
  601. *coeff = -(*coeff);
  602. debug_token("output %d\n", *coeff);
  603. break;
  604. case 18:
  605. debug_token("DCT_VAL_CATEGORY4, ");
  606. sign = get_bits(gb, 1);
  607. *coeff = 9 + get_bits(gb, 2);
  608. if (sign)
  609. *coeff = -(*coeff);
  610. debug_token("output %d\n", *coeff);
  611. break;
  612. case 19:
  613. debug_token("DCT_VAL_CATEGORY5, ");
  614. sign = get_bits(gb, 1);
  615. *coeff = 13 + get_bits(gb, 3);
  616. if (sign)
  617. *coeff = -(*coeff);
  618. debug_token("output %d\n", *coeff);
  619. break;
  620. case 20:
  621. debug_token("DCT_VAL_CATEGORY6, ");
  622. sign = get_bits(gb, 1);
  623. *coeff = 21 + get_bits(gb, 4);
  624. if (sign)
  625. *coeff = -(*coeff);
  626. debug_token("output %d\n", *coeff);
  627. break;
  628. case 21:
  629. debug_token("DCT_VAL_CATEGORY7, ");
  630. sign = get_bits(gb, 1);
  631. *coeff = 37 + get_bits(gb, 5);
  632. if (sign)
  633. *coeff = -(*coeff);
  634. debug_token("output %d\n", *coeff);
  635. break;
  636. case 22:
  637. debug_token("DCT_VAL_CATEGORY8, ");
  638. sign = get_bits(gb, 1);
  639. *coeff = 69 + get_bits(gb, 9);
  640. if (sign)
  641. *coeff = -(*coeff);
  642. debug_token("output %d\n", *coeff);
  643. break;
  644. case 23:
  645. case 24:
  646. case 25:
  647. case 26:
  648. case 27:
  649. debug_token("DCT_RUN_CATEGORY1, ");
  650. *zero_run = token - 22;
  651. if (get_bits(gb, 1))
  652. *coeff = -1;
  653. else
  654. *coeff = 1;
  655. debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
  656. break;
  657. case 28:
  658. debug_token("DCT_RUN_CATEGORY1B, ");
  659. if (get_bits(gb, 1))
  660. *coeff = -1;
  661. else
  662. *coeff = 1;
  663. *zero_run = 6 + get_bits(gb, 2);
  664. debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
  665. break;
  666. case 29:
  667. debug_token("DCT_RUN_CATEGORY1C, ");
  668. if (get_bits(gb, 1))
  669. *coeff = -1;
  670. else
  671. *coeff = 1;
  672. *zero_run = 10 + get_bits(gb, 3);
  673. debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
  674. break;
  675. case 30:
  676. debug_token("DCT_RUN_CATEGORY2, ");
  677. sign = get_bits(gb, 1);
  678. *coeff = 2 + get_bits(gb, 1);
  679. if (sign)
  680. *coeff = -(*coeff);
  681. *zero_run = 1;
  682. debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
  683. break;
  684. case 31:
  685. debug_token("DCT_RUN_CATEGORY2, ");
  686. sign = get_bits(gb, 1);
  687. *coeff = 2 + get_bits(gb, 1);
  688. if (sign)
  689. *coeff = -(*coeff);
  690. *zero_run = 2 + get_bits(gb, 1);
  691. debug_token("output %d 0s, then %d\n", *zero_run, *coeff);
  692. break;
  693. default:
  694. av_log(NULL, AV_LOG_ERROR, " vp3: help! Got a bad token: %d > 31\n", token);
  695. break;
  696. }
  697. }
  698. /*
  699. * This function wipes out all of the fragment data.
  700. */
  701. static void init_frame(Vp3DecodeContext *s, GetBitContext *gb)
  702. {
  703. int i;
  704. /* zero out all of the fragment information */
  705. s->coded_fragment_list_index = 0;
  706. for (i = 0; i < s->fragment_count; i++) {
  707. memset(s->all_fragments[i].coeffs, 0, 64 * sizeof(DCTELEM));
  708. s->all_fragments[i].coeff_count = 0;
  709. s->all_fragments[i].last_coeff = 0;
  710. s->all_fragments[i].motion_x = 0xbeef;
  711. s->all_fragments[i].motion_y = 0xbeef;
  712. }
  713. }
  714. /*
715. * This function sets up the dequantization tables used for a particular
  716. * frame.
  717. */
  718. static void init_dequantizer(Vp3DecodeContext *s)
  719. {
  720. int ac_scale_factor = s->coded_ac_scale_factor[s->quality_index];
  721. int dc_scale_factor = s->coded_dc_scale_factor[s->quality_index];
  722. int i, j;
  723. debug_vp3(" vp3: initializing dequantization tables\n");
  724. /*
  725. * Scale dequantizers:
  726. *
  727. * quantizer * sf
  728. * --------------
  729. * 100
  730. *
  731. * where sf = dc_scale_factor for DC quantizer
  732. * or ac_scale_factor for AC quantizer
  733. *
  734. * Then, saturate the result to a lower limit of MIN_DEQUANT_VAL.
  735. */
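/* Illustrative numbers (not taken from a real stream): with a coded DC
 * quantizer of 16 and dc_scale_factor = 50, the scaled value is
 * 16 * 50 / 100 = 8, which is then clamped to at least
 * MIN_DEQUANT_VAL * 2 = 4 and finally multiplied by SCALER (4) to give 32. */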
  736. #define SCALER 4
  737. /* scale DC quantizers */
  738. s->intra_y_dequant[0] = s->coded_intra_y_dequant[0] * dc_scale_factor / 100;
  739. if (s->intra_y_dequant[0] < MIN_DEQUANT_VAL * 2)
  740. s->intra_y_dequant[0] = MIN_DEQUANT_VAL * 2;
  741. s->intra_y_dequant[0] *= SCALER;
  742. s->intra_c_dequant[0] = s->coded_intra_c_dequant[0] * dc_scale_factor / 100;
  743. if (s->intra_c_dequant[0] < MIN_DEQUANT_VAL * 2)
  744. s->intra_c_dequant[0] = MIN_DEQUANT_VAL * 2;
  745. s->intra_c_dequant[0] *= SCALER;
  746. s->inter_dequant[0] = s->coded_inter_dequant[0] * dc_scale_factor / 100;
  747. if (s->inter_dequant[0] < MIN_DEQUANT_VAL * 4)
  748. s->inter_dequant[0] = MIN_DEQUANT_VAL * 4;
  749. s->inter_dequant[0] *= SCALER;
  750. /* scale AC quantizers, zigzag at the same time in preparation for
  751. * the dequantization phase */
  752. for (i = 1; i < 64; i++) {
  753. j = zigzag_index[i];
  754. s->intra_y_dequant[j] = s->coded_intra_y_dequant[i] * ac_scale_factor / 100;
  755. if (s->intra_y_dequant[j] < MIN_DEQUANT_VAL)
  756. s->intra_y_dequant[j] = MIN_DEQUANT_VAL;
  757. s->intra_y_dequant[j] *= SCALER;
  758. s->intra_c_dequant[j] = s->coded_intra_c_dequant[i] * ac_scale_factor / 100;
  759. if (s->intra_c_dequant[j] < MIN_DEQUANT_VAL)
  760. s->intra_c_dequant[j] = MIN_DEQUANT_VAL;
  761. s->intra_c_dequant[j] *= SCALER;
  762. s->inter_dequant[j] = s->coded_inter_dequant[i] * ac_scale_factor / 100;
  763. if (s->inter_dequant[j] < MIN_DEQUANT_VAL * 2)
  764. s->inter_dequant[j] = MIN_DEQUANT_VAL * 2;
  765. s->inter_dequant[j] *= SCALER;
  766. }
  767. memset(s->qscale_table, (FFMAX(s->intra_y_dequant[1], s->intra_c_dequant[1])+8)/16, 512); //FIXME finetune
  768. /* print debug information as requested */
  769. debug_dequantizers("intra Y dequantizers:\n");
  770. for (i = 0; i < 8; i++) {
  771. for (j = i * 8; j < i * 8 + 8; j++) {
  772. debug_dequantizers(" %4d,", s->intra_y_dequant[j]);
  773. }
  774. debug_dequantizers("\n");
  775. }
  776. debug_dequantizers("\n");
  777. debug_dequantizers("intra C dequantizers:\n");
  778. for (i = 0; i < 8; i++) {
  779. for (j = i * 8; j < i * 8 + 8; j++) {
  780. debug_dequantizers(" %4d,", s->intra_c_dequant[j]);
  781. }
  782. debug_dequantizers("\n");
  783. }
  784. debug_dequantizers("\n");
  785. debug_dequantizers("interframe dequantizers:\n");
  786. for (i = 0; i < 8; i++) {
  787. for (j = i * 8; j < i * 8 + 8; j++) {
  788. debug_dequantizers(" %4d,", s->inter_dequant[j]);
  789. }
  790. debug_dequantizers("\n");
  791. }
  792. debug_dequantizers("\n");
  793. }
  794. /*
  795. * This function is used to fetch runs of 1s or 0s from the bitstream for
  796. * use in determining which superblocks are fully and partially coded.
  797. *
798. * Codeword                RunLength
799. * 0                       1
800. * 10x                     2-3
801. * 110x                    4-5
802. * 1110xx                  6-9
803. * 11110xxx                10-17
804. * 111110xxxx              18-33
805. * 111111xxxxxxxxxxxx      34-4129
  806. */
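/* Illustrative example: the bits 1110 followed by the two extra bits 11
 * decode as 6 + 3 = 9, matching the 1110xx row of the table above. */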
  807. static int get_superblock_run_length(GetBitContext *gb)
  808. {
  809. if (get_bits(gb, 1) == 0)
  810. return 1;
  811. else if (get_bits(gb, 1) == 0)
  812. return (2 + get_bits(gb, 1));
  813. else if (get_bits(gb, 1) == 0)
  814. return (4 + get_bits(gb, 1));
  815. else if (get_bits(gb, 1) == 0)
  816. return (6 + get_bits(gb, 2));
  817. else if (get_bits(gb, 1) == 0)
  818. return (10 + get_bits(gb, 3));
  819. else if (get_bits(gb, 1) == 0)
  820. return (18 + get_bits(gb, 4));
  821. else
  822. return (34 + get_bits(gb, 12));
  823. }
  824. /*
  825. * This function is used to fetch runs of 1s or 0s from the bitstream for
  826. * use in determining which particular fragments are coded.
  827. *
828. * Codeword                RunLength
829. * 0x                      1-2
830. * 10x                     3-4
831. * 110x                    5-6
832. * 1110xx                  7-10
833. * 11110xx                 11-14
834. * 11111xxxx               15-30
  835. */
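/* Illustrative example: the bits 110 followed by the extra bit 1 decode
 * as 5 + 1 = 6, matching the 110x row of the table above. */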
  836. static int get_fragment_run_length(GetBitContext *gb)
  837. {
  838. if (get_bits(gb, 1) == 0)
  839. return (1 + get_bits(gb, 1));
  840. else if (get_bits(gb, 1) == 0)
  841. return (3 + get_bits(gb, 1));
  842. else if (get_bits(gb, 1) == 0)
  843. return (5 + get_bits(gb, 1));
  844. else if (get_bits(gb, 1) == 0)
  845. return (7 + get_bits(gb, 2));
  846. else if (get_bits(gb, 1) == 0)
  847. return (11 + get_bits(gb, 2));
  848. else
  849. return (15 + get_bits(gb, 4));
  850. }
  851. /*
  852. * This function decodes a VLC from the bitstream and returns a number
  853. * that ranges from 0..7. The number indicates which of the 8 coding
  854. * modes to use.
  855. *
856. * VLC          Number
857. * 0            0
858. * 10           1
859. * 110          2
860. * 1110         3
861. * 11110        4
862. * 111110       5
863. * 1111110      6
864. * 1111111      7
  865. *
  866. */
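/* This is effectively a unary code: the number of leading 1 bits (capped
 * at 7) selects the index into the current mode alphabet. */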
  867. static int get_mode_code(GetBitContext *gb)
  868. {
  869. if (get_bits(gb, 1) == 0)
  870. return 0;
  871. else if (get_bits(gb, 1) == 0)
  872. return 1;
  873. else if (get_bits(gb, 1) == 0)
  874. return 2;
  875. else if (get_bits(gb, 1) == 0)
  876. return 3;
  877. else if (get_bits(gb, 1) == 0)
  878. return 4;
  879. else if (get_bits(gb, 1) == 0)
  880. return 5;
  881. else if (get_bits(gb, 1) == 0)
  882. return 6;
  883. else
  884. return 7;
  885. }
  886. /*
  887. * This function extracts a motion vector from the bitstream using a VLC
  888. * scheme. 3 bits are fetched from the bitstream and 1 of 8 actions is
889. * taken depending on the value of those 3 bits:
  890. *
  891. * 0: return 0
  892. * 1: return 1
  893. * 2: return -1
  894. * 3: if (next bit is 1) return -2, else return 2
  895. * 4: if (next bit is 1) return -3, else return 3
  896. * 5: return 4 + (next 2 bits), next bit is sign
  897. * 6: return 8 + (next 3 bits), next bit is sign
  898. * 7: return 16 + (next 4 bits), next bit is sign
  899. */
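/* Illustrative example: a 3-bit value of 5 followed by the extra bits 10
 * and a sign bit of 1 decodes as -(4 + 2) = -6. */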
  900. static int get_motion_vector_vlc(GetBitContext *gb)
  901. {
  902. int bits;
  903. bits = get_bits(gb, 3);
  904. switch(bits) {
  905. case 0:
  906. bits = 0;
  907. break;
  908. case 1:
  909. bits = 1;
  910. break;
  911. case 2:
  912. bits = -1;
  913. break;
  914. case 3:
  915. if (get_bits(gb, 1) == 0)
  916. bits = 2;
  917. else
  918. bits = -2;
  919. break;
  920. case 4:
  921. if (get_bits(gb, 1) == 0)
  922. bits = 3;
  923. else
  924. bits = -3;
  925. break;
  926. case 5:
  927. bits = 4 + get_bits(gb, 2);
  928. if (get_bits(gb, 1) == 1)
  929. bits = -bits;
  930. break;
  931. case 6:
  932. bits = 8 + get_bits(gb, 3);
  933. if (get_bits(gb, 1) == 1)
  934. bits = -bits;
  935. break;
  936. case 7:
  937. bits = 16 + get_bits(gb, 4);
  938. if (get_bits(gb, 1) == 1)
  939. bits = -bits;
  940. break;
  941. }
  942. return bits;
  943. }
  944. /*
  945. * This function fetches a 5-bit number from the stream followed by
  946. * a sign and calls it a motion vector.
  947. */
  948. static int get_motion_vector_fixed(GetBitContext *gb)
  949. {
  950. int bits;
  951. bits = get_bits(gb, 5);
  952. if (get_bits(gb, 1) == 1)
  953. bits = -bits;
  954. return bits;
  955. }
  956. /*
  957. * This function unpacks all of the superblock/macroblock/fragment coding
  958. * information from the bitstream.
  959. */
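/* For non-keyframes this takes up to three passes over the bitstream:
 * first a run-length coded list marking superblocks as partially coded or
 * not, then (if needed) a second run-length list distinguishing fully
 * coded from uncoded superblocks, and finally per-fragment coded/not-coded
 * runs inside the partially coded superblocks. */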
  960. static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
  961. {
  962. int bit = 0;
  963. int current_superblock = 0;
  964. int current_run = 0;
  965. int decode_fully_flags = 0;
  966. int decode_partial_blocks = 0;
  967. int first_c_fragment_seen;
  968. int i, j;
  969. int current_fragment;
  970. debug_vp3(" vp3: unpacking superblock coding\n");
  971. if (s->keyframe) {
  972. debug_vp3(" keyframe-- all superblocks are fully coded\n");
  973. memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
  974. } else {
  975. /* unpack the list of partially-coded superblocks */
  976. bit = get_bits(gb, 1);
  977. /* toggle the bit because as soon as the first run length is
  978. * fetched the bit will be toggled again */
  979. bit ^= 1;
  980. while (current_superblock < s->superblock_count) {
  981. if (current_run == 0) {
  982. bit ^= 1;
  983. current_run = get_superblock_run_length(gb);
  984. debug_block_coding(" setting superblocks %d..%d to %s\n",
  985. current_superblock,
  986. current_superblock + current_run - 1,
  987. (bit) ? "partially coded" : "not coded");
  988. /* if any of the superblocks are not partially coded, flag
  989. * a boolean to decode the list of fully-coded superblocks */
  990. if (bit == 0) {
  991. decode_fully_flags = 1;
  992. } else {
  993. /* make a note of the fact that there are partially coded
  994. * superblocks */
  995. decode_partial_blocks = 1;
  996. }
  997. }
  998. s->superblock_coding[current_superblock++] =
  999. (bit) ? SB_PARTIALLY_CODED : SB_NOT_CODED;
  1000. current_run--;
  1001. }
  1002. /* unpack the list of fully coded superblocks if any of the blocks were
  1003. * not marked as partially coded in the previous step */
  1004. if (decode_fully_flags) {
  1005. current_superblock = 0;
  1006. current_run = 0;
  1007. bit = get_bits(gb, 1);
  1008. /* toggle the bit because as soon as the first run length is
  1009. * fetched the bit will be toggled again */
  1010. bit ^= 1;
  1011. while (current_superblock < s->superblock_count) {
  1012. /* skip any superblocks already marked as partially coded */
  1013. if (s->superblock_coding[current_superblock] == SB_NOT_CODED) {
  1014. if (current_run == 0) {
  1015. bit ^= 1;
  1016. current_run = get_superblock_run_length(gb);
  1017. }
  1018. debug_block_coding(" setting superblock %d to %s\n",
  1019. current_superblock,
  1020. (bit) ? "fully coded" : "not coded");
  1021. s->superblock_coding[current_superblock] =
  1022. (bit) ? SB_FULLY_CODED : SB_NOT_CODED;
  1023. current_run--;
  1024. }
  1025. current_superblock++;
  1026. }
  1027. }
  1028. /* if there were partial blocks, initialize bitstream for
  1029. * unpacking fragment codings */
  1030. if (decode_partial_blocks) {
  1031. current_run = 0;
  1032. bit = get_bits(gb, 1);
  1033. /* toggle the bit because as soon as the first run length is
  1034. * fetched the bit will be toggled again */
  1035. bit ^= 1;
  1036. }
  1037. }
  1038. /* figure out which fragments are coded; iterate through each
  1039. * superblock (all planes) */
  1040. s->coded_fragment_list_index = 0;
  1041. s->first_coded_y_fragment = s->first_coded_c_fragment = 0;
  1042. s->last_coded_y_fragment = s->last_coded_c_fragment = -1;
  1043. first_c_fragment_seen = 0;
  1044. memset(s->macroblock_coding, MODE_COPY, s->macroblock_count);
  1045. for (i = 0; i < s->superblock_count; i++) {
  1046. /* iterate through all 16 fragments in a superblock */
  1047. for (j = 0; j < 16; j++) {
  1048. /* if the fragment is in bounds, check its coding status */
  1049. current_fragment = s->superblock_fragments[i * 16 + j];
  1050. if (current_fragment >= s->fragment_count) {
  1051. av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_superblocks(): bad fragment number (%d >= %d)\n",
  1052. current_fragment, s->fragment_count);
  1053. return 1;
  1054. }
  1055. if (current_fragment != -1) {
  1056. if (s->superblock_coding[i] == SB_NOT_CODED) {
  1057. /* copy all the fragments from the prior frame */
  1058. s->all_fragments[current_fragment].coding_method =
  1059. MODE_COPY;
  1060. } else if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
  1061. /* fragment may or may not be coded; this is the case
  1062. * that cares about the fragment coding runs */
  1063. if (current_run == 0) {
  1064. bit ^= 1;
  1065. current_run = get_fragment_run_length(gb);
  1066. }
  1067. if (bit) {
  1068. /* default mode; actual mode will be decoded in
  1069. * the next phase */
  1070. s->all_fragments[current_fragment].coding_method =
  1071. MODE_INTER_NO_MV;
  1072. s->coded_fragment_list[s->coded_fragment_list_index] =
  1073. current_fragment;
  1074. if ((current_fragment >= s->u_fragment_start) &&
  1075. (s->last_coded_y_fragment == -1) &&
  1076. (!first_c_fragment_seen)) {
  1077. s->first_coded_c_fragment = s->coded_fragment_list_index;
  1078. s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
  1079. first_c_fragment_seen = 1;
  1080. }
  1081. s->coded_fragment_list_index++;
  1082. s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
  1083. debug_block_coding(" superblock %d is partially coded, fragment %d is coded\n",
  1084. i, current_fragment);
  1085. } else {
  1086. /* not coded; copy this fragment from the prior frame */
  1087. s->all_fragments[current_fragment].coding_method =
  1088. MODE_COPY;
  1089. debug_block_coding(" superblock %d is partially coded, fragment %d is not coded\n",
  1090. i, current_fragment);
  1091. }
  1092. current_run--;
  1093. } else {
  1094. /* fragments are fully coded in this superblock; actual
  1095. * coding will be determined in next step */
  1096. s->all_fragments[current_fragment].coding_method =
  1097. MODE_INTER_NO_MV;
  1098. s->coded_fragment_list[s->coded_fragment_list_index] =
  1099. current_fragment;
  1100. if ((current_fragment >= s->u_fragment_start) &&
  1101. (s->last_coded_y_fragment == -1) &&
  1102. (!first_c_fragment_seen)) {
  1103. s->first_coded_c_fragment = s->coded_fragment_list_index;
  1104. s->last_coded_y_fragment = s->first_coded_c_fragment - 1;
  1105. first_c_fragment_seen = 1;
  1106. }
  1107. s->coded_fragment_list_index++;
  1108. s->macroblock_coding[s->all_fragments[current_fragment].macroblock] = MODE_INTER_NO_MV;
  1109. debug_block_coding(" superblock %d is fully coded, fragment %d is coded\n",
  1110. i, current_fragment);
  1111. }
  1112. }
  1113. }
  1114. }
  1115. if (!first_c_fragment_seen)
  1116. /* only Y fragments coded in this frame */
  1117. s->last_coded_y_fragment = s->coded_fragment_list_index - 1;
  1118. else
  1119. /* end the list of coded C fragments */
  1120. s->last_coded_c_fragment = s->coded_fragment_list_index - 1;
  1121. debug_block_coding(" %d total coded fragments, y: %d -> %d, c: %d -> %d\n",
  1122. s->coded_fragment_list_index,
  1123. s->first_coded_y_fragment,
  1124. s->last_coded_y_fragment,
  1125. s->first_coded_c_fragment,
  1126. s->last_coded_c_fragment);
  1127. return 0;
  1128. }
  1129. /*
  1130. * This function unpacks all the coding mode data for individual macroblocks
  1131. * from the bitstream.
  1132. */
  1133. static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
  1134. {
  1135. int i, j, k;
  1136. int scheme;
  1137. int current_macroblock;
  1138. int current_fragment;
  1139. int coding_mode;
  1140. debug_vp3(" vp3: unpacking encoding modes\n");
  1141. if (s->keyframe) {
  1142. debug_vp3(" keyframe-- all blocks are coded as INTRA\n");
  1143. for (i = 0; i < s->fragment_count; i++)
  1144. s->all_fragments[i].coding_method = MODE_INTRA;
  1145. } else {
  1146. /* fetch the mode coding scheme for this frame */
  1147. scheme = get_bits(gb, 3);
  1148. debug_modes(" using mode alphabet %d\n", scheme);
  1149. /* is it a custom coding scheme? */
  1150. if (scheme == 0) {
  1151. debug_modes(" custom mode alphabet ahead:\n");
  1152. for (i = 0; i < 8; i++)
  1153. ModeAlphabet[scheme][get_bits(gb, 3)] = i;
  1154. }
  1155. for (i = 0; i < 8; i++)
  1156. debug_modes(" mode[%d][%d] = %d\n", scheme, i,
  1157. ModeAlphabet[scheme][i]);
  1158. /* iterate through all of the macroblocks that contain 1 or more
  1159. * coded fragments */
  1160. for (i = 0; i < s->u_superblock_start; i++) {
  1161. for (j = 0; j < 4; j++) {
  1162. current_macroblock = s->superblock_macroblocks[i * 4 + j];
  1163. if ((current_macroblock == -1) ||
  1164. (s->macroblock_coding[current_macroblock] == MODE_COPY))
  1165. continue;
  1166. if (current_macroblock >= s->macroblock_count) {
  1167. av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad macroblock number (%d >= %d)\n",
  1168. current_macroblock, s->macroblock_count);
  1169. return 1;
  1170. }
  1171. /* mode 7 means get 3 bits for each coding mode */
  1172. if (scheme == 7)
  1173. coding_mode = get_bits(gb, 3);
  1174. else
  1175. coding_mode = ModeAlphabet[scheme][get_mode_code(gb)];
  1176. s->macroblock_coding[current_macroblock] = coding_mode;
  1177. for (k = 0; k < 6; k++) {
  1178. current_fragment =
  1179. s->macroblock_fragments[current_macroblock * 6 + k];
  1180. if (current_fragment == -1)
  1181. continue;
  1182. if (current_fragment >= s->fragment_count) {
  1183. av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_modes(): bad fragment number (%d >= %d)\n",
  1184. current_fragment, s->fragment_count);
  1185. return 1;
  1186. }
  1187. if (s->all_fragments[current_fragment].coding_method !=
  1188. MODE_COPY)
  1189. s->all_fragments[current_fragment].coding_method =
  1190. coding_mode;
  1191. }
  1192. debug_modes(" coding method for macroblock starting @ fragment %d = %d\n",
  1193. s->macroblock_fragments[current_macroblock * 6], coding_mode);
  1194. }
  1195. }
  1196. }
  1197. return 0;
  1198. }
  1199. /*
  1200. * This function unpacks all the motion vectors for the individual
  1201. * macroblocks from the bitstream.
  1202. */
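/* Within each macroblock, motion_x/motion_y[0..3] correspond to the 4 Y
 * fragments and [4]/[5] to the 2 C fragments (derived by averaging in the
 * four-MV case), matching the 6-entry macroblock_fragments layout. */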
  1203. static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
  1204. {
  1205. int i, j, k;
  1206. int coding_mode;
  1207. int motion_x[6];
  1208. int motion_y[6];
  1209. int last_motion_x = 0;
  1210. int last_motion_y = 0;
  1211. int prior_last_motion_x = 0;
  1212. int prior_last_motion_y = 0;
  1213. int current_macroblock;
  1214. int current_fragment;
  1215. debug_vp3(" vp3: unpacking motion vectors\n");
  1216. if (s->keyframe) {
  1217. debug_vp3(" keyframe-- there are no motion vectors\n");
  1218. } else {
  1219. memset(motion_x, 0, 6 * sizeof(int));
  1220. memset(motion_y, 0, 6 * sizeof(int));
  1221. /* coding mode 0 is the VLC scheme; 1 is the fixed code scheme */
  1222. coding_mode = get_bits(gb, 1);
  1223. debug_vectors(" using %s scheme for unpacking motion vectors\n",
  1224. (coding_mode == 0) ? "VLC" : "fixed-length");
  1225. /* iterate through all of the macroblocks that contain 1 or more
  1226. * coded fragments */
  1227. for (i = 0; i < s->u_superblock_start; i++) {
  1228. for (j = 0; j < 4; j++) {
  1229. current_macroblock = s->superblock_macroblocks[i * 4 + j];
  1230. if ((current_macroblock == -1) ||
  1231. (s->macroblock_coding[current_macroblock] == MODE_COPY))
  1232. continue;
  1233. if (current_macroblock >= s->macroblock_count) {
  1234. av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad macroblock number (%d >= %d)\n",
  1235. current_macroblock, s->macroblock_count);
  1236. return 1;
  1237. }
  1238. current_fragment = s->macroblock_fragments[current_macroblock * 6];
  1239. if (current_fragment >= s->fragment_count) {
1240. av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
  1241. current_fragment, s->fragment_count);
  1242. return 1;
  1243. }
  1244. switch (s->macroblock_coding[current_macroblock]) {
  1245. case MODE_INTER_PLUS_MV:
  1246. case MODE_GOLDEN_MV:
  1247. /* all 6 fragments use the same motion vector */
  1248. if (coding_mode == 0) {
  1249. motion_x[0] = get_motion_vector_vlc(gb);
  1250. motion_y[0] = get_motion_vector_vlc(gb);
  1251. } else {
  1252. motion_x[0] = get_motion_vector_fixed(gb);
  1253. motion_y[0] = get_motion_vector_fixed(gb);
  1254. }
  1255. for (k = 1; k < 6; k++) {
  1256. motion_x[k] = motion_x[0];
  1257. motion_y[k] = motion_y[0];
  1258. }
  1259. /* vector maintenance, only on MODE_INTER_PLUS_MV */
  1260. if (s->macroblock_coding[current_macroblock] ==
  1261. MODE_INTER_PLUS_MV) {
  1262. prior_last_motion_x = last_motion_x;
  1263. prior_last_motion_y = last_motion_y;
  1264. last_motion_x = motion_x[0];
  1265. last_motion_y = motion_y[0];
  1266. }
  1267. break;
  1268. case MODE_INTER_FOURMV:
  1269. /* fetch 4 vectors from the bitstream, one for each
  1270. * Y fragment, then average for the C fragment vectors */
  1271. motion_x[4] = motion_y[4] = 0;
  1272. for (k = 0; k < 4; k++) {
  1273. if (coding_mode == 0) {
  1274. motion_x[k] = get_motion_vector_vlc(gb);
  1275. motion_y[k] = get_motion_vector_vlc(gb);
  1276. } else {
  1277. motion_x[k] = get_motion_vector_fixed(gb);
  1278. motion_y[k] = get_motion_vector_fixed(gb);
  1279. }
  1280. motion_x[4] += motion_x[k];
  1281. motion_y[4] += motion_y[k];
  1282. }
  1283. if (motion_x[4] >= 0)
  1284. motion_x[4] = (motion_x[4] + 2) / 4;
  1285. else
  1286. motion_x[4] = (motion_x[4] - 2) / 4;
  1287. motion_x[5] = motion_x[4];
  1288. if (motion_y[4] >= 0)
  1289. motion_y[4] = (motion_y[4] + 2) / 4;
  1290. else
  1291. motion_y[4] = (motion_y[4] - 2) / 4;
  1292. motion_y[5] = motion_y[4];
  1293. /* vector maintenance; vector[3] is treated as the
  1294. * last vector in this case */
  1295. prior_last_motion_x = last_motion_x;
  1296. prior_last_motion_y = last_motion_y;
  1297. last_motion_x = motion_x[3];
  1298. last_motion_y = motion_y[3];
  1299. break;
  1300. case MODE_INTER_LAST_MV:
  1301. /* all 6 fragments use the last motion vector */
  1302. motion_x[0] = last_motion_x;
  1303. motion_y[0] = last_motion_y;
  1304. for (k = 1; k < 6; k++) {
  1305. motion_x[k] = motion_x[0];
  1306. motion_y[k] = motion_y[0];
  1307. }
  1308. /* no vector maintenance (last vector remains the
  1309. * last vector) */
  1310. break;
  1311. case MODE_INTER_PRIOR_LAST:
  1312. /* all 6 fragments use the motion vector prior to the
  1313. * last motion vector */
  1314. motion_x[0] = prior_last_motion_x;
  1315. motion_y[0] = prior_last_motion_y;
  1316. for (k = 1; k < 6; k++) {
  1317. motion_x[k] = motion_x[0];
  1318. motion_y[k] = motion_y[0];
  1319. }
  1320. /* vector maintenance */
  1321. prior_last_motion_x = last_motion_x;
  1322. prior_last_motion_y = last_motion_y;
  1323. last_motion_x = motion_x[0];
  1324. last_motion_y = motion_y[0];
  1325. break;
  1326. default:
  1327. /* covers intra, inter without MV, golden without MV */
  1328. memset(motion_x, 0, 6 * sizeof(int));
  1329. memset(motion_y, 0, 6 * sizeof(int));
  1330. /* no vector maintenance */
  1331. break;
  1332. }
  1333. /* assign the motion vectors to the correct fragments */
  1334. debug_vectors(" vectors for macroblock starting @ fragment %d (coding method %d):\n",
  1335. current_fragment,
  1336. s->macroblock_coding[current_macroblock]);
  1337. for (k = 0; k < 6; k++) {
  1338. current_fragment =
  1339. s->macroblock_fragments[current_macroblock * 6 + k];
  1340. if (current_fragment == -1)
  1341. continue;
  1342. if (current_fragment >= s->fragment_count) {
  1343. av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vectors(): bad fragment number (%d >= %d)\n",
  1344. current_fragment, s->fragment_count);
  1345. return 1;
  1346. }
  1347. s->all_fragments[current_fragment].motion_x = motion_x[k];
  1348. s->all_fragments[current_fragment].motion_y = motion_y[k];
  1349. debug_vectors(" vector %d: fragment %d = (%d, %d)\n",
  1350. k, current_fragment, motion_x[k], motion_y[k]);
  1351. }
  1352. }
  1353. }
  1354. }
  1355. return 0;
  1356. }
  1357. /*
  1358. * This function is called by unpack_dct_coeffs() to extract the VLCs from
  1359. * the bitstream. The VLCs encode tokens which are used to unpack DCT
  1360. * data. This function unpacks all the VLCs for either the Y plane or both
  1361. * C planes, and is called for DC coefficients or different AC coefficient
1362. * levels (since different coefficient types require different VLC tables).
  1363. *
1364. * This function returns a residual EOB run. E.g., if a particular token gave
  1365. * instructions to EOB the next 5 fragments and there were only 2 fragments
  1366. * left in the current fragment range, 3 would be returned so that it could
  1367. * be passed into the next call to this same function.
  1368. */
  1369. static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
  1370. VLC *table, int coeff_index,
  1371. int first_fragment, int last_fragment,
  1372. int eob_run)
  1373. {
  1374. int i;
  1375. int token;
  1376. int zero_run;
  1377. DCTELEM coeff;
  1378. Vp3Fragment *fragment;
  1379. if ((first_fragment >= s->fragment_count) ||
  1380. (last_fragment >= s->fragment_count)) {
  1381. av_log(s->avctx, AV_LOG_ERROR, " vp3:unpack_vlcs(): bad fragment number (%d -> %d ?)\n",
  1382. first_fragment, last_fragment);
  1383. return 0;
  1384. }
  1385. for (i = first_fragment; i <= last_fragment; i++) {
  1386. fragment = &s->all_fragments[s->coded_fragment_list[i]];
  1387. if (fragment->coeff_count > coeff_index)
  1388. continue;
  1389. if (!eob_run) {
  1390. /* decode a VLC into a token */
  1391. token = get_vlc2(gb, table->table, 5, 3);
  1392. debug_vlc(" token = %2d, ", token);
  1393. /* use the token to get a zero run, a coefficient, and an eob run */
  1394. unpack_token(gb, token, &zero_run, &coeff, &eob_run);
  1395. }
  1396. if (!eob_run) {
  1397. fragment->coeff_count += zero_run;
  1398. if (fragment->coeff_count < 64)
  1399. fragment->coeffs[fragment->coeff_count++] = coeff;
  1400. debug_vlc(" fragment %d coeff = %d\n",
  1401. s->coded_fragment_list[i], fragment->coeffs[coeff_index]);
  1402. } else {
  1403. fragment->last_coeff = fragment->coeff_count;
  1404. fragment->coeff_count = 64;
  1405. debug_vlc(" fragment %d eob with %d coefficients\n",
  1406. s->coded_fragment_list[i], fragment->last_coeff);
  1407. eob_run--;
  1408. }
  1409. }
  1410. return eob_run;
  1411. }
  1412. /*
  1413. * This function unpacks all of the DCT coefficient data from the
  1414. * bitstream.
  1415. */
  1416. static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
  1417. {
  1418. int i;
  1419. int dc_y_table;
  1420. int dc_c_table;
  1421. int ac_y_table;
  1422. int ac_c_table;
  1423. int residual_eob_run = 0;
  1424. /* fetch the DC table indices */
  1425. dc_y_table = get_bits(gb, 4);
  1426. dc_c_table = get_bits(gb, 4);
  1427. /* unpack the Y plane DC coefficients */
  1428. debug_vp3(" vp3: unpacking Y plane DC coefficients using table %d\n",
  1429. dc_y_table);
  1430. residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_y_table], 0,
  1431. s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
  1432. /* unpack the C plane DC coefficients */
  1433. debug_vp3(" vp3: unpacking C plane DC coefficients using table %d\n",
  1434. dc_c_table);
  1435. residual_eob_run = unpack_vlcs(s, gb, &s->dc_vlc[dc_c_table], 0,
  1436. s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
  1437. /* fetch the AC table indices */
  1438. ac_y_table = get_bits(gb, 4);
  1439. ac_c_table = get_bits(gb, 4);
  1440. /* unpack the group 1 AC coefficients (coeffs 1-5) */
  1441. for (i = 1; i <= 5; i++) {
  1442. debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
  1443. i, ac_y_table);
  1444. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_y_table], i,
  1445. s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
  1446. debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
  1447. i, ac_c_table);
  1448. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_1[ac_c_table], i,
  1449. s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
  1450. }
  1451. /* unpack the group 2 AC coefficients (coeffs 6-14) */
  1452. for (i = 6; i <= 14; i++) {
  1453. debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
  1454. i, ac_y_table);
  1455. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_y_table], i,
  1456. s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
  1457. debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
  1458. i, ac_c_table);
  1459. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_2[ac_c_table], i,
  1460. s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
  1461. }
  1462. /* unpack the group 3 AC coefficients (coeffs 15-27) */
  1463. for (i = 15; i <= 27; i++) {
  1464. debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
  1465. i, ac_y_table);
  1466. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_y_table], i,
  1467. s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
  1468. debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
  1469. i, ac_c_table);
  1470. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_3[ac_c_table], i,
  1471. s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
  1472. }
  1473. /* unpack the group 4 AC coefficients (coeffs 28-63) */
  1474. for (i = 28; i <= 63; i++) {
  1475. debug_vp3(" vp3: unpacking level %d Y plane AC coefficients using table %d\n",
  1476. i, ac_y_table);
  1477. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_y_table], i,
  1478. s->first_coded_y_fragment, s->last_coded_y_fragment, residual_eob_run);
  1479. debug_vp3(" vp3: unpacking level %d C plane AC coefficients using table %d\n",
  1480. i, ac_c_table);
  1481. residual_eob_run = unpack_vlcs(s, gb, &s->ac_vlc_4[ac_c_table], i,
  1482. s->first_coded_c_fragment, s->last_coded_c_fragment, residual_eob_run);
  1483. }
  1484. return 0;
  1485. }
  1486. /*
  1487. * This function reverses the DC prediction for each coded fragment in
  1488. * the frame. Much of this function is adapted directly from the original
  1489. * VP3 source code.
  1490. */
  1491. #define COMPATIBLE_FRAME(x) \
  1492. (compatible_frame[s->all_fragments[x].coding_method] == current_frame_type)
  1493. #define FRAME_CODED(x) (s->all_fragments[x].coding_method != MODE_COPY)
  1494. static inline int iabs (int x) { return ((x < 0) ? -x : x); }
  1495. static void reverse_dc_prediction(Vp3DecodeContext *s,
  1496. int first_fragment,
  1497. int fragment_width,
  1498. int fragment_height)
  1499. {
  1500. #define PUL 8
  1501. #define PU 4
  1502. #define PUR 2
  1503. #define PL 1
  1504. int x, y;
  1505. int i = first_fragment;
  1506. /*
  1507. * Fragment prediction groups:
  1508. *
  1509. * 32222222226
  1510. * 10000000004
  1511. * 10000000004
  1512. * 10000000004
  1513. * 10000000004
  1514. *
1515. * Note: Groups 5 and 7 do not exist, as either would require the
1516. * fragment's x coordinate to be both 0 and (width - 1) at the same time.
  1517. */
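/* Added note: the digits in the diagram above are the predictor_group values
 * computed further down as
 *     group = (x == 0) + ((y == 0) << 1) + ((x + 1 == fragment_width) << 2)
 * so, for example, the top-left fragment (x == 0, y == 0) gets group
 * 1 + 2 + 0 = 3, matching the '3' in the diagram's corner. */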
  1518. int predictor_group;
  1519. short predicted_dc;
  1520. /* validity flags for the left, up-left, up, and up-right fragments */
  1521. int fl, ful, fu, fur;
  1522. /* DC values for the left, up-left, up, and up-right fragments */
  1523. int vl, vul, vu, vur;
  1524. /* indices for the left, up-left, up, and up-right fragments */
  1525. int l, ul, u, ur;
  1526. /*
  1527. * The 6 fields mean:
  1528. * 0: up-left multiplier
  1529. * 1: up multiplier
  1530. * 2: up-right multiplier
  1531. * 3: left multiplier
  1532. * 4: mask
  1533. * 5: right bit shift divisor (e.g., 7 means >>=7, a.k.a. div by 128)
  1534. */
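/* Added worked example (illustrative values only): the multipliers in each
 * row sum to 2^shift, so every transform is a weighted average of the
 * neighboring DC values. For PUL|PU|PL, {-26, 29, 0, 29, 31, 5}, with
 * vul = 10, vu = 12, vl = 14:
 *     predicted_dc = -26*10 + 29*12 + 29*14 = 494, and 494 >> 5 = 15
 * (the mask of 31 only comes into play for negative sums, see below). */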
  1535. int predictor_transform[16][6] = {
  1536. { 0, 0, 0, 0, 0, 0 },
  1537. { 0, 0, 0, 1, 0, 0 }, // PL
  1538. { 0, 0, 1, 0, 0, 0 }, // PUR
  1539. { 0, 0, 53, 75, 127, 7 }, // PUR|PL
  1540. { 0, 1, 0, 0, 0, 0 }, // PU
  1541. { 0, 1, 0, 1, 1, 1 }, // PU|PL
  1542. { 0, 1, 0, 0, 0, 0 }, // PU|PUR
  1543. { 0, 0, 53, 75, 127, 7 }, // PU|PUR|PL
  1544. { 1, 0, 0, 0, 0, 0 }, // PUL
  1545. { 0, 0, 0, 1, 0, 0 }, // PUL|PL
  1546. { 1, 0, 1, 0, 1, 1 }, // PUL|PUR
  1547. { 0, 0, 53, 75, 127, 7 }, // PUL|PUR|PL
  1548. { 0, 1, 0, 0, 0, 0 }, // PUL|PU
  1549. {-26, 29, 0, 29, 31, 5 }, // PUL|PU|PL
  1550. { 3, 10, 3, 0, 15, 4 }, // PUL|PU|PUR
  1551. {-26, 29, 0, 29, 31, 5 } // PUL|PU|PUR|PL
  1552. };
  1553. /* This table shows which types of blocks can use other blocks for
  1554. * prediction. For example, INTRA is the only mode in this table to
  1555. * have a frame number of 0. That means INTRA blocks can only predict
  1556. * from other INTRA blocks. There are 2 golden frame coding types;
1557. * blocks encoded in these modes can only predict from other blocks
1558. * that were encoded with one of these 2 modes. */
  1559. unsigned char compatible_frame[8] = {
  1560. 1, /* MODE_INTER_NO_MV */
  1561. 0, /* MODE_INTRA */
  1562. 1, /* MODE_INTER_PLUS_MV */
  1563. 1, /* MODE_INTER_LAST_MV */
  1564. 1, /* MODE_INTER_PRIOR_MV */
  1565. 2, /* MODE_USING_GOLDEN */
  1566. 2, /* MODE_GOLDEN_MV */
  1567. 1 /* MODE_INTER_FOUR_MV */
  1568. };
  1569. int current_frame_type;
  1570. /* there is a last DC predictor for each of the 3 frame types */
  1571. short last_dc[3];
  1572. int transform = 0;
  1573. debug_vp3(" vp3: reversing DC prediction\n");
  1574. vul = vu = vur = vl = 0;
  1575. last_dc[0] = last_dc[1] = last_dc[2] = 0;
  1576. /* for each fragment row... */
  1577. for (y = 0; y < fragment_height; y++) {
  1578. /* for each fragment in a row... */
  1579. for (x = 0; x < fragment_width; x++, i++) {
  1580. /* reverse prediction if this block was coded */
  1581. if (s->all_fragments[i].coding_method != MODE_COPY) {
  1582. current_frame_type =
  1583. compatible_frame[s->all_fragments[i].coding_method];
  1584. predictor_group = (x == 0) + ((y == 0) << 1) +
  1585. ((x + 1 == fragment_width) << 2);
  1586. debug_dc_pred(" frag %d: group %d, orig DC = %d, ",
  1587. i, predictor_group, s->all_fragments[i].coeffs[0]);
  1588. switch (predictor_group) {
  1589. case 0:
  1590. /* main body of fragments; consider all 4 possible
  1591. * fragments for prediction */
  1592. /* calculate the indices of the predicting fragments */
  1593. ul = i - fragment_width - 1;
  1594. u = i - fragment_width;
  1595. ur = i - fragment_width + 1;
  1596. l = i - 1;
  1597. /* fetch the DC values for the predicting fragments */
  1598. vul = s->all_fragments[ul].coeffs[0];
  1599. vu = s->all_fragments[u].coeffs[0];
  1600. vur = s->all_fragments[ur].coeffs[0];
  1601. vl = s->all_fragments[l].coeffs[0];
  1602. /* figure out which fragments are valid */
  1603. ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
  1604. fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
  1605. fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
  1606. fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
  1607. /* decide which predictor transform to use */
  1608. transform = (fl*PL) | (fu*PU) | (ful*PUL) | (fur*PUR);
  1609. break;
  1610. case 1:
  1611. /* left column of fragments, not including top corner;
  1612. * only consider up and up-right fragments */
  1613. /* calculate the indices of the predicting fragments */
  1614. u = i - fragment_width;
  1615. ur = i - fragment_width + 1;
  1616. /* fetch the DC values for the predicting fragments */
  1617. vu = s->all_fragments[u].coeffs[0];
  1618. vur = s->all_fragments[ur].coeffs[0];
  1619. /* figure out which fragments are valid */
  1620. fur = FRAME_CODED(ur) && COMPATIBLE_FRAME(ur);
  1621. fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
  1622. /* decide which predictor transform to use */
  1623. transform = (fu*PU) | (fur*PUR);
  1624. break;
  1625. case 2:
  1626. case 6:
  1627. /* top row of fragments, not including top-left frag;
  1628. * only consider the left fragment for prediction */
  1629. /* calculate the indices of the predicting fragments */
  1630. l = i - 1;
  1631. /* fetch the DC values for the predicting fragments */
  1632. vl = s->all_fragments[l].coeffs[0];
  1633. /* figure out which fragments are valid */
  1634. fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
  1635. /* decide which predictor transform to use */
  1636. transform = (fl*PL);
  1637. break;
  1638. case 3:
  1639. /* top-left fragment */
  1640. /* nothing to predict from in this case */
  1641. transform = 0;
  1642. break;
  1643. case 4:
  1644. /* right column of fragments, not including top corner;
  1645. * consider up-left, up, and left fragments for
  1646. * prediction */
  1647. /* calculate the indices of the predicting fragments */
  1648. ul = i - fragment_width - 1;
  1649. u = i - fragment_width;
  1650. l = i - 1;
  1651. /* fetch the DC values for the predicting fragments */
  1652. vul = s->all_fragments[ul].coeffs[0];
  1653. vu = s->all_fragments[u].coeffs[0];
  1654. vl = s->all_fragments[l].coeffs[0];
  1655. /* figure out which fragments are valid */
  1656. ful = FRAME_CODED(ul) && COMPATIBLE_FRAME(ul);
  1657. fu = FRAME_CODED(u) && COMPATIBLE_FRAME(u);
  1658. fl = FRAME_CODED(l) && COMPATIBLE_FRAME(l);
  1659. /* decide which predictor transform to use */
  1660. transform = (fl*PL) | (fu*PU) | (ful*PUL);
  1661. break;
  1662. }
  1663. debug_dc_pred("transform = %d, ", transform);
  1664. if (transform == 0) {
  1665. /* if there were no fragments to predict from, use last
  1666. * DC saved */
  1667. s->all_fragments[i].coeffs[0] += last_dc[current_frame_type];
  1668. debug_dc_pred("from last DC (%d) = %d\n",
  1669. current_frame_type, s->all_fragments[i].coeffs[0]);
  1670. } else {
  1671. /* apply the appropriate predictor transform */
  1672. predicted_dc =
  1673. (predictor_transform[transform][0] * vul) +
  1674. (predictor_transform[transform][1] * vu) +
  1675. (predictor_transform[transform][2] * vur) +
  1676. (predictor_transform[transform][3] * vl);
  1677. /* if there is a shift value in the transform, add
  1678. * the sign bit before the shift */
  1679. if (predictor_transform[transform][5] != 0) {
  1680. predicted_dc += ((predicted_dc >> 15) &
  1681. predictor_transform[transform][4]);
  1682. predicted_dc >>= predictor_transform[transform][5];
  1683. }
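/* Added note: adding the mask (2^shift - 1) to negative sums makes the
 * arithmetic shift round toward zero rather than toward minus infinity,
 * e.g. with shift 5 and predicted_dc = -100: (-100 + 31) >> 5 = -3,
 * whereas -100 >> 5 alone would give -4. */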
  1684. /* check for outranging on the [ul u l] and
  1685. * [ul u ur l] predictors */
  1686. if ((transform == 13) || (transform == 15)) {
  1687. if (iabs(predicted_dc - vu) > 128)
  1688. predicted_dc = vu;
  1689. else if (iabs(predicted_dc - vl) > 128)
  1690. predicted_dc = vl;
  1691. else if (iabs(predicted_dc - vul) > 128)
  1692. predicted_dc = vul;
  1693. }
  1694. /* at long last, apply the predictor */
  1695. s->all_fragments[i].coeffs[0] += predicted_dc;
  1696. debug_dc_pred("from pred DC = %d\n",
  1697. s->all_fragments[i].coeffs[0]);
  1698. }
  1699. /* save the DC */
  1700. last_dc[current_frame_type] = s->all_fragments[i].coeffs[0];
  1701. }
  1702. }
  1703. }
  1704. }
  1705. /*
  1706. * This function performs the final rendering of each fragment's data
  1707. * onto the output frame.
  1708. */
  1709. static void render_fragments(Vp3DecodeContext *s,
  1710. int first_fragment,
  1711. int width,
  1712. int height,
  1713. int plane /* 0 = Y, 1 = U, 2 = V */)
  1714. {
  1715. int x, y;
  1716. int m, n;
  1717. int i = first_fragment;
  1718. int16_t *dequantizer;
  1719. DCTELEM __align16 output_samples[64];
  1720. unsigned char *output_plane;
  1721. unsigned char *last_plane;
  1722. unsigned char *golden_plane;
  1723. int stride;
  1724. int motion_x = 0xdeadbeef, motion_y = 0xdeadbeef;
  1725. int upper_motion_limit, lower_motion_limit;
  1726. int motion_halfpel_index;
  1727. uint8_t *motion_source;
  1728. debug_vp3(" vp3: rendering final fragments for %s\n",
  1729. (plane == 0) ? "Y plane" : (plane == 1) ? "U plane" : "V plane");
  1730. /* set up plane-specific parameters */
  1731. if (plane == 0) {
  1732. dequantizer = s->intra_y_dequant;
  1733. output_plane = s->current_frame.data[0];
  1734. last_plane = s->last_frame.data[0];
  1735. golden_plane = s->golden_frame.data[0];
  1736. stride = s->current_frame.linesize[0];
  1737. if (!s->flipped_image) stride = -stride;
  1738. upper_motion_limit = 7 * s->current_frame.linesize[0];
  1739. lower_motion_limit = height * s->current_frame.linesize[0] + width - 8;
  1740. } else if (plane == 1) {
  1741. dequantizer = s->intra_c_dequant;
  1742. output_plane = s->current_frame.data[1];
  1743. last_plane = s->last_frame.data[1];
  1744. golden_plane = s->golden_frame.data[1];
  1745. stride = s->current_frame.linesize[1];
  1746. if (!s->flipped_image) stride = -stride;
  1747. upper_motion_limit = 7 * s->current_frame.linesize[1];
  1748. lower_motion_limit = height * s->current_frame.linesize[1] + width - 8;
  1749. } else {
  1750. dequantizer = s->intra_c_dequant;
  1751. output_plane = s->current_frame.data[2];
  1752. last_plane = s->last_frame.data[2];
  1753. golden_plane = s->golden_frame.data[2];
  1754. stride = s->current_frame.linesize[2];
  1755. if (!s->flipped_image) stride = -stride;
  1756. upper_motion_limit = 7 * s->current_frame.linesize[2];
  1757. lower_motion_limit = height * s->current_frame.linesize[2] + width - 8;
  1758. }
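/* Added note: when flipped_image is 0 the stride is negated, so the 8 rows
 * of each fragment are written upward in memory starting from first_pixel;
 * the pixel address functions further down are computed to match whichever
 * of the two orientations is in use. */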
  1759. /* for each fragment row... */
  1760. for (y = 0; y < height; y += 8) {
  1761. /* for each fragment in a row... */
  1762. for (x = 0; x < width; x += 8, i++) {
  1763. if ((i < 0) || (i >= s->fragment_count)) {
  1764. av_log(s->avctx, AV_LOG_ERROR, " vp3:render_fragments(): bad fragment number (%d)\n", i);
  1765. return;
  1766. }
  1767. /* transform if this block was coded */
  1768. if ((s->all_fragments[i].coding_method != MODE_COPY) &&
  1769. !((s->avctx->flags & CODEC_FLAG_GRAY) && plane)) {
  1770. if ((s->all_fragments[i].coding_method == MODE_USING_GOLDEN) ||
  1771. (s->all_fragments[i].coding_method == MODE_GOLDEN_MV))
  1772. motion_source= golden_plane;
  1773. else
  1774. motion_source= last_plane;
  1775. motion_source += s->all_fragments[i].first_pixel;
  1776. motion_halfpel_index = 0;
  1777. /* sort out the motion vector if this fragment is coded
  1778. * using a motion vector method */
  1779. if ((s->all_fragments[i].coding_method > MODE_INTRA) &&
  1780. (s->all_fragments[i].coding_method != MODE_USING_GOLDEN)) {
  1781. int src_x, src_y;
  1782. motion_x = s->all_fragments[i].motion_x;
  1783. motion_y = s->all_fragments[i].motion_y;
  1784. if(plane){
  1785. motion_x= (motion_x>>1) | (motion_x&1);
  1786. motion_y= (motion_y>>1) | (motion_y&1);
  1787. }
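/* Added note: chroma vectors are the luma vector halved with the half-pel
 * bit kept alive, e.g. motion_x = 5 (2.5 luma pixels) becomes
 * (5 >> 1) | (5 & 1) = 3, i.e. 1.5 chroma pixels, still half-pel. */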
  1788. src_x= (motion_x>>1) + x;
  1789. src_y= (motion_y>>1) + y;
  1790. if ((motion_x == 0xbeef) || (motion_y == 0xbeef))
  1791. av_log(s->avctx, AV_LOG_ERROR, " help! got beefy vector! (%X, %X)\n", motion_x, motion_y);
  1792. motion_halfpel_index = motion_x & 0x01;
  1793. motion_source += (motion_x >> 1);
  1794. // motion_y = -motion_y;
  1795. motion_halfpel_index |= (motion_y & 0x01) << 1;
  1796. motion_source += ((motion_y >> 1) * stride);
  1797. if(src_x<0 || src_y<0 || src_x + 9 >= width || src_y + 9 >= height){
  1798. uint8_t *temp= s->edge_emu_buffer;
  1799. if(stride<0) temp -= 9*stride;
  1800. else temp += 9*stride;
  1801. ff_emulated_edge_mc(temp, motion_source, stride, 9, 9, src_x, src_y, width, height);
  1802. motion_source= temp;
  1803. }
  1804. }
  1805. /* first, take care of copying a block from either the
  1806. * previous or the golden frame */
  1807. if (s->all_fragments[i].coding_method != MODE_INTRA) {
1808. // Note: it is possible to implement all MC cases with put_no_rnd_pixels_l2, which would look more like the VP3 source, but this would be slower as put_no_rnd_pixels_tab is better optimized
  1809. if(motion_halfpel_index != 3){
  1810. s->dsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
  1811. output_plane + s->all_fragments[i].first_pixel,
  1812. motion_source, stride, 8);
  1813. }else{
  1814. int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
  1815. s->dsp.put_no_rnd_pixels_l2[1](
  1816. output_plane + s->all_fragments[i].first_pixel,
  1817. motion_source - d,
  1818. motion_source + stride + 1 + d,
  1819. stride, 8);
  1820. }
  1821. }
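/* Added note: motion_halfpel_index packs the half-pel flags as bit 0 =
 * horizontal, bit 1 = vertical, so values 0-2 map directly onto entries of
 * put_no_rnd_pixels_tab and only the diagonal case 3 needs the explicit
 * two-source average above. */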
  1822. /* dequantize the DCT coefficients */
  1823. debug_idct("fragment %d, coding mode %d, DC = %d, dequant = %d:\n",
  1824. i, s->all_fragments[i].coding_method,
  1825. s->all_fragments[i].coeffs[0], dequantizer[0]);
  1826. /* invert DCT and place (or add) in final output */
  1827. s->dsp.vp3_idct(s->all_fragments[i].coeffs,
  1828. dequantizer,
  1829. s->all_fragments[i].coeff_count,
  1830. output_samples);
  1831. if (s->all_fragments[i].coding_method == MODE_INTRA) {
  1832. s->dsp.put_signed_pixels_clamped(output_samples,
  1833. output_plane + s->all_fragments[i].first_pixel,
  1834. stride);
  1835. } else {
  1836. s->dsp.add_pixels_clamped(output_samples,
  1837. output_plane + s->all_fragments[i].first_pixel,
  1838. stride);
  1839. }
  1840. debug_idct("block after idct_%s():\n",
  1841. (s->all_fragments[i].coding_method == MODE_INTRA)?
  1842. "put" : "add");
  1843. for (m = 0; m < 8; m++) {
  1844. for (n = 0; n < 8; n++) {
  1845. debug_idct(" %3d", *(output_plane +
  1846. s->all_fragments[i].first_pixel + (m * stride + n)));
  1847. }
  1848. debug_idct("\n");
  1849. }
  1850. debug_idct("\n");
  1851. } else {
  1852. /* copy directly from the previous frame */
  1853. s->dsp.put_pixels_tab[1][0](
  1854. output_plane + s->all_fragments[i].first_pixel,
  1855. last_plane + s->all_fragments[i].first_pixel,
  1856. stride, 8);
  1857. }
  1858. }
  1859. }
  1860. emms_c();
  1861. }
  1862. /*
  1863. * This function computes the first pixel addresses for each fragment.
  1864. * This function needs to be invoked after the first frame is allocated
  1865. * so that it has access to the plane strides.
  1866. */
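/* Added worked example (hypothetical numbers): with a Y plane linesize of
 * 192 and the fragment at x = 1, y = 2 in the loop below, the formula gives
 *     first_pixel = 192*2*8 - 192 + 1*8 = 2888
 * i.e. first_pixel is a byte offset from the start of the plane. */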
  1867. static void vp3_calculate_pixel_addresses(Vp3DecodeContext *s)
  1868. {
  1869. int i, x, y;
  1870. /* figure out the first pixel addresses for each of the fragments */
  1871. /* Y plane */
  1872. i = 0;
  1873. for (y = s->fragment_height; y > 0; y--) {
  1874. for (x = 0; x < s->fragment_width; x++) {
  1875. s->all_fragments[i++].first_pixel =
  1876. s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
  1877. s->golden_frame.linesize[0] +
  1878. x * FRAGMENT_PIXELS;
  1879. debug_init(" fragment %d, first pixel @ %d\n",
  1880. i-1, s->all_fragments[i-1].first_pixel);
  1881. }
  1882. }
  1883. /* U plane */
  1884. i = s->u_fragment_start;
  1885. for (y = s->fragment_height / 2; y > 0; y--) {
  1886. for (x = 0; x < s->fragment_width / 2; x++) {
  1887. s->all_fragments[i++].first_pixel =
  1888. s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
  1889. s->golden_frame.linesize[1] +
  1890. x * FRAGMENT_PIXELS;
  1891. debug_init(" fragment %d, first pixel @ %d\n",
  1892. i-1, s->all_fragments[i-1].first_pixel);
  1893. }
  1894. }
  1895. /* V plane */
  1896. i = s->v_fragment_start;
  1897. for (y = s->fragment_height / 2; y > 0; y--) {
  1898. for (x = 0; x < s->fragment_width / 2; x++) {
  1899. s->all_fragments[i++].first_pixel =
  1900. s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
  1901. s->golden_frame.linesize[2] +
  1902. x * FRAGMENT_PIXELS;
  1903. debug_init(" fragment %d, first pixel @ %d\n",
  1904. i-1, s->all_fragments[i-1].first_pixel);
  1905. }
  1906. }
  1907. }
  1908. /* FIXME: this should be merged with the above! */
  1909. static void theora_calculate_pixel_addresses(Vp3DecodeContext *s)
  1910. {
  1911. int i, x, y;
  1912. /* figure out the first pixel addresses for each of the fragments */
  1913. /* Y plane */
  1914. i = 0;
  1915. for (y = 1; y <= s->fragment_height; y++) {
  1916. for (x = 0; x < s->fragment_width; x++) {
  1917. s->all_fragments[i++].first_pixel =
  1918. s->golden_frame.linesize[0] * y * FRAGMENT_PIXELS -
  1919. s->golden_frame.linesize[0] +
  1920. x * FRAGMENT_PIXELS;
  1921. debug_init(" fragment %d, first pixel @ %d\n",
  1922. i-1, s->all_fragments[i-1].first_pixel);
  1923. }
  1924. }
  1925. /* U plane */
  1926. i = s->u_fragment_start;
  1927. for (y = 1; y <= s->fragment_height / 2; y++) {
  1928. for (x = 0; x < s->fragment_width / 2; x++) {
  1929. s->all_fragments[i++].first_pixel =
  1930. s->golden_frame.linesize[1] * y * FRAGMENT_PIXELS -
  1931. s->golden_frame.linesize[1] +
  1932. x * FRAGMENT_PIXELS;
  1933. debug_init(" fragment %d, first pixel @ %d\n",
  1934. i-1, s->all_fragments[i-1].first_pixel);
  1935. }
  1936. }
  1937. /* V plane */
  1938. i = s->v_fragment_start;
  1939. for (y = 1; y <= s->fragment_height / 2; y++) {
  1940. for (x = 0; x < s->fragment_width / 2; x++) {
  1941. s->all_fragments[i++].first_pixel =
  1942. s->golden_frame.linesize[2] * y * FRAGMENT_PIXELS -
  1943. s->golden_frame.linesize[2] +
  1944. x * FRAGMENT_PIXELS;
  1945. debug_init(" fragment %d, first pixel @ %d\n",
  1946. i-1, s->all_fragments[i-1].first_pixel);
  1947. }
  1948. }
  1949. }
  1950. /*
  1951. * This is the ffmpeg/libavcodec API init function.
  1952. */
  1953. static int vp3_decode_init(AVCodecContext *avctx)
  1954. {
  1955. Vp3DecodeContext *s = avctx->priv_data;
  1956. int i;
  1957. int c_width;
  1958. int c_height;
  1959. int y_superblock_count;
  1960. int c_superblock_count;
  1961. if (avctx->codec_tag == MKTAG('V','P','3','0'))
  1962. s->version = 0;
  1963. else
  1964. s->version = 1;
  1965. s->avctx = avctx;
  1966. #if 0
  1967. s->width = avctx->width;
  1968. s->height = avctx->height;
  1969. #else
  1970. s->width = (avctx->width + 15) & 0xFFFFFFF0;
  1971. s->height = (avctx->height + 15) & 0xFFFFFFF0;
  1972. #endif
  1973. avctx->pix_fmt = PIX_FMT_YUV420P;
  1974. avctx->has_b_frames = 0;
  1975. dsputil_init(&s->dsp, avctx);
  1976. s->dsp.vp3_dsp_init();
  1977. /* initialize to an impossible value which will force a recalculation
  1978. * in the first frame decode */
  1979. s->quality_index = -1;
  1980. s->y_superblock_width = (s->width + 31) / 32;
  1981. s->y_superblock_height = (s->height + 31) / 32;
  1982. y_superblock_count = s->y_superblock_width * s->y_superblock_height;
  1983. /* work out the dimensions for the C planes */
  1984. c_width = s->width / 2;
  1985. c_height = s->height / 2;
  1986. s->c_superblock_width = (c_width + 31) / 32;
  1987. s->c_superblock_height = (c_height + 31) / 32;
  1988. c_superblock_count = s->c_superblock_width * s->c_superblock_height;
  1989. s->superblock_count = y_superblock_count + (c_superblock_count * 2);
  1990. s->u_superblock_start = y_superblock_count;
  1991. s->v_superblock_start = s->u_superblock_start + c_superblock_count;
  1992. s->superblock_coding = av_malloc(s->superblock_count);
  1993. s->macroblock_width = (s->width + 15) / 16;
  1994. s->macroblock_height = (s->height + 15) / 16;
  1995. s->macroblock_count = s->macroblock_width * s->macroblock_height;
  1996. s->fragment_width = s->width / FRAGMENT_PIXELS;
  1997. s->fragment_height = s->height / FRAGMENT_PIXELS;
  1998. /* fragment count covers all 8x8 blocks for all 3 planes */
  1999. s->fragment_count = s->fragment_width * s->fragment_height * 3 / 2;
  2000. s->u_fragment_start = s->fragment_width * s->fragment_height;
  2001. s->v_fragment_start = s->fragment_width * s->fragment_height * 5 / 4;
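/* Added worked example (hypothetical 320x240 stream): fragment_width = 40
 * and fragment_height = 30, so the Y plane holds 1200 fragments,
 * fragment_count = 1200 * 3 / 2 = 1800, u_fragment_start = 1200 and
 * v_fragment_start = 1200 * 5 / 4 = 1500, leaving 300 fragments each for
 * U and V. */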
  2002. debug_init(" Y plane: %d x %d\n", s->width, s->height);
  2003. debug_init(" C plane: %d x %d\n", c_width, c_height);
  2004. debug_init(" Y superblocks: %d x %d, %d total\n",
  2005. s->y_superblock_width, s->y_superblock_height, y_superblock_count);
  2006. debug_init(" C superblocks: %d x %d, %d total\n",
  2007. s->c_superblock_width, s->c_superblock_height, c_superblock_count);
  2008. debug_init(" total superblocks = %d, U starts @ %d, V starts @ %d\n",
  2009. s->superblock_count, s->u_superblock_start, s->v_superblock_start);
  2010. debug_init(" macroblocks: %d x %d, %d total\n",
  2011. s->macroblock_width, s->macroblock_height, s->macroblock_count);
  2012. debug_init(" %d fragments, %d x %d, u starts @ %d, v starts @ %d\n",
  2013. s->fragment_count,
  2014. s->fragment_width,
  2015. s->fragment_height,
  2016. s->u_fragment_start,
  2017. s->v_fragment_start);
  2018. s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
  2019. s->coded_fragment_list = av_malloc(s->fragment_count * sizeof(int));
  2020. s->pixel_addresses_inited = 0;
  2021. if (!s->theora_tables)
  2022. {
  2023. for (i = 0; i < 64; i++)
  2024. s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
  2025. for (i = 0; i < 64; i++)
  2026. s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
  2027. for (i = 0; i < 64; i++)
  2028. s->coded_intra_y_dequant[i] = vp31_intra_y_dequant[i];
  2029. for (i = 0; i < 64; i++)
  2030. s->coded_intra_c_dequant[i] = vp31_intra_c_dequant[i];
  2031. for (i = 0; i < 64; i++)
  2032. s->coded_inter_dequant[i] = vp31_inter_dequant[i];
  2033. }
  2034. /* init VLC tables */
  2035. for (i = 0; i < 16; i++) {
  2036. /* DC histograms */
  2037. init_vlc(&s->dc_vlc[i], 5, 32,
  2038. &dc_bias[i][0][1], 4, 2,
  2039. &dc_bias[i][0][0], 4, 2);
  2040. /* group 1 AC histograms */
  2041. init_vlc(&s->ac_vlc_1[i], 5, 32,
  2042. &ac_bias_0[i][0][1], 4, 2,
  2043. &ac_bias_0[i][0][0], 4, 2);
  2044. /* group 2 AC histograms */
  2045. init_vlc(&s->ac_vlc_2[i], 5, 32,
  2046. &ac_bias_1[i][0][1], 4, 2,
  2047. &ac_bias_1[i][0][0], 4, 2);
  2048. /* group 3 AC histograms */
  2049. init_vlc(&s->ac_vlc_3[i], 5, 32,
  2050. &ac_bias_2[i][0][1], 4, 2,
  2051. &ac_bias_2[i][0][0], 4, 2);
  2052. /* group 4 AC histograms */
  2053. init_vlc(&s->ac_vlc_4[i], 5, 32,
  2054. &ac_bias_3[i][0][1], 4, 2,
  2055. &ac_bias_3[i][0][0], 4, 2);
  2056. }
  2057. /* build quantization zigzag table */
  2058. for (i = 0; i < 64; i++)
  2059. zigzag_index[dezigzag_index[i]] = i;
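/* Added note: the loop above builds the inverse permutation, so if
 * coefficient k is the n-th entry in zigzag order (dezigzag_index[n] == k)
 * then zigzag_index[k] == n gives the zigzag position of natural-order
 * coefficient k. */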
  2060. /* work out the block mapping tables */
  2061. s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
  2062. s->superblock_macroblocks = av_malloc(s->superblock_count * 4 * sizeof(int));
  2063. s->macroblock_fragments = av_malloc(s->macroblock_count * 6 * sizeof(int));
  2064. s->macroblock_coding = av_malloc(s->macroblock_count + 1);
  2065. init_block_mapping(s);
  2066. for (i = 0; i < 3; i++) {
  2067. s->current_frame.data[i] = NULL;
  2068. s->last_frame.data[i] = NULL;
  2069. s->golden_frame.data[i] = NULL;
  2070. }
  2071. return 0;
  2072. }
  2073. /*
  2074. * This is the ffmpeg/libavcodec API frame decode function.
  2075. */
  2076. static int vp3_decode_frame(AVCodecContext *avctx,
  2077. void *data, int *data_size,
  2078. uint8_t *buf, int buf_size)
  2079. {
  2080. Vp3DecodeContext *s = avctx->priv_data;
  2081. GetBitContext gb;
  2082. static int counter = 0;
  2083. init_get_bits(&gb, buf, buf_size * 8);
  2084. if (s->theora && get_bits1(&gb))
  2085. {
  2086. int ptype = get_bits(&gb, 7);
  2087. skip_bits(&gb, 6*8); /* "theora" */
  2088. switch(ptype)
  2089. {
  2090. case 1:
  2091. theora_decode_comments(avctx, gb);
  2092. break;
  2093. case 2:
  2094. theora_decode_tables(avctx, gb);
  2095. init_dequantizer(s);
  2096. break;
  2097. default:
  2098. av_log(avctx, AV_LOG_ERROR, "Unknown Theora config packet: %d\n", ptype);
  2099. }
  2100. return buf_size;
  2101. }
  2102. s->keyframe = !get_bits1(&gb);
  2103. if (!s->theora)
  2104. skip_bits(&gb, 1);
  2105. s->last_quality_index = s->quality_index;
  2106. s->quality_index = get_bits(&gb, 6);
  2107. if (s->theora >= 0x030300)
  2108. skip_bits1(&gb);
  2109. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  2110. av_log(s->avctx, AV_LOG_INFO, " VP3 %sframe #%d: Q index = %d\n",
  2111. s->keyframe?"key":"", counter, s->quality_index);
  2112. counter++;
  2113. if (s->quality_index != s->last_quality_index)
  2114. init_dequantizer(s);
  2115. if (s->keyframe) {
  2116. if (!s->theora)
  2117. {
  2118. skip_bits(&gb, 4); /* width code */
  2119. skip_bits(&gb, 4); /* height code */
  2120. if (s->version)
  2121. {
  2122. s->version = get_bits(&gb, 5);
  2123. if (counter == 1)
  2124. av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
  2125. }
  2126. }
  2127. if (s->version || s->theora)
  2128. {
  2129. if (get_bits1(&gb))
  2130. av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
  2131. skip_bits(&gb, 2); /* reserved? */
  2132. }
  2133. if (s->last_frame.data[0] == s->golden_frame.data[0]) {
  2134. if (s->golden_frame.data[0])
  2135. avctx->release_buffer(avctx, &s->golden_frame);
  2136. s->last_frame= s->golden_frame; /* ensure that we catch any access to this released frame */
  2137. } else {
  2138. if (s->golden_frame.data[0])
  2139. avctx->release_buffer(avctx, &s->golden_frame);
  2140. if (s->last_frame.data[0])
  2141. avctx->release_buffer(avctx, &s->last_frame);
  2142. }
  2143. s->golden_frame.reference = 3;
  2144. if(avctx->get_buffer(avctx, &s->golden_frame) < 0) {
  2145. av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
  2146. return -1;
  2147. }
  2148. /* golden frame is also the current frame */
  2149. memcpy(&s->current_frame, &s->golden_frame, sizeof(AVFrame));
  2150. /* time to figure out pixel addresses? */
  2151. if (!s->pixel_addresses_inited)
  2152. {
  2153. if (!s->flipped_image)
  2154. vp3_calculate_pixel_addresses(s);
  2155. else
  2156. theora_calculate_pixel_addresses(s);
  2157. }
  2158. } else {
  2159. /* allocate a new current frame */
  2160. s->current_frame.reference = 3;
  2161. if(avctx->get_buffer(avctx, &s->current_frame) < 0) {
  2162. av_log(s->avctx, AV_LOG_ERROR, "vp3: get_buffer() failed\n");
  2163. return -1;
  2164. }
  2165. }
  2166. s->current_frame.qscale_table= s->qscale_table; //FIXME allocate individual tables per AVFrame
  2167. s->current_frame.qstride= 0;
  2168. init_frame(s, &gb);
  2169. #if KEYFRAMES_ONLY
  2170. if (!s->keyframe) {
  2171. memcpy(s->current_frame.data[0], s->golden_frame.data[0],
  2172. s->current_frame.linesize[0] * s->height);
  2173. memcpy(s->current_frame.data[1], s->golden_frame.data[1],
  2174. s->current_frame.linesize[1] * s->height / 2);
  2175. memcpy(s->current_frame.data[2], s->golden_frame.data[2],
  2176. s->current_frame.linesize[2] * s->height / 2);
  2177. } else {
  2178. #endif
  2179. if (unpack_superblocks(s, &gb) ||
  2180. unpack_modes(s, &gb) ||
  2181. unpack_vectors(s, &gb) ||
  2182. unpack_dct_coeffs(s, &gb)) {
  2183. av_log(s->avctx, AV_LOG_ERROR, " vp3: could not decode frame\n");
  2184. return -1;
  2185. }
  2186. reverse_dc_prediction(s, 0, s->fragment_width, s->fragment_height);
  2187. render_fragments(s, 0, s->width, s->height, 0);
  2188. if ((avctx->flags & CODEC_FLAG_GRAY) == 0) {
  2189. reverse_dc_prediction(s, s->u_fragment_start,
  2190. s->fragment_width / 2, s->fragment_height / 2);
  2191. reverse_dc_prediction(s, s->v_fragment_start,
  2192. s->fragment_width / 2, s->fragment_height / 2);
  2193. render_fragments(s, s->u_fragment_start, s->width / 2, s->height / 2, 1);
  2194. render_fragments(s, s->v_fragment_start, s->width / 2, s->height / 2, 2);
  2195. } else {
  2196. memset(s->current_frame.data[1], 0x80, s->width * s->height / 4);
  2197. memset(s->current_frame.data[2], 0x80, s->width * s->height / 4);
  2198. }
  2199. #if KEYFRAMES_ONLY
  2200. }
  2201. #endif
  2202. *data_size=sizeof(AVFrame);
  2203. *(AVFrame*)data= s->current_frame;
  2204. /* release the last frame, if it is allocated and if it is not the
  2205. * golden frame */
  2206. if ((s->last_frame.data[0]) &&
  2207. (s->last_frame.data[0] != s->golden_frame.data[0]))
  2208. avctx->release_buffer(avctx, &s->last_frame);
  2209. /* shuffle frames (last = current) */
  2210. memcpy(&s->last_frame, &s->current_frame, sizeof(AVFrame));
  2211. s->current_frame.data[0]= NULL; /* ensure that we catch any access to this released frame */
  2212. return buf_size;
  2213. }
  2214. /*
  2215. * This is the ffmpeg/libavcodec API module cleanup function.
  2216. */
  2217. static int vp3_decode_end(AVCodecContext *avctx)
  2218. {
  2219. Vp3DecodeContext *s = avctx->priv_data;
  2220. av_free(s->all_fragments);
  2221. av_free(s->coded_fragment_list);
  2222. av_free(s->superblock_fragments);
  2223. av_free(s->superblock_macroblocks);
  2224. av_free(s->macroblock_fragments);
  2225. av_free(s->macroblock_coding);
  2226. /* release all frames */
  2227. if (s->golden_frame.data[0] && s->golden_frame.data[0] != s->last_frame.data[0])
  2228. avctx->release_buffer(avctx, &s->golden_frame);
  2229. if (s->last_frame.data[0])
  2230. avctx->release_buffer(avctx, &s->last_frame);
  2231. /* no need to release the current_frame since it will always be pointing
  2232. * to the same frame as either the golden or last frame */
  2233. return 0;
  2234. }
  2235. static int theora_decode_header(AVCodecContext *avctx, GetBitContext gb)
  2236. {
  2237. Vp3DecodeContext *s = avctx->priv_data;
  2238. int major, minor, micro;
  2239. major = get_bits(&gb, 8); /* version major */
  2240. minor = get_bits(&gb, 8); /* version minor */
  2241. micro = get_bits(&gb, 8); /* version micro */
  2242. av_log(avctx, AV_LOG_INFO, "Theora bitstream version %d.%d.%d\n",
  2243. major, minor, micro);
2244. /* FIXME: endianness? */
  2245. s->theora = (major << 16) | (minor << 8) | micro;
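/* Added note: the version is packed as 0xMMmmuu, so bitstream 3.2.1 becomes
 * 0x030201; this is why the checks in this decoder compare against
 * 0x030300, i.e. version 3.3.0. */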
  2246. /* 3.3.0 aka alpha3 has the same frame orientation as original vp3 */
  2247. /* but previous versions have the image flipped relative to vp3 */
  2248. if (s->theora < 0x030300)
  2249. {
  2250. s->flipped_image = 1;
  2251. av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
  2252. }
  2253. s->width = get_bits(&gb, 16) << 4;
  2254. s->height = get_bits(&gb, 16) << 4;
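/* Added note (assumption about the header layout): these 16-bit fields
 * appear to count 16-pixel units, hence the << 4, e.g. a stored value of 20
 * yields a width of 320 pixels; the 24-bit fields skipped next carry the
 * exact frame dimensions. */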
  2255. skip_bits(&gb, 24); /* frame width */
  2256. skip_bits(&gb, 24); /* frame height */
  2257. skip_bits(&gb, 8); /* offset x */
  2258. skip_bits(&gb, 8); /* offset y */
  2259. skip_bits(&gb, 32); /* fps numerator */
2260. skip_bits(&gb, 32); /* fps denominator */
  2261. skip_bits(&gb, 24); /* aspect numerator */
2262. skip_bits(&gb, 24); /* aspect denominator */
  2263. if (s->theora < 0x030300)
  2264. skip_bits(&gb, 5); /* keyframe frequency force */
  2265. skip_bits(&gb, 8); /* colorspace */
  2266. skip_bits(&gb, 24); /* bitrate */
  2267. skip_bits(&gb, 6); /* last(?) quality index */
  2268. if (s->theora >= 0x030300)
  2269. {
  2270. skip_bits(&gb, 5); /* keyframe frequency force */
  2271. skip_bits(&gb, 5); /* spare bits */
  2272. }
  2273. // align_get_bits(&gb);
  2274. avctx->width = s->width;
  2275. avctx->height = s->height;
  2276. vp3_decode_init(avctx);
  2277. return 0;
  2278. }
  2279. static int theora_decode_comments(AVCodecContext *avctx, GetBitContext gb)
  2280. {
  2281. int nb_comments, i, tmp;
  2282. tmp = get_bits(&gb, 32);
  2283. tmp = be2me_32(tmp);
  2284. while(tmp--)
  2285. skip_bits(&gb, 8);
  2286. nb_comments = get_bits(&gb, 32);
  2287. nb_comments = be2me_32(nb_comments);
  2288. for (i = 0; i < nb_comments; i++)
  2289. {
  2290. tmp = get_bits(&gb, 32);
  2291. tmp = be2me_32(tmp);
  2292. while(tmp--)
  2293. skip_bits(&gb, 8);
  2294. }
  2295. return 0;
  2296. }
  2297. static int theora_decode_tables(AVCodecContext *avctx, GetBitContext gb)
  2298. {
  2299. Vp3DecodeContext *s = avctx->priv_data;
  2300. int i;
  2301. /* quality threshold table */
  2302. for (i = 0; i < 64; i++)
  2303. s->coded_ac_scale_factor[i] = get_bits(&gb, 16);
  2304. /* dc scale factor table */
  2305. for (i = 0; i < 64; i++)
  2306. s->coded_dc_scale_factor[i] = get_bits(&gb, 16);
  2307. /* y coeffs */
  2308. for (i = 0; i < 64; i++)
  2309. s->coded_intra_y_dequant[i] = get_bits(&gb, 8);
  2310. /* uv coeffs */
  2311. for (i = 0; i < 64; i++)
  2312. s->coded_intra_c_dequant[i] = get_bits(&gb, 8);
  2313. /* inter coeffs */
  2314. for (i = 0; i < 64; i++)
  2315. s->coded_inter_dequant[i] = get_bits(&gb, 8);
2316. /* FIXME: read Huffman tree... */
  2317. s->theora_tables = 1;
  2318. return 0;
  2319. }
  2320. static int theora_decode_init(AVCodecContext *avctx)
  2321. {
  2322. Vp3DecodeContext *s = avctx->priv_data;
  2323. GetBitContext gb;
  2324. int ptype;
  2325. s->theora = 1;
  2326. if (!avctx->extradata_size)
  2327. return -1;
  2328. init_get_bits(&gb, avctx->extradata, avctx->extradata_size);
  2329. ptype = get_bits(&gb, 8);
  2330. debug_vp3("Theora headerpacket type: %x\n", ptype);
  2331. if (!(ptype & 0x80))
  2332. return -1;
  2333. skip_bits(&gb, 6*8); /* "theora" */
  2334. switch(ptype)
  2335. {
  2336. case 0x80:
  2337. theora_decode_header(avctx, gb);
  2338. vp3_decode_init(avctx);
  2339. break;
  2340. case 0x81:
  2341. theora_decode_comments(avctx, gb);
  2342. break;
  2343. case 0x82:
  2344. theora_decode_tables(avctx, gb);
  2345. break;
  2346. }
  2347. return 0;
  2348. }
  2349. AVCodec vp3_decoder = {
  2350. "vp3",
  2351. CODEC_TYPE_VIDEO,
  2352. CODEC_ID_VP3,
  2353. sizeof(Vp3DecodeContext),
  2354. vp3_decode_init,
  2355. NULL,
  2356. vp3_decode_end,
  2357. vp3_decode_frame,
  2358. 0,
  2359. NULL
  2360. };
  2361. AVCodec theora_decoder = {
  2362. "theora",
  2363. CODEC_TYPE_VIDEO,
  2364. CODEC_ID_THEORA,
  2365. sizeof(Vp3DecodeContext),
  2366. theora_decode_init,
  2367. NULL,
  2368. vp3_decode_end,
  2369. vp3_decode_frame,
  2370. 0,
  2371. NULL
  2372. };