/*
 * MidiVid MV30 decoder
 *
 * Copyright (c) 2020 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/thread.h"

#include "avcodec.h"
#include "bytestream.h"
#include "copy_block.h"
#include "mathops.h"
#include "blockdsp.h"
#include "get_bits.h"
#include "internal.h"
#include "aandcttab.h"

typedef struct MV30Context {
    GetBitContext gb;

    int intra_quant;
    int inter_quant;
    int is_inter;
    int mode_size;
    int nb_mvectors;

    int      block[6][64];
    int16_t *mvectors;
    unsigned int mvectors_size;
    int16_t *coeffs;
    unsigned int coeffs_size;

    int16_t intraq_tab[2][64];
    int16_t interq_tab[2][64];

    BlockDSPContext bdsp;

    AVFrame *prev_frame;
} MV30Context;

static VLC cbp_tab;

static const uint8_t luma_tab[] = {
    12, 12, 15, 19, 25, 34, 40, 48,
    12, 12, 18, 22, 27, 44, 47, 46,
    17, 18, 21, 26, 35, 46, 52, 47,
    18, 20, 24, 28, 40, 61, 59, 51,
    20, 24, 32, 43, 50, 72, 72, 63,
    25, 31, 42, 48, 58, 72, 81, 75,
    38, 46, 54, 61, 71, 84, 88, 85,
    50, 61, 65, 68, 79, 78, 86, 91,
};

static const uint8_t chroma_tab[] = {
    12, 16, 24, 47, 99, 99, 99, 99,
    16, 21, 26, 66, 99, 99, 99, 99,
    24, 26, 56, 99, 99, 99, 99, 99,
    47, 66, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
    99, 99, 99, 99, 99, 99, 99, 99,
};

static const uint8_t zigzag[] = {
     0,  1,  8,  9, 16,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63,
};
static void get_qtable(int16_t *table, int quant, const uint8_t *quant_tab)
{
    int factor = quant < 50 ? 5000 / FFMAX(quant, 1) : 200 - FFMIN(quant, 100) * 2;

    for (int i = 0; i < 64; i++) {
        table[i] = av_clip((quant_tab[i] * factor + 0x32) / 100, 1, 0x7fff);
        table[i] = ((int)ff_aanscales[i] * (int)table[i] + 0x800) >> 12;
    }
}
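
/* Fixed-point 1-D scaled IDCT with 8 fractional bits; the constants 362,
 * 473, 669 and 277 appear to correspond to the AAN factorization, matching
 * the ff_aanscales premultiplication performed in get_qtable(). */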
static inline void idct_1d(int *blk, int step)
{
    const int t0 = blk[0 * step] + blk[4 * step];
    const int t1 = blk[0 * step] - blk[4 * step];
    const int t2 = blk[2 * step] + blk[6 * step];
    const int t3 = (((blk[2 * step] - blk[6 * step]) * 362) >> 8) - t2;
    const int t4 = t0 + t2;
    const int t5 = t0 - t2;
    const int t6 = t1 + t3;
    const int t7 = t1 - t3;
    const int t8 = blk[5 * step] + blk[3 * step];
    const int t9 = blk[5 * step] - blk[3 * step];
    const int tA = blk[1 * step] + blk[7 * step];
    const int tB = blk[1 * step] - blk[7 * step];
    const int tC = t8 + tA;
    const int tD = (tB + t9) * 473 >> 8;
    const int tE = ((t9 * -669 >> 8) - tC) + tD;
    const int tF = ((tA - t8) * 362 >> 8) - tE;
    const int t10 = ((tB * 277 >> 8) - tD) + tF;

    blk[0 * step] = t4 + tC;
    blk[1 * step] = t6 + tE;
    blk[2 * step] = t7 + tF;
    blk[3 * step] = t5 - t10;
    blk[4 * step] = t5 + t10;
    blk[5 * step] = t7 - tF;
    blk[6 * step] = t6 - tE;
    blk[7 * step] = t4 - tC;
}

static void idct_put(uint8_t *dst, int stride, int *block)
{
    for (int i = 0; i < 8; i++) {
        if ((block[0x08 + i] |
             block[0x10 + i] |
             block[0x18 + i] |
             block[0x20 + i] |
             block[0x28 + i] |
             block[0x30 + i] |
             block[0x38 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        idct_1d(block, 1);
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8((block[j] >> 5) + 128);
        block += 8;
        dst += stride;
    }
}

static void idct_add(uint8_t *dst, int stride,
                     const uint8_t *src, int in_linesize, int *block)
{
    for (int i = 0; i < 8; i++) {
        if ((block[0x08 + i] |
             block[0x10 + i] |
             block[0x18 + i] |
             block[0x20 + i] |
             block[0x28 + i] |
             block[0x30 + i] |
             block[0x38 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        idct_1d(block, 1);
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
        block += 8;
        dst += stride;
        src += in_linesize;
    }
}

static inline void idct2_1d(int *blk, int step)
{
    const int t0 = blk[0 * step];
    const int t1 = blk[1 * step];
    const int t2 = t1 * 473 >> 8;
    const int t3 = t2 - t1;
    const int t4 = (t1 * 362 >> 8) - t3;
    const int t5 = ((t1 * 277 >> 8) - t2) + t4;

    blk[0 * step] = t1 + t0;
    blk[1 * step] = t0 + t3;
    blk[2 * step] = t4 + t0;
    blk[3 * step] = t0 - t5;
    blk[4 * step] = t5 + t0;
    blk[5 * step] = t0 - t4;
    blk[6 * step] = t0 - t3;
    blk[7 * step] = t0 - t1;
}

static void idct2_put(uint8_t *dst, int stride, int *block)
{
    for (int i = 0; i < 2; i++) {
        if ((block[0x08 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct2_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        if (block[1] == 0) {
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[0] >> 5) + 128);
        } else {
            idct2_1d(block, 1);
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[j] >> 5) + 128);
        }
        block += 8;
        dst += stride;
    }
}

static void idct2_add(uint8_t *dst, int stride,
                      const uint8_t *src, int in_linesize,
                      int *block)
{
    for (int i = 0; i < 2; i++) {
        if ((block[0x08 + i]) == 0) {
            block[0x08 + i] = block[i];
            block[0x10 + i] = block[i];
            block[0x18 + i] = block[i];
            block[0x20 + i] = block[i];
            block[0x28 + i] = block[i];
            block[0x30 + i] = block[i];
            block[0x38 + i] = block[i];
        } else {
            idct2_1d(block + i, 8);
        }
    }

    for (int i = 0; i < 8; i++) {
        if (block[1] == 0) {
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[0] >> 5) + src[j]);
        } else {
            idct2_1d(block, 1);
            for (int j = 0; j < 8; j++)
                dst[j] = av_clip_uint8((block[j] >> 5) + src[j]);
        }
        block += 8;
        dst += stride;
        src += in_linesize;
    }
}

static void update_inter_block(uint8_t *dst, int stride,
                               const uint8_t *src, int in_linesize,
                               int block)
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++)
            dst[j] = av_clip_uint8(block + src[j]);
        dst += stride;
        src += in_linesize;
    }
}
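
/* Per-block coding modes (2 bits each from the mode stream):
 * 0: flat fill with 128, 1: DC only, 2: 2x2 low-frequency coefficients,
 * 3: full 8x8 coefficient block. decode_inter_block() below uses the same
 * four modes, but applied on top of a prediction from the previous frame. */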
static int decode_intra_block(AVCodecContext *avctx, int mode,
                              GetByteContext *gbyte, int16_t *qtab,
                              int *block, int *pfill,
                              uint8_t *dst, int linesize)
{
    MV30Context *s = avctx->priv_data;
    int fill;

    switch (mode) {
    case 0:
        s->bdsp.fill_block_tab[1](dst, 128, linesize, 8);
        break;
    case 1:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = ((pfill[0] * qtab[0]) >> 5) + 128;
        s->bdsp.fill_block_tab[1](dst, block[0], linesize, 8);
        break;
    case 2:
        memset(block, 0, sizeof(*block) * 64);
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = pfill[0] * qtab[0];
        block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
        block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
        block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
        idct2_put(dst, linesize, block);
        break;
    case 3:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = pfill[0] * qtab[0];
        for (int i = 1; i < 64; i++)
            block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
        idct_put(dst, linesize, block);
        break;
    }

    return 0;
}

static int decode_inter_block(AVCodecContext *avctx, int mode,
                              GetByteContext *gbyte, int16_t *qtab,
                              int *block, int *pfill,
                              uint8_t *dst, int linesize,
                              const uint8_t *src, int in_linesize)
{
    int fill;

    switch (mode) {
    case 0:
        copy_block8(dst, src, linesize, in_linesize, 8);
        break;
    case 1:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = (pfill[0] * qtab[0]) >> 5;
        update_inter_block(dst, linesize, src, in_linesize, block[0]);
        break;
    case 2:
        memset(block, 0, sizeof(*block) * 64);
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = pfill[0] * qtab[0];
        block[1] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[1];
        block[8] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[8];
        block[9] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[9];
        idct2_add(dst, linesize, src, in_linesize, block);
        break;
    case 3:
        fill = sign_extend(bytestream2_get_ne16(gbyte), 16);
        pfill[0] += fill;
        block[0] = pfill[0] * qtab[0];
        for (int i = 1; i < 64; i++)
            block[zigzag[i]] = sign_extend(bytestream2_get_ne16(gbyte), 16) * qtab[zigzag[i]];
        idct_add(dst, linesize, src, in_linesize, block);
        break;
    }

    return 0;
}
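
/* Unpack the VLC-coded stream into a flat int16_t array: a positive VLC
 * value gives the magnitude class of the next coefficient (sign folded into
 * the raw bits), while a zero value encodes a run of entries to skip,
 * which stay zero from the initial memset. */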
static int decode_coeffs(GetBitContext *gb, int16_t *coeffs, int nb_codes)
{
    memset(coeffs, 0, nb_codes * sizeof(*coeffs));

    for (int i = 0; i < nb_codes;) {
        int value = get_vlc2(gb, cbp_tab.table, cbp_tab.bits, 1);

        if (value < 0)
            return AVERROR_INVALIDDATA;

        if (value > 0) {
            int x = get_bits(gb, value);

            if (x < (1 << value) / 2) {
                x =  (1 << (value - 1)) + (x & ((1 << value) - 1 >> 1));
            } else {
                x = -(1 << (value - 1)) - (x & ((1 << value) - 1 >> 1));
            }
            coeffs[i++] = x;
        } else {
            int flag = get_bits1(gb);

            i += get_bits(gb, 3 + flag * 3) + 1 + flag * 8;
        }
    }

    return 0;
}

static int decode_intra(AVCodecContext *avctx, GetBitContext *gb, AVFrame *frame)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext mgb;
    uint8_t *dst[6];
    int linesize[6];
    int ret;

    mgb = *gb;
    skip_bits_long(gb, s->mode_size * 8);

    linesize[0] = frame->linesize[0];
    linesize[1] = frame->linesize[0];
    linesize[2] = frame->linesize[0];
    linesize[3] = frame->linesize[0];
    linesize[4] = frame->linesize[1];
    linesize[5] = frame->linesize[2];

    for (int y = 0; y < avctx->height; y += 16) {
        GetByteContext gbyte;
        int pfill[3][1] = { {0} };
        int nb_codes = get_bits(gb, 16);

        av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
        if (!s->coeffs)
            return AVERROR(ENOMEM);
        ret = decode_coeffs(gb, s->coeffs, nb_codes);
        if (ret < 0)
            return ret;

        bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));

        for (int x = 0; x < avctx->width; x += 16) {
            dst[0] = frame->data[0] + linesize[0] *  y      + x;
            dst[1] = frame->data[0] + linesize[0] *  y      + x + 8;
            dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
            dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
            dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
            dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);

            for (int b = 0; b < 6; b++) {
                int mode = get_bits_le(&mgb, 2);

                ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
                                         s->block[b],
                                         pfill[(b >= 4) + (b >= 5)],
                                         dst[b], linesize[b]);
                if (ret < 0)
                    return ret;
            }
        }
    }

    return 0;
}

static int decode_inter(AVCodecContext *avctx, GetBitContext *gb,
                        AVFrame *frame, AVFrame *prev)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext mask;
    GetBitContext mgb;
    GetByteContext mv;
    const int mask_size = ((avctx->height >> 4) * (avctx->width >> 4) * 2 + 7) / 8;
    uint8_t *dst[6], *src[6];
    int in_linesize[6];
    int linesize[6];
    int ret, cnt = 0;
    int flags = 0;

    in_linesize[0] = prev->linesize[0];
    in_linesize[1] = prev->linesize[0];
    in_linesize[2] = prev->linesize[0];
    in_linesize[3] = prev->linesize[0];
    in_linesize[4] = prev->linesize[1];
    in_linesize[5] = prev->linesize[2];

    linesize[0] = frame->linesize[0];
    linesize[1] = frame->linesize[0];
    linesize[2] = frame->linesize[0];
    linesize[3] = frame->linesize[0];
    linesize[4] = frame->linesize[1];
    linesize[5] = frame->linesize[2];

    av_fast_padded_malloc(&s->mvectors, &s->mvectors_size, 2 * s->nb_mvectors * sizeof(*s->mvectors));
    if (!s->mvectors) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    mask = *gb;
    skip_bits_long(gb, mask_size * 8);
    mgb = *gb;
    skip_bits_long(gb, s->mode_size * 8);

    ret = decode_coeffs(gb, s->mvectors, 2 * s->nb_mvectors);
    if (ret < 0)
        goto fail;

    bytestream2_init(&mv, (uint8_t *)s->mvectors, 2 * s->nb_mvectors * sizeof(*s->mvectors));

    for (int y = 0; y < avctx->height; y += 16) {
        GetByteContext gbyte;
        int pfill[3][1] = { {0} };
        int nb_codes = get_bits(gb, 16);

        skip_bits(gb, 8);
        if (get_bits_left(gb) < 0) {
            ret = AVERROR_INVALIDDATA;
            goto fail;
        }

        av_fast_padded_malloc(&s->coeffs, &s->coeffs_size, nb_codes * sizeof(*s->coeffs));
        if (!s->coeffs) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = decode_coeffs(gb, s->coeffs, nb_codes);
        if (ret < 0)
            goto fail;

        bytestream2_init(&gbyte, (uint8_t *)s->coeffs, nb_codes * sizeof(*s->coeffs));

        for (int x = 0; x < avctx->width; x += 16) {
            if (cnt >= 4)
                cnt = 0;
            if (cnt == 0)
                flags = get_bits(&mask, 8);

            dst[0] = frame->data[0] + linesize[0] *  y      + x;
            dst[1] = frame->data[0] + linesize[0] *  y      + x + 8;
            dst[2] = frame->data[0] + linesize[0] * (y + 8) + x;
            dst[3] = frame->data[0] + linesize[0] * (y + 8) + x + 8;
            dst[4] = frame->data[1] + linesize[4] * (y >> 1) + (x >> 1);
            dst[5] = frame->data[2] + linesize[5] * (y >> 1) + (x >> 1);

            if ((flags >> (cnt)) & 1) {
                int mv_x = sign_extend(bytestream2_get_ne16(&mv), 16);
                int mv_y = sign_extend(bytestream2_get_ne16(&mv), 16);
                int px = x + mv_x;
                int py = y + mv_y;

                if (px < 0 || px >= avctx->width ||
                    py < 0 || py >= avctx->height)
                    return AVERROR_INVALIDDATA;

                src[0] = prev->data[0] + in_linesize[0] *  py       + px;
                src[1] = prev->data[0] + in_linesize[0] *  py       + px + 8;
                src[2] = prev->data[0] + in_linesize[0] * (py + 8)  + px;
                src[3] = prev->data[0] + in_linesize[0] * (py + 8)  + px + 8;
                src[4] = prev->data[1] + in_linesize[4] * (py >> 1) + (px >> 1);
                src[5] = prev->data[2] + in_linesize[5] * (py >> 1) + (px >> 1);

                if ((flags >> (cnt + 4)) & 1) {
                    for (int b = 0; b < 6; b++)
                        copy_block8(dst[b], src[b], linesize[b], in_linesize[b], 8);
                } else {
                    for (int b = 0; b < 6; b++) {
                        int mode = get_bits_le(&mgb, 2);

                        ret = decode_inter_block(avctx, mode, &gbyte, s->interq_tab[b >= 4],
                                                 s->block[b],
                                                 pfill[(b >= 4) + (b >= 5)],
                                                 dst[b], linesize[b],
                                                 src[b], in_linesize[b]);
                        if (ret < 0)
                            goto fail;
                    }
                }
            } else {
                for (int b = 0; b < 6; b++) {
                    int mode = get_bits_le(&mgb, 2);

                    ret = decode_intra_block(avctx, mode, &gbyte, s->intraq_tab[b >= 4],
                                             s->block[b],
                                             pfill[(b >= 4) + (b >= 5)],
                                             dst[b], linesize[b]);
                    if (ret < 0)
                        goto fail;
                }
            }
            cnt++;
        }
    }

fail:
    return ret;
}
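
/* Each packet starts with an 8-bit intra quantizer, a signed 8-bit
 * inter-quantizer delta, then 16-bit little-endian fields: an inter-frame
 * flag, the size of the per-block mode stream, and (for inter frames) the
 * number of motion vectors. The rest of the packet is parsed by
 * decode_intra() or decode_inter(). */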
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    MV30Context *s = avctx->priv_data;
    GetBitContext *gb = &s->gb;
    AVFrame *frame = data;
    int ret;

    if ((ret = init_get_bits8(gb, avpkt->data, avpkt->size)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;

    s->intra_quant = get_bits(gb, 8);
    s->inter_quant = s->intra_quant + get_sbits(gb, 8);
    s->is_inter = get_bits_le(gb, 16);
    s->mode_size = get_bits_le(gb, 16);
    if (s->is_inter)
        s->nb_mvectors = get_bits_le(gb, 16);

    get_qtable(s->intraq_tab[0], s->intra_quant, luma_tab);
    get_qtable(s->intraq_tab[1], s->intra_quant, chroma_tab);

    frame->key_frame = s->is_inter == 0;

    if (frame->key_frame) {
        ret = decode_intra(avctx, gb, frame);
        if (ret < 0)
            return ret;
    } else {
        get_qtable(s->interq_tab[0], s->inter_quant, luma_tab);
        get_qtable(s->interq_tab[1], s->inter_quant, chroma_tab);

        if (!s->prev_frame->data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
            return AVERROR_INVALIDDATA;
        }

        ret = decode_inter(avctx, gb, frame, s->prev_frame);
        if (ret < 0)
            return ret;
    }

    av_frame_unref(s->prev_frame);
    if ((ret = av_frame_ref(s->prev_frame, frame)) < 0)
        return ret;

    *got_frame = 1;

    return avpkt->size;
}

static const uint16_t cbp_codes[] = {
    0, 1, 4, 5, 6, 0xE, 0x1E, 0x3E, 0x7E, 0xFE, 0x1FE, 0x1FF,
};

static const uint8_t cbp_bits[] = {
    2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 9, 9,
};

static av_cold void init_static_data(void)
{
    INIT_VLC_SPARSE_STATIC(&cbp_tab, 9, FF_ARRAY_ELEMS(cbp_bits),
                           cbp_bits, 1, 1, cbp_codes, 2, 2, NULL, 0, 0, 512);
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;
    static AVOnce init_static_once = AV_ONCE_INIT;

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;
    avctx->color_range = AVCOL_RANGE_JPEG;

    ff_blockdsp_init(&s->bdsp, avctx);

    s->prev_frame = av_frame_alloc();
    if (!s->prev_frame)
        return AVERROR(ENOMEM);

    ff_thread_once(&init_static_once, init_static_data);

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;

    av_frame_unref(s->prev_frame);
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    MV30Context *s = avctx->priv_data;

    av_frame_free(&s->prev_frame);
    av_freep(&s->coeffs);
    s->coeffs_size = 0;
    av_freep(&s->mvectors);
    s->mvectors_size = 0;

    return 0;
}

AVCodec ff_mv30_decoder = {
    .name           = "mv30",
    .long_name      = NULL_IF_CONFIG_SMALL("MidiVid 3.0"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MV30,
    .priv_data_size = sizeof(MV30Context),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};
  615. };