/*
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 */
#include "avcodec.h"
#include "bytestream.h"
#include "mss34dsp.h"

#define HEADER_SIZE 27

#define MODEL2_SCALE       13
#define MODEL_SCALE        15
#define MODEL256_SEC_SCALE  9
typedef struct Model2 {
    int      upd_val, till_rescale;
    unsigned zero_freq,  zero_weight;
    unsigned total_freq, total_weight;
} Model2;

typedef struct Model {
    int weights[16], freqs[16];
    int num_syms;
    int tot_weight;
    int upd_val, max_upd_val, till_rescale;
} Model;

typedef struct Model256 {
    int weights[256], freqs[256];
    int tot_weight;
    int secondary[68];
    int sec_size;
    int upd_val, max_upd_val, till_rescale;
} Model256;

#define RAC_BOTTOM 0x01000000
typedef struct RangeCoder {
    const uint8_t *src, *src_end;

    uint32_t range, low;
    int      got_error;
} RangeCoder;
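
/* Each 16x16 luma macroblock (8x8 for chroma) is coded as one of five block
 * types; SKIP_BLOCK leaves the pixels from the previous frame untouched. */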
enum BlockType {
    FILL_BLOCK = 0,
    IMAGE_BLOCK,
    DCT_BLOCK,
    HAAR_BLOCK,
    SKIP_BLOCK
};

typedef struct BlockTypeContext {
    int      last_type;
    Model    bt_model[5];
} BlockTypeContext;

typedef struct FillBlockCoder {
    int      fill_val;
    Model    coef_model;
} FillBlockCoder;

typedef struct ImageBlockCoder {
    Model256 esc_model, vec_entry_model;
    Model    vec_size_model;
    Model    vq_model[125];
} ImageBlockCoder;

typedef struct DCTBlockCoder {
    int      *prev_dc;
    int      prev_dc_stride;
    int      prev_dc_height;
    int      quality;
    uint16_t qmat[64];
    Model    dc_model;
    Model2   sign_model;
    Model256 ac_model;
} DCTBlockCoder;

typedef struct HaarBlockCoder {
    int      quality, scale;
    Model256 coef_model;
    Model    coef_hi_model;
} HaarBlockCoder;

typedef struct MSS3Context {
    AVCodecContext   *avctx;
    AVFrame          pic;

    int              got_error;
    RangeCoder       coder;
    BlockTypeContext btype[3];
    FillBlockCoder   fill_coder[3];
    ImageBlockCoder  image_coder[3];
    DCTBlockCoder    dct_coder[3];
    HaarBlockCoder   haar_coder[3];

    int              dctblock[64];
    int              hblock[16 * 16];
} MSS3Context;
static const uint8_t zigzag_scan[64] = {
     0,  1,  8, 16,  9,  2,  3, 10,
    17, 24, 32, 25, 18, 11,  4,  5,
    12, 19, 26, 33, 40, 48, 41, 34,
    27, 20, 13,  6,  7, 14, 21, 28,
    35, 42, 49, 56, 57, 50, 43, 36,
    29, 22, 15, 23, 30, 37, 44, 51,
    58, 59, 52, 45, 38, 31, 39, 46,
    53, 60, 61, 54, 47, 55, 62, 63
};
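
/* Adaptive frequency models driven by the range coder below: Model2 is a
 * binary model, Model handles up to 16 symbols, and Model256 covers a full
 * byte alphabet with a secondary index for faster symbol lookup.  Symbol
 * weights are bumped on every decoded symbol and periodically rescaled. */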
static void model2_reset(Model2 *m)
{
    m->zero_weight  = 1;
    m->total_weight = 2;
    m->zero_freq    = 0x1000;
    m->total_freq   = 0x2000;
    m->upd_val      = 4;
    m->till_rescale = 4;
}

static void model2_update(Model2 *m, int bit)
{
    unsigned scale;

    if (!bit)
        m->zero_weight++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->total_weight += m->upd_val;
    if (m->total_weight > 0x2000) {
        m->total_weight = (m->total_weight + 1) >> 1;
        m->zero_weight  = (m->zero_weight  + 1) >> 1;
        if (m->total_weight == m->zero_weight)
            m->total_weight = m->zero_weight + 1;
    }
    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > 64)
        m->upd_val = 64;
    scale = 0x80000000u / m->total_weight;
    m->zero_freq    = m->zero_weight  * scale >> 18;
    m->total_freq   = m->total_weight * scale >> 18;
    m->till_rescale = m->upd_val;
}
static void model_update(Model *m, int val)
{
    int i, sum = 0;
    unsigned scale;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < m->num_syms; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight +=  m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    for (i = 0; i < m->num_syms; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum += m->weights[i];
    }

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model_reset(Model *m)
{
    int i;

    m->tot_weight = 0;
    for (i = 0; i < m->num_syms - 1; i++)
        m->weights[i] = 1;
    m->weights[m->num_syms - 1] = 0;

    m->upd_val      = m->num_syms;
    m->till_rescale = 1;
    model_update(m, m->num_syms - 1);
    m->till_rescale =
    m->upd_val      = (m->num_syms + 6) >> 1;
}

static av_cold void model_init(Model *m, int num_syms)
{
    m->num_syms    = num_syms;
    m->max_upd_val = 8 * num_syms + 48;

    model_reset(m);
}
static void model256_update(Model256 *m, int val)
{
    int i, sum = 0;
    unsigned scale;
    int send, sidx = 1;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < 256; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight +=  m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    m->secondary[0] = 0;
    for (i = 0; i < 256; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum += m->weights[i];
        send = m->freqs[i] >> MODEL256_SEC_SCALE;
        while (sidx <= send)
            m->secondary[sidx++] = i - 1;
    }
    while (sidx < m->sec_size)
        m->secondary[sidx++] = 255;

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model256_reset(Model256 *m)
{
    int i;

    for (i = 0; i < 255; i++)
        m->weights[i] = 1;
    m->weights[255] = 0;

    m->tot_weight   = 0;
    m->upd_val      = 256;
    m->till_rescale = 1;
    model256_update(m, 255);
    m->till_rescale =
    m->upd_val      = (256 + 6) >> 1;
}

static av_cold void model256_init(Model256 *m)
{
    m->max_upd_val = 8 * 256 + 48;
    m->sec_size    = (1 << 6) + 2;

    model256_reset(m);
}
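
/* Range decoder: "low" holds the next 32 coded bits and "range" shrinks as
 * symbols are decoded; both are renormalised one input byte at a time once
 * range drops below RAC_BOTTOM.  got_error is set when the coder detects it
 * has run out of valid input. */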
static void rac_init(RangeCoder *c, const uint8_t *src, int size)
{
    int i;

    c->src     = src;
    c->src_end = src + size;
    c->low     = 0;
    for (i = 0; i < FFMIN(size, 4); i++)
        c->low = (c->low << 8) | *c->src++;
    c->range     = 0xFFFFFFFF;
    c->got_error = 0;
}

static void rac_normalise(RangeCoder *c)
{
    for (;;) {
        c->range <<= 8;
        c->low   <<= 8;
        if (c->src < c->src_end) {
            c->low |= *c->src++;
        } else if (!c->low) {
            c->got_error = 1;
            return;
        }
        if (c->range >= RAC_BOTTOM)
            return;
    }
}

static int rac_get_bit(RangeCoder *c)
{
    int bit;

    c->range >>= 1;

    bit = (c->range <= c->low);
    if (bit)
        c->low -= c->range;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return bit;
}

static int rac_get_bits(RangeCoder *c, int nbits)
{
    int val;

    c->range >>= nbits;
    val = c->low / c->range;
    c->low -= c->range * val;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return val;
}
static int rac_get_model2_sym(RangeCoder *c, Model2 *m)
{
    int bit, helper;

    helper = m->zero_freq * (c->range >> MODEL2_SCALE);
    bit    = (c->low >= helper);
    if (bit) {
        c->low   -= helper;
        c->range -= helper;
    } else {
        c->range  = helper;
    }

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model2_update(m, bit);

    return bit;
}

static int rac_get_model_sym(RangeCoder *c, Model *m)
{
    int prob, prob2, helper, val;
    int end, end2;

    prob       = 0;
    prob2      = c->range;
    c->range >>= MODEL_SCALE;
    val        = 0;
    end        = m->num_syms >> 1;
    end2       = m->num_syms;
    do {
        helper = m->freqs[end] * c->range;
        if (helper <= c->low) {
            val   = end;
            prob  = helper;
        } else {
            end2  = end;
            prob2 = helper;
        }
        end = (end2 + val) >> 1;
    } while (end != val);
    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model_update(m, val);

    return val;
}
static int rac_get_model256_sym(RangeCoder *c, Model256 *m)
{
    int prob, prob2, helper, val;
    int start, end;
    int ssym;

    prob2      = c->range;
    c->range >>= MODEL_SCALE;

    helper = c->low / c->range;
    ssym   = helper >> MODEL256_SEC_SCALE;
    val    = m->secondary[ssym];

    end = start = m->secondary[ssym + 1] + 1;
    while (end > val + 1) {
        ssym = (end + val) >> 1;
        if (m->freqs[ssym] <= helper) {
            end = start;
            val = ssym;
        } else {
            end   = (end + val) >> 1;
            start = ssym;
        }
    }
    prob = m->freqs[val] * c->range;
    if (val != 255)
        prob2 = m->freqs[val + 1] * c->range;

    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model256_update(m, val);

    return val;
}
static int decode_block_type(RangeCoder *c, BlockTypeContext *bt)
{
    bt->last_type = rac_get_model_sym(c, &bt->bt_model[bt->last_type]);

    return bt->last_type;
}
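
/* Signed coefficients are coded as a magnitude class from the model, a raw
 * sign bit, and (for classes above 1) extra low-order bits from the coder. */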
static int decode_coeff(RangeCoder *c, Model *m)
{
    int val, sign;

    val = rac_get_model_sym(c, m);
    if (val) {
        sign = rac_get_bit(c);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;
    }

    return val;
}

static void decode_fill_block(RangeCoder *c, FillBlockCoder *fc,
                              uint8_t *dst, int stride, int block_size)
{
    int i;

    fc->fill_val += decode_coeff(c, &fc->coef_model);

    for (i = 0; i < block_size; i++, dst += stride)
        memset(dst, fc->fill_val, block_size);
}
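
/* Image blocks carry a small vector of 2-4 pixel values; each pixel picks a
 * vector entry (or an escape to a literal value) through a VQ model whose
 * context is formed from the left, top and top-left decisions. */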
static void decode_image_block(RangeCoder *c, ImageBlockCoder *ic,
                               uint8_t *dst, int stride, int block_size)
{
    int i, j;
    int vec_size;
    int vec[4];
    int prev_line[16];
    int A, B, C;

    vec_size = rac_get_model_sym(c, &ic->vec_size_model) + 2;
    for (i = 0; i < vec_size; i++)
        vec[i] = rac_get_model256_sym(c, &ic->vec_entry_model);
    for (; i < 4; i++)
        vec[i] = 0;
    memset(prev_line, 0, sizeof(prev_line));

    for (j = 0; j < block_size; j++) {
        A = 0;
        B = 0;
        for (i = 0; i < block_size; i++) {
            C = B;
            B = prev_line[i];
            A = rac_get_model_sym(c, &ic->vq_model[A + B * 5 + C * 25]);

            prev_line[i] = A;
            if (A < 4)
                dst[i] = vec[A];
            else
                dst[i] = rac_get_model256_sym(c, &ic->esc_model);
        }
        dst += stride;
    }
}
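
/* DCT blocks: the DC coefficient is predicted from the left or top neighbour
 * depending on the local gradient, the AC coefficients are run/level coded in
 * zigzag order, and everything is scaled by the current quantisation matrix. */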
static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block,
                      int bx, int by)
{
    int skip, val, sign, pos = 1, zz_pos, dc;
    int blk_pos = bx + by * bc->prev_dc_stride;

    memset(block, 0, sizeof(*block) * 64);

    dc = decode_coeff(c, &bc->dc_model);
    if (by) {
        if (bx) {
            int l, tl, t;

            l  = bc->prev_dc[blk_pos - 1];
            tl = bc->prev_dc[blk_pos - 1 - bc->prev_dc_stride];
            t  = bc->prev_dc[blk_pos     - bc->prev_dc_stride];

            if (FFABS(t - tl) <= FFABS(l - tl))
                dc += l;
            else
                dc += t;
        } else {
            dc += bc->prev_dc[blk_pos - bc->prev_dc_stride];
        }
    } else if (bx) {
        dc += bc->prev_dc[bx - 1];
    }
    bc->prev_dc[blk_pos] = dc;
    block[0] = dc * bc->qmat[0];

    while (pos < 64) {
        val = rac_get_model256_sym(c, &bc->ac_model);
        if (!val)
            return 0;
        if (val == 0xF0) {
            pos += 16;
            continue;
        }
        skip = val >> 4;
        val  = val & 0xF;
        if (!val)
            return -1;
        pos += skip;
        if (pos >= 64)
            return -1;

        sign = rac_get_model2_sym(c, &bc->sign_model);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;

        zz_pos = zigzag_scan[pos];
        block[zz_pos] = val * bc->qmat[zz_pos];
        pos++;
    }

    return pos == 64 ? 0 : -1;
}
static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc,
                             uint8_t *dst, int stride, int block_size,
                             int *block, int mb_x, int mb_y)
{
    int i, j;
    int bx, by;
    int nblocks = block_size >> 3;

    bx = mb_x * nblocks;
    by = mb_y * nblocks;

    for (j = 0; j < nblocks; j++) {
        for (i = 0; i < nblocks; i++) {
            if (decode_dct(c, bc, block, bx + i, by + j)) {
                c->got_error = 1;
                return;
            }
            ff_mss34_dct_put(dst + i * 8, stride, block);
        }
        dst += 8 * stride;
    }
}
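
/* Haar blocks: a single-level Haar decomposition is decoded (low-pass
 * coefficients from the 256-symbol model, the remaining coefficients via
 * decode_coeff), scaled by the quality-derived factor and synthesised into
 * 2x2 groups of output pixels. */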
static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc,
                              uint8_t *dst, int stride, int block_size,
                              int *block)
{
    const int hsize = block_size >> 1;
    int A, B, C, D, t1, t2, t3, t4;
    int i, j;

    for (j = 0; j < block_size; j++) {
        for (i = 0; i < block_size; i++) {
            if (i < hsize && j < hsize)
                block[i] = rac_get_model256_sym(c, &hc->coef_model);
            else
                block[i] = decode_coeff(c, &hc->coef_hi_model);
            block[i] *= hc->scale;
        }
        block += block_size;
    }
    block -= block_size * block_size;

    for (j = 0; j < hsize; j++) {
        for (i = 0; i < hsize; i++) {
            A = block[i];
            B = block[i + hsize];
            C = block[i + hsize * block_size];
            D = block[i + hsize * block_size + hsize];
            t1 = A - B;
            t2 = C - D;
            t3 = A + B;
            t4 = C + D;
            dst[i * 2]              = av_clip_uint8(t1 - t2);
            dst[i * 2 + stride]     = av_clip_uint8(t1 + t2);
            dst[i * 2 + 1]          = av_clip_uint8(t3 - t4);
            dst[i * 2 + 1 + stride] = av_clip_uint8(t3 + t4);
        }
        block += block_size;
        dst   += stride * 2;
    }
}
static void reset_coders(MSS3Context *ctx, int quality)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        ctx->btype[i].last_type = SKIP_BLOCK;
        for (j = 0; j < 5; j++)
            model_reset(&ctx->btype[i].bt_model[j]);
        ctx->fill_coder[i].fill_val = 0;
        model_reset(&ctx->fill_coder[i].coef_model);
        model256_reset(&ctx->image_coder[i].esc_model);
        model256_reset(&ctx->image_coder[i].vec_entry_model);
        model_reset(&ctx->image_coder[i].vec_size_model);
        for (j = 0; j < 125; j++)
            model_reset(&ctx->image_coder[i].vq_model[j]);

        if (ctx->dct_coder[i].quality != quality) {
            ctx->dct_coder[i].quality = quality;
            ff_mss34_gen_quant_mat(ctx->dct_coder[i].qmat, quality, !i);
        }
        memset(ctx->dct_coder[i].prev_dc, 0,
               sizeof(*ctx->dct_coder[i].prev_dc) *
               ctx->dct_coder[i].prev_dc_stride *
               ctx->dct_coder[i].prev_dc_height);
        model_reset(&ctx->dct_coder[i].dc_model);
        model2_reset(&ctx->dct_coder[i].sign_model);
        model256_reset(&ctx->dct_coder[i].ac_model);

        if (ctx->haar_coder[i].quality != quality) {
            ctx->haar_coder[i].quality = quality;
            ctx->haar_coder[i].scale   = 17 - 7 * quality / 50;
        }
        model_reset(&ctx->haar_coder[i].coef_hi_model);
        model256_reset(&ctx->haar_coder[i].coef_model);
    }
}
static av_cold void init_coders(MSS3Context *ctx)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 5; j++)
            model_init(&ctx->btype[i].bt_model[j], 5);
        model_init(&ctx->fill_coder[i].coef_model, 12);
        model256_init(&ctx->image_coder[i].esc_model);
        model256_init(&ctx->image_coder[i].vec_entry_model);
        model_init(&ctx->image_coder[i].vec_size_model, 3);
        for (j = 0; j < 125; j++)
            model_init(&ctx->image_coder[i].vq_model[j], 5);
        model_init(&ctx->dct_coder[i].dc_model, 12);
        model256_init(&ctx->dct_coder[i].ac_model);
        model_init(&ctx->haar_coder[i].coef_hi_model, 12);
        model256_init(&ctx->haar_coder[i].coef_model);
    }
}
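
/* Frame layout, as parsed below: a 27-byte header (32-bit frame type flags,
 * skipped fields, 16-bit x/y offset and width/height of the coded region,
 * and a quality value in 1-100), followed by the range-coded block data. */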
static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MSS3Context *c = avctx->priv_data;
    RangeCoder *acoder = &c->coder;
    GetByteContext gb;
    uint8_t *dst[3];
    int dec_width, dec_height, dec_x, dec_y, quality, keyframe;
    int x, y, i, mb_width, mb_height, blk_size, btype;
    int ret;

    if (buf_size < HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least %d bytes, got %d instead\n",
               HEADER_SIZE, buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&gb, buf, buf_size);
    keyframe   = bytestream2_get_be32(&gb);
    if (keyframe & ~0x301) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type %X\n", keyframe);
        return AVERROR_INVALIDDATA;
    }
    keyframe   = !(keyframe & 1);
    bytestream2_skip(&gb, 6);
    dec_x      = bytestream2_get_be16(&gb);
    dec_y      = bytestream2_get_be16(&gb);
    dec_width  = bytestream2_get_be16(&gb);
    dec_height = bytestream2_get_be16(&gb);

    if (dec_x + dec_width > avctx->width ||
        dec_y + dec_height > avctx->height ||
        (dec_width | dec_height) & 0xF) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d +%d,%d\n",
               dec_width, dec_height, dec_x, dec_y);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);
    quality    = bytestream2_get_byte(&gb);
    if (quality < 1 || quality > 100) {
        av_log(avctx, AV_LOG_ERROR, "Invalid quality setting %d\n", quality);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);

    if (keyframe && !bytestream2_get_bytes_left(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "Keyframe without data found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!keyframe && c->got_error)
        return buf_size;
    c->got_error = 0;

    c->pic.reference    = 3;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                          FF_BUFFER_HINTS_REUSABLE;
    if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }
    c->pic.key_frame = keyframe;
    c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    if (!bytestream2_get_bytes_left(&gb)) {
        *data_size      = sizeof(AVFrame);
        *(AVFrame*)data = c->pic;

        return buf_size;
    }

    reset_coders(c, quality);

    rac_init(acoder, buf + HEADER_SIZE, buf_size - HEADER_SIZE);

    mb_width  = dec_width  >> 4;
    mb_height = dec_height >> 4;
    dst[0] = c->pic.data[0] + dec_x     +  dec_y      * c->pic.linesize[0];
    dst[1] = c->pic.data[1] + dec_x / 2 + (dec_y / 2) * c->pic.linesize[1];
    dst[2] = c->pic.data[2] + dec_x / 2 + (dec_y / 2) * c->pic.linesize[2];
    for (y = 0; y < mb_height; y++) {
        for (x = 0; x < mb_width; x++) {
            for (i = 0; i < 3; i++) {
                blk_size = 8 << !i;

                btype = decode_block_type(acoder, c->btype + i);
                switch (btype) {
                case FILL_BLOCK:
                    decode_fill_block(acoder, c->fill_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic.linesize[i], blk_size);
                    break;
                case IMAGE_BLOCK:
                    decode_image_block(acoder, c->image_coder + i,
                                       dst[i] + x * blk_size,
                                       c->pic.linesize[i], blk_size);
                    break;
                case DCT_BLOCK:
                    decode_dct_block(acoder, c->dct_coder + i,
                                     dst[i] + x * blk_size,
                                     c->pic.linesize[i], blk_size,
                                     c->dctblock, x, y);
                    break;
                case HAAR_BLOCK:
                    decode_haar_block(acoder, c->haar_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic.linesize[i], blk_size,
                                      c->hblock);
                    break;
                }
                if (c->got_error || acoder->got_error) {
                    av_log(avctx, AV_LOG_ERROR, "Error decoding block %d,%d\n",
                           x, y);
                    c->got_error = 1;
                    return AVERROR_INVALIDDATA;
                }
            }
        }
        dst[0] += c->pic.linesize[0] * 16;
        dst[1] += c->pic.linesize[1] * 8;
        dst[2] += c->pic.linesize[2] * 8;
    }

    *data_size      = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    return buf_size;
}
static av_cold int mss3_decode_init(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    c->avctx = avctx;

    if ((avctx->width & 0xF) || (avctx->height & 0xF)) {
        av_log(avctx, AV_LOG_ERROR,
               "Image dimensions should be a multiple of 16.\n");
        return AVERROR_INVALIDDATA;
    }

    c->got_error = 0;
    for (i = 0; i < 3; i++) {
        int b_width  = avctx->width  >> (2 + !!i);
        int b_height = avctx->height >> (2 + !!i);
        c->dct_coder[i].prev_dc_stride = b_width;
        c->dct_coder[i].prev_dc_height = b_height;
        c->dct_coder[i].prev_dc = av_malloc(sizeof(*c->dct_coder[i].prev_dc) *
                                            b_width * b_height);
        if (!c->dct_coder[i].prev_dc) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate buffer\n");
            while (i >= 0) {
                av_freep(&c->dct_coder[i].prev_dc);
                i--;
            }
            return AVERROR(ENOMEM);
        }
    }

    avctx->pix_fmt     = PIX_FMT_YUV420P;
    avctx->coded_frame = &c->pic;

    init_coders(c);

    return 0;
}
static av_cold int mss3_decode_end(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    for (i = 0; i < 3; i++)
        av_freep(&c->dct_coder[i].prev_dc);

    return 0;
}

AVCodec ff_msa1_decoder = {
    .name           = "msa1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_MSA1,
    .priv_data_size = sizeof(MSS3Context),
    .init           = mss3_decode_init,
    .close          = mss3_decode_end,
    .decode         = mss3_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("MS ATC Screen"),
};