/*
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 */

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "mathops.h"
#include "mss34dsp.h"

#define HEADER_SIZE 27

#define MODEL2_SCALE       13
#define MODEL_SCALE        15
#define MODEL256_SEC_SCALE  9
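
/*
 * Adaptive probability models used by the range decoder:
 *  - Model2   codes a single binary decision via zero/total frequencies,
 *  - Model    codes small alphabets of up to 16 symbols,
 *  - Model256 codes full bytes and keeps a coarse secondary[] index into
 *    its cumulative frequencies to narrow down symbol lookup.
 * Each model rebuilds its frequency tables after till_rescale updates.
 */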
typedef struct Model2 {
    int      upd_val, till_rescale;
    unsigned zero_freq,  zero_weight;
    unsigned total_freq, total_weight;
} Model2;

typedef struct Model {
    int weights[16], freqs[16];
    int num_syms;
    int tot_weight;
    int upd_val, max_upd_val, till_rescale;
} Model;

typedef struct Model256 {
    int weights[256], freqs[256];
    int tot_weight;
    int secondary[68];
    int sec_size;
    int upd_val, max_upd_val, till_rescale;
} Model256;

#define RAC_BOTTOM 0x01000000
typedef struct RangeCoder {
    const uint8_t *src, *src_end;

    uint32_t range, low;
    int      got_error;
} RangeCoder;

enum BlockType {
    FILL_BLOCK = 0,
    IMAGE_BLOCK,
    DCT_BLOCK,
    HAAR_BLOCK,
    SKIP_BLOCK
};

typedef struct BlockTypeContext {
    int last_type;
    Model bt_model[5];
} BlockTypeContext;

typedef struct FillBlockCoder {
    int fill_val;
    Model coef_model;
} FillBlockCoder;

typedef struct ImageBlockCoder {
    Model256 esc_model, vec_entry_model;
    Model    vec_size_model;
    Model    vq_model[125];
} ImageBlockCoder;

typedef struct DCTBlockCoder {
    int      *prev_dc;
    ptrdiff_t prev_dc_stride;
    int       prev_dc_height;
    int       quality;
    uint16_t  qmat[64];
    Model     dc_model;
    Model2    sign_model;
    Model256  ac_model;
} DCTBlockCoder;

typedef struct HaarBlockCoder {
    int      quality, scale;
    Model256 coef_model;
    Model    coef_hi_model;
} HaarBlockCoder;

typedef struct MSS3Context {
    AVCodecContext *avctx;
    AVFrame        *pic;

    int              got_error;
    RangeCoder       coder;
    BlockTypeContext btype[3];
    FillBlockCoder   fill_coder[3];
    ImageBlockCoder  image_coder[3];
    DCTBlockCoder    dct_coder[3];
    HaarBlockCoder   haar_coder[3];

    int dctblock[64];
    int hblock[16 * 16];
} MSS3Context;
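
/*
 * Model maintenance: each *_reset() puts a model into its initial state and
 * each *_update() bumps the weight of the decoded symbol, periodically
 * rebuilding the cumulative frequency tables and halving the weights once
 * the total weight grows too large.
 */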
static void model2_reset(Model2 *m)
{
    m->zero_weight  = 1;
    m->total_weight = 2;
    m->zero_freq    = 0x1000;
    m->total_freq   = 0x2000;
    m->upd_val      = 4;
    m->till_rescale = 4;
}

static void model2_update(Model2 *m, int bit)
{
    unsigned scale;

    if (!bit)
        m->zero_weight++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->total_weight += m->upd_val;
    if (m->total_weight > 0x2000) {
        m->total_weight = (m->total_weight + 1) >> 1;
        m->zero_weight  = (m->zero_weight  + 1) >> 1;
        if (m->total_weight == m->zero_weight)
            m->total_weight = m->zero_weight + 1;
    }
    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > 64)
        m->upd_val = 64;
    scale = 0x80000000u / m->total_weight;
    m->zero_freq    = m->zero_weight  * scale >> 18;
    m->total_freq   = m->total_weight * scale >> 18;
    m->till_rescale = m->upd_val;
}

static void model_update(Model *m, int val)
{
    int i, sum = 0;
    unsigned scale;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < m->num_syms; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight +=  m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    for (i = 0; i < m->num_syms; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum        += m->weights[i];
    }

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model_reset(Model *m)
{
    int i;

    m->tot_weight = 0;
    for (i = 0; i < m->num_syms - 1; i++)
        m->weights[i] = 1;
    m->weights[m->num_syms - 1] = 0;

    m->upd_val      = m->num_syms;
    m->till_rescale = 1;
    model_update(m, m->num_syms - 1);
    m->till_rescale =
    m->upd_val      = (m->num_syms + 6) >> 1;
}

static av_cold void model_init(Model *m, int num_syms)
{
    m->num_syms    = num_syms;
    m->max_upd_val = 8 * num_syms + 48;

    model_reset(m);
}

static void model256_update(Model256 *m, int val)
{
    int i, sum = 0;
    unsigned scale;
    int send, sidx = 1;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < 256; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight +=  m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    m->secondary[0] = 0;
    for (i = 0; i < 256; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum        += m->weights[i];
        send = m->freqs[i] >> MODEL256_SEC_SCALE;
        while (sidx <= send)
            m->secondary[sidx++] = i - 1;
    }
    while (sidx < m->sec_size)
        m->secondary[sidx++] = 255;

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model256_reset(Model256 *m)
{
    int i;

    for (i = 0; i < 255; i++)
        m->weights[i] = 1;
    m->weights[255] = 0;

    m->tot_weight   = 0;
    m->upd_val      = 256;
    m->till_rescale = 1;
    model256_update(m, 255);
    m->till_rescale =
    m->upd_val      = (256 + 6) >> 1;
}

static av_cold void model256_init(Model256 *m)
{
    m->max_upd_val = 8 * 256 + 48;
    m->sec_size    = (1 << 6) + 2;

    model256_reset(m);
}
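
/*
 * Range decoder: "low" holds the value read so far and "range" the size of
 * the current interval.  Both are renormalised one byte at a time once the
 * range drops below RAC_BOTTOM; running out of input (with low == 0) or
 * detecting low > range sets got_error.
 */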
static void rac_init(RangeCoder *c, const uint8_t *src, int size)
{
    int i;

    c->src     = src;
    c->src_end = src + size;
    c->low     = 0;
    for (i = 0; i < FFMIN(size, 4); i++)
        c->low = (c->low << 8) | *c->src++;
    c->range     = 0xFFFFFFFF;
    c->got_error = 0;
}

static void rac_normalise(RangeCoder *c)
{
    for (;;) {
        c->range <<= 8;
        c->low   <<= 8;
        if (c->src < c->src_end) {
            c->low |= *c->src++;
        } else if (!c->low) {
            c->got_error = 1;
            c->low = 1;
        }
        if (c->low > c->range) {
            c->got_error = 1;
            c->low = 1;
        }
        if (c->range >= RAC_BOTTOM)
            return;
    }
}

static int rac_get_bit(RangeCoder *c)
{
    int bit;

    c->range >>= 1;

    bit = (c->range <= c->low);
    if (bit)
        c->low -= c->range;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return bit;
}

static int rac_get_bits(RangeCoder *c, int nbits)
{
    int val;

    c->range >>= nbits;
    val = c->low / c->range;
    c->low -= c->range * val;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return val;
}
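
/*
 * Model-driven symbol readers: the range is scaled down by the model's
 * frequency precision and the decoded symbol is located by comparing c->low
 * against cumulative frequencies -- a binary search for Model, and a
 * secondary[] table lookup followed by a narrowed search for Model256.
 */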
static int rac_get_model2_sym(RangeCoder *c, Model2 *m)
{
    int bit, helper;

    helper = m->zero_freq * (c->range >> MODEL2_SCALE);
    bit    = (c->low >= helper);
    if (bit) {
        c->low   -= helper;
        c->range -= helper;
    } else {
        c->range  = helper;
    }

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model2_update(m, bit);

    return bit;
}

static int rac_get_model_sym(RangeCoder *c, Model *m)
{
    int val;
    int end, end2;
    unsigned prob, prob2, helper;

    prob  = 0;
    prob2 = c->range;
    c->range >>= MODEL_SCALE;
    val   = 0;
    end   = m->num_syms >> 1;
    end2  = m->num_syms;
    do {
        helper = m->freqs[end] * c->range;
        if (helper <= c->low) {
            val   = end;
            prob  = helper;
        } else {
            end2  = end;
            prob2 = helper;
        }
        end = (end2 + val) >> 1;
    } while (end != val);

    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model_update(m, val);

    return val;
}

static int rac_get_model256_sym(RangeCoder *c, Model256 *m)
{
    int val;
    int start, end;
    int ssym;
    unsigned prob, prob2, helper;

    prob2 = c->range;
    c->range >>= MODEL_SCALE;

    helper = c->low / c->range;
    ssym   = helper >> MODEL256_SEC_SCALE;
    val    = m->secondary[ssym];

    end = start = m->secondary[ssym + 1] + 1;
    while (end > val + 1) {
        ssym = (end + val) >> 1;
        if (m->freqs[ssym] <= helper) {
            end = start;
            val = ssym;
        } else {
            end   = (end + val) >> 1;
            start = ssym;
        }
    }
    prob = m->freqs[val] * c->range;
    if (val != 255)
        prob2 = m->freqs[val + 1] * c->range;

    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model256_update(m, val);

    return val;
}
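
/* The block type is coded with a separate model per previous block type,
 * so runs of identical block types compress well. */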
static int decode_block_type(RangeCoder *c, BlockTypeContext *bt)
{
    bt->last_type = rac_get_model_sym(c, &bt->bt_model[bt->last_type]);

    return bt->last_type;
}

static int decode_coeff(RangeCoder *c, Model *m)
{
    int val, sign;

    val = rac_get_model_sym(c, m);
    if (val) {
        sign = rac_get_bit(c);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;
    }

    return val;
}
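
/*
 * Per-plane block decoders.  Every macroblock is coded as a 16x16 luma block
 * plus two 8x8 chroma blocks, each either filled with a single value,
 * vector-quantised (image block), DCT-coded or Haar-coded; SKIP_BLOCK leaves
 * the previous frame's content untouched.
 */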
static void decode_fill_block(RangeCoder *c, FillBlockCoder *fc,
                              uint8_t *dst, ptrdiff_t stride, int block_size)
{
    int i;

    fc->fill_val += decode_coeff(c, &fc->coef_model);

    for (i = 0; i < block_size; i++, dst += stride)
        memset(dst, fc->fill_val, block_size);
}

static void decode_image_block(RangeCoder *c, ImageBlockCoder *ic,
                               uint8_t *dst, ptrdiff_t stride, int block_size)
{
    int i, j;
    int vec_size;
    int vec[4];
    int prev_line[16];
    int A, B, C;

    vec_size = rac_get_model_sym(c, &ic->vec_size_model) + 2;
    for (i = 0; i < vec_size; i++)
        vec[i] = rac_get_model256_sym(c, &ic->vec_entry_model);
    for (; i < 4; i++)
        vec[i] = 0;
    memset(prev_line, 0, sizeof(prev_line));

    for (j = 0; j < block_size; j++) {
        A = 0;
        B = 0;
        for (i = 0; i < block_size; i++) {
            C = B;
            B = prev_line[i];
            A = rac_get_model_sym(c, &ic->vq_model[A + B * 5 + C * 25]);
            prev_line[i] = A;
            if (A < 4)
                dst[i] = vec[A];
            else
                dst[i] = rac_get_model256_sym(c, &ic->esc_model);
        }
        dst += stride;
    }
}
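
/*
 * DCT blocks are coded per 8x8 sub-block: the DC coefficient is predicted
 * from the left and top neighbours stored in prev_dc[], AC coefficients are
 * coded as (run, level) pairs in zigzag order with 0xF0 meaning a run of 16
 * zeroes, and everything is dequantised with the quality-dependent qmat[].
 */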
static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block,
                      int bx, int by)
{
    int skip, val, sign, pos = 1, zz_pos, dc;
    int blk_pos = bx + by * bc->prev_dc_stride;

    memset(block, 0, sizeof(*block) * 64);

    dc = decode_coeff(c, &bc->dc_model);
    if (by) {
        if (bx) {
            int l, tl, t;

            l  = bc->prev_dc[blk_pos - 1];
            tl = bc->prev_dc[blk_pos - 1 - bc->prev_dc_stride];
            t  = bc->prev_dc[blk_pos     - bc->prev_dc_stride];

            if (FFABS(t - tl) <= FFABS(l - tl))
                dc += l;
            else
                dc += t;
        } else {
            dc += bc->prev_dc[blk_pos - bc->prev_dc_stride];
        }
    } else if (bx) {
        dc += bc->prev_dc[bx - 1];
    }
    bc->prev_dc[blk_pos] = dc;
    block[0] = dc * bc->qmat[0];

    while (pos < 64) {
        val = rac_get_model256_sym(c, &bc->ac_model);
        if (!val)
            return 0;
        if (val == 0xF0) {
            pos += 16;
            continue;
        }
        skip = val >> 4;
        val  = val & 0xF;
        if (!val)
            return -1;
        pos += skip;
        if (pos >= 64)
            return -1;

        sign = rac_get_model2_sym(c, &bc->sign_model);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;

        zz_pos = ff_zigzag_direct[pos];
        block[zz_pos] = val * bc->qmat[zz_pos];
        pos++;
    }

    return pos == 64 ? 0 : -1;
}

static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc,
                             uint8_t *dst, ptrdiff_t stride, int block_size,
                             int *block, int mb_x, int mb_y)
{
    int i, j;
    int bx, by;
    int nblocks = block_size >> 3;

    bx = mb_x * nblocks;
    by = mb_y * nblocks;

    for (j = 0; j < nblocks; j++) {
        for (i = 0; i < nblocks; i++) {
            if (decode_dct(c, bc, block, bx + i, by + j)) {
                c->got_error = 1;
                return;
            }
            ff_mss34_dct_put(dst + i * 8, stride, block);
        }
        dst += 8 * stride;
    }
}
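
/*
 * Haar blocks store one level of 2D Haar coefficients: the low-low band is
 * coded with a byte model, the remaining bands with a coefficient model,
 * all scaled by the quality-dependent factor, then synthesised back into
 * pixels two rows at a time.
 */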
static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc,
                              uint8_t *dst, ptrdiff_t stride,
                              int block_size, int *block)
{
    const int hsize = block_size >> 1;
    int A, B, C, D, t1, t2, t3, t4;
    int i, j;

    for (j = 0; j < block_size; j++) {
        for (i = 0; i < block_size; i++) {
            if (i < hsize && j < hsize)
                block[i] = rac_get_model256_sym(c, &hc->coef_model);
            else
                block[i] = decode_coeff(c, &hc->coef_hi_model);
            block[i] *= hc->scale;
        }
        block += block_size;
    }
    block -= block_size * block_size;

    for (j = 0; j < hsize; j++) {
        for (i = 0; i < hsize; i++) {
            A = block[i];
            B = block[i + hsize];
            C = block[i + hsize * block_size];
            D = block[i + hsize * block_size + hsize];
            t1 = A - B;
            t2 = C - D;
            t3 = A + B;
            t4 = C + D;
            dst[i * 2]              = av_clip_uint8(t1 - t2);
            dst[i * 2 + stride]     = av_clip_uint8(t1 + t2);
            dst[i * 2 + 1]          = av_clip_uint8(t3 - t4);
            dst[i * 2 + 1 + stride] = av_clip_uint8(t3 + t4);
        }
        block += block_size;
        dst   += stride * 2;
    }
}
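
/* All models are reset at the start of every frame; the quantisation and
 * Haar scaling tables are only rebuilt when the quality setting changes. */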
static void reset_coders(MSS3Context *ctx, int quality)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        ctx->btype[i].last_type = SKIP_BLOCK;
        for (j = 0; j < 5; j++)
            model_reset(&ctx->btype[i].bt_model[j]);
        ctx->fill_coder[i].fill_val = 0;
        model_reset(&ctx->fill_coder[i].coef_model);
        model256_reset(&ctx->image_coder[i].esc_model);
        model256_reset(&ctx->image_coder[i].vec_entry_model);
        model_reset(&ctx->image_coder[i].vec_size_model);
        for (j = 0; j < 125; j++)
            model_reset(&ctx->image_coder[i].vq_model[j]);
        if (ctx->dct_coder[i].quality != quality) {
            ctx->dct_coder[i].quality = quality;
            ff_mss34_gen_quant_mat(ctx->dct_coder[i].qmat, quality, !i);
        }
        memset(ctx->dct_coder[i].prev_dc, 0,
               sizeof(*ctx->dct_coder[i].prev_dc) *
               ctx->dct_coder[i].prev_dc_stride *
               ctx->dct_coder[i].prev_dc_height);
        model_reset(&ctx->dct_coder[i].dc_model);
        model2_reset(&ctx->dct_coder[i].sign_model);
        model256_reset(&ctx->dct_coder[i].ac_model);
        if (ctx->haar_coder[i].quality != quality) {
            ctx->haar_coder[i].quality = quality;
            ctx->haar_coder[i].scale   = 17 - 7 * quality / 50;
        }
        model_reset(&ctx->haar_coder[i].coef_hi_model);
        model256_reset(&ctx->haar_coder[i].coef_model);
    }
}

static av_cold void init_coders(MSS3Context *ctx)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 5; j++)
            model_init(&ctx->btype[i].bt_model[j], 5);
        model_init(&ctx->fill_coder[i].coef_model, 12);
        model256_init(&ctx->image_coder[i].esc_model);
        model256_init(&ctx->image_coder[i].vec_entry_model);
        model_init(&ctx->image_coder[i].vec_size_model, 3);
        for (j = 0; j < 125; j++)
            model_init(&ctx->image_coder[i].vq_model[j], 5);
        model_init(&ctx->dct_coder[i].dc_model, 12);
        model256_init(&ctx->dct_coder[i].ac_model);
        model_init(&ctx->haar_coder[i].coef_hi_model, 12);
        model256_init(&ctx->haar_coder[i].coef_model);
    }
}
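
/*
 * The 27-byte frame header parsed below consists of a 32-bit frame type
 * (bit 0 clear marks a keyframe), six skipped bytes, the decoded region as
 * 16-bit x, y, width and height, four more skipped bytes, a quality byte
 * (1-100) and four trailing bytes before the range-coded block data.
 */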
static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MSS3Context *c = avctx->priv_data;
    RangeCoder *acoder = &c->coder;
    GetByteContext gb;
    uint8_t *dst[3];
    int dec_width, dec_height, dec_x, dec_y, quality, keyframe;
    int x, y, i, mb_width, mb_height, blk_size, btype;
    int ret;

    if (buf_size < HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least %d bytes, got %d instead\n",
               HEADER_SIZE, buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&gb, buf, buf_size);
    keyframe = bytestream2_get_be32(&gb);
    if (keyframe & ~0x301) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type %X\n", keyframe);
        return AVERROR_INVALIDDATA;
    }
    keyframe = !(keyframe & 1);
    bytestream2_skip(&gb, 6);
    dec_x      = bytestream2_get_be16(&gb);
    dec_y      = bytestream2_get_be16(&gb);
    dec_width  = bytestream2_get_be16(&gb);
    dec_height = bytestream2_get_be16(&gb);

    if (dec_x + dec_width > avctx->width ||
        dec_y + dec_height > avctx->height ||
        (dec_width | dec_height) & 0xF) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d +%d,%d\n",
               dec_width, dec_height, dec_x, dec_y);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);
    quality = bytestream2_get_byte(&gb);
    if (quality < 1 || quality > 100) {
        av_log(avctx, AV_LOG_ERROR, "Invalid quality setting %d\n", quality);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);

    if (keyframe && !bytestream2_get_bytes_left(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "Keyframe without data found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!keyframe && c->got_error)
        return buf_size;
    c->got_error = 0;

    if ((ret = ff_reget_buffer(avctx, c->pic, 0)) < 0)
        return ret;
    c->pic->key_frame = keyframe;
    c->pic->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    if (!bytestream2_get_bytes_left(&gb)) {
        if ((ret = av_frame_ref(data, c->pic)) < 0)
            return ret;
        *got_frame = 1;

        return buf_size;
    }

    reset_coders(c, quality);

    rac_init(acoder, buf + HEADER_SIZE, buf_size - HEADER_SIZE);

    mb_width  = dec_width  >> 4;
    mb_height = dec_height >> 4;
    dst[0] = c->pic->data[0] + dec_x     +  dec_y      * c->pic->linesize[0];
    dst[1] = c->pic->data[1] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[1];
    dst[2] = c->pic->data[2] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[2];
    for (y = 0; y < mb_height; y++) {
        for (x = 0; x < mb_width; x++) {
            for (i = 0; i < 3; i++) {
                blk_size = 8 << !i;

                btype = decode_block_type(acoder, c->btype + i);
                switch (btype) {
                case FILL_BLOCK:
                    decode_fill_block(acoder, c->fill_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic->linesize[i], blk_size);
                    break;
                case IMAGE_BLOCK:
                    decode_image_block(acoder, c->image_coder + i,
                                       dst[i] + x * blk_size,
                                       c->pic->linesize[i], blk_size);
                    break;
                case DCT_BLOCK:
                    decode_dct_block(acoder, c->dct_coder + i,
                                     dst[i] + x * blk_size,
                                     c->pic->linesize[i], blk_size,
                                     c->dctblock, x, y);
                    break;
                case HAAR_BLOCK:
                    decode_haar_block(acoder, c->haar_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic->linesize[i], blk_size,
                                      c->hblock);
                    break;
                }
                if (c->got_error || acoder->got_error) {
                    av_log(avctx, AV_LOG_ERROR, "Error decoding block %d,%d\n",
                           x, y);
                    c->got_error = 1;
                    return AVERROR_INVALIDDATA;
                }
            }
        }
        dst[0] += c->pic->linesize[0] * 16;
        dst[1] += c->pic->linesize[1] * 8;
        dst[2] += c->pic->linesize[2] * 8;
    }

    if ((ret = av_frame_ref(data, c->pic)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}
static av_cold int mss3_decode_end(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    av_frame_free(&c->pic);
    for (i = 0; i < 3; i++)
        av_freep(&c->dct_coder[i].prev_dc);

    return 0;
}

static av_cold int mss3_decode_init(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    c->avctx = avctx;

    if ((avctx->width & 0xF) || (avctx->height & 0xF)) {
        av_log(avctx, AV_LOG_ERROR,
               "Image dimensions should be a multiple of 16.\n");
        return AVERROR_INVALIDDATA;
    }

    c->got_error = 0;
    for (i = 0; i < 3; i++) {
        int b_width  = avctx->width  >> (2 + !!i);
        int b_height = avctx->height >> (2 + !!i);
        c->dct_coder[i].prev_dc_stride = b_width;
        c->dct_coder[i].prev_dc_height = b_height;
        c->dct_coder[i].prev_dc = av_malloc(sizeof(*c->dct_coder[i].prev_dc) *
                                            b_width * b_height);
        if (!c->dct_coder[i].prev_dc) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate buffer\n");
            av_frame_free(&c->pic);
            while (i >= 0) {
                av_freep(&c->dct_coder[i].prev_dc);
                i--;
            }
            return AVERROR(ENOMEM);
        }
    }

    c->pic = av_frame_alloc();
    if (!c->pic) {
        mss3_decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    init_coders(c);

    return 0;
}

AVCodec ff_msa1_decoder = {
    .name           = "msa1",
    .long_name      = NULL_IF_CONFIG_SMALL("MS ATC Screen"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSA1,
    .priv_data_size = sizeof(MSS3Context),
    .init           = mss3_decode_init,
    .close          = mss3_decode_end,
    .decode         = mss3_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};