/*
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 */

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"
#include "mathops.h"
#include "mss34dsp.h"

#define HEADER_SIZE 27

#define MODEL2_SCALE       13
#define MODEL_SCALE        15
#define MODEL256_SEC_SCALE  9
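
/* The decoder uses three kinds of adaptive probability models: a binary
 * model (Model2), a small multi-symbol model with at most 16 symbols (Model)
 * and a 256-symbol model with a secondary index table (Model256). All of
 * them count symbol occurrences and periodically rebuild the scaled
 * cumulative frequencies consumed by the range coder. */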

typedef struct Model2 {
    int      upd_val, till_rescale;
    unsigned zero_freq,  zero_weight;
    unsigned total_freq, total_weight;
} Model2;

typedef struct Model {
    int weights[16], freqs[16];
    int num_syms;
    int tot_weight;
    int upd_val, max_upd_val, till_rescale;
} Model;

typedef struct Model256 {
    int weights[256], freqs[256];
    int tot_weight;
    int secondary[68];
    int sec_size;
    int upd_val, max_upd_val, till_rescale;
} Model256;
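
/* Range coder state: 32-bit low/range with byte-wise renormalisation once
 * the range drops below RAC_BOTTOM; got_error is set when the input is
 * exhausted or the coder state becomes inconsistent. */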

#define RAC_BOTTOM 0x01000000

typedef struct RangeCoder {
    const uint8_t *src, *src_end;

    uint32_t range, low;
    int      got_error;
} RangeCoder;
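
/* Every 16x16 luma block (8x8 for chroma) is coded as one of the block
 * types below; each of the three planes keeps its own block-type context
 * and per-type coder state. */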

enum BlockType {
    FILL_BLOCK = 0,
    IMAGE_BLOCK,
    DCT_BLOCK,
    HAAR_BLOCK,
    SKIP_BLOCK
};

typedef struct BlockTypeContext {
    int last_type;
    Model bt_model[5];
} BlockTypeContext;

typedef struct FillBlockCoder {
    int fill_val;
    Model coef_model;
} FillBlockCoder;

typedef struct ImageBlockCoder {
    Model256 esc_model, vec_entry_model;
    Model vec_size_model;
    Model vq_model[125];
} ImageBlockCoder;

typedef struct DCTBlockCoder {
    int *prev_dc;
    int prev_dc_stride;
    int prev_dc_height;
    int quality;
    uint16_t qmat[64];
    Model dc_model;
    Model2 sign_model;
    Model256 ac_model;
} DCTBlockCoder;

typedef struct HaarBlockCoder {
    int quality, scale;
    Model256 coef_model;
    Model coef_hi_model;
} HaarBlockCoder;

typedef struct MSS3Context {
    AVCodecContext   *avctx;
    AVFrame          *pic;

    int              got_error;
    RangeCoder       coder;
    BlockTypeContext btype[3];
    FillBlockCoder   fill_coder[3];
    ImageBlockCoder  image_coder[3];
    DCTBlockCoder    dct_coder[3];
    HaarBlockCoder   haar_coder[3];

    int dctblock[64];
    int hblock[16 * 16];
} MSS3Context;
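
/* Binary model maintenance: the weight counters are halved once the total
 * exceeds 0x2000, and the rescale interval (upd_val) grows by a factor of
 * 5/4 up to 64, so the model adapts quickly at first and then settles. */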

static void model2_reset(Model2 *m)
{
    m->zero_weight  = 1;
    m->total_weight = 2;
    m->zero_freq    = 0x1000;
    m->total_freq   = 0x2000;
    m->upd_val      = 4;
    m->till_rescale = 4;
}

static void model2_update(Model2 *m, int bit)
{
    unsigned scale;

    if (!bit)
        m->zero_weight++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->total_weight += m->upd_val;
    if (m->total_weight > 0x2000) {
        m->total_weight = (m->total_weight + 1) >> 1;
        m->zero_weight  = (m->zero_weight  + 1) >> 1;
        if (m->total_weight == m->zero_weight)
            m->total_weight = m->zero_weight + 1;
    }
    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > 64)
        m->upd_val = 64;
    scale = 0x80000000u / m->total_weight;
    m->zero_freq    = m->zero_weight  * scale >> 18;
    m->total_freq   = m->total_weight * scale >> 18;
    m->till_rescale = m->upd_val;
}

static void model_update(Model *m, int val)
{
    int i, sum = 0;
    unsigned scale;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < m->num_syms; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight +=  m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    for (i = 0; i < m->num_syms; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum        += m->weights[i];
    }

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model_reset(Model *m)
{
    int i;

    m->tot_weight = 0;
    for (i = 0; i < m->num_syms - 1; i++)
        m->weights[i] = 1;
    m->weights[m->num_syms - 1] = 0;

    m->upd_val      = m->num_syms;
    m->till_rescale = 1;
    model_update(m, m->num_syms - 1);
    m->till_rescale =
    m->upd_val      = (m->num_syms + 6) >> 1;
}

static av_cold void model_init(Model *m, int num_syms)
{
    m->num_syms    = num_syms;
    m->max_upd_val = 8 * num_syms + 48;

    model_reset(m);
}
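
/* The 256-symbol model additionally maintains secondary[]: an index from
 * coarse (MODEL256_SEC_SCALE) slices of the cumulative frequency range to
 * candidate symbols, so that rac_get_model256_sym() only has to search a
 * narrow interval of freqs[]. */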

static void model256_update(Model256 *m, int val)
{
    int i, sum = 0;
    unsigned scale;
    int send, sidx = 1;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < 256; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight += m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    m->secondary[0] = 0;
    for (i = 0; i < 256; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum        += m->weights[i];
        send = m->freqs[i] >> MODEL256_SEC_SCALE;
        while (sidx <= send)
            m->secondary[sidx++] = i - 1;
    }
    while (sidx < m->sec_size)
        m->secondary[sidx++] = 255;

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model256_reset(Model256 *m)
{
    int i;

    for (i = 0; i < 255; i++)
        m->weights[i] = 1;
    m->weights[255] = 0;

    m->tot_weight   = 0;
    m->upd_val      = 256;
    m->till_rescale = 1;
    model256_update(m, 255);
    m->till_rescale =
    m->upd_val      = (256 + 6) >> 1;
}

static av_cold void model256_init(Model256 *m)
{
    m->max_upd_val = 8 * 256 + 48;
    m->sec_size    = (1 << 6) + 2;

    model256_reset(m);
}
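
/* Range decoder core: rac_init() preloads up to four bytes into 'low',
 * rac_get_bit()/rac_get_bits() read raw equiprobable values, and the
 * rac_get_model*_sym() helpers decode a symbol with the corresponding
 * adaptive model and update it afterwards. */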

static void rac_init(RangeCoder *c, const uint8_t *src, int size)
{
    int i;

    c->src     = src;
    c->src_end = src + size;
    c->low     = 0;
    for (i = 0; i < FFMIN(size, 4); i++)
        c->low = (c->low << 8) | *c->src++;
    c->range     = 0xFFFFFFFF;
    c->got_error = 0;
}

static void rac_normalise(RangeCoder *c)
{
    for (;;) {
        c->range <<= 8;
        c->low   <<= 8;
        if (c->src < c->src_end) {
            c->low |= *c->src++;
        } else if (!c->low) {
            c->got_error = 1;
            c->low = 1;
        }
        if (c->low > c->range) {
            c->got_error = 1;
            c->low = 1;
        }
        if (c->range >= RAC_BOTTOM)
            return;
    }
}

static int rac_get_bit(RangeCoder *c)
{
    int bit;

    c->range >>= 1;

    bit = (c->range <= c->low);
    if (bit)
        c->low -= c->range;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return bit;
}

static int rac_get_bits(RangeCoder *c, int nbits)
{
    int val;

    c->range >>= nbits;
    val = c->low / c->range;
    c->low -= c->range * val;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return val;
}

static int rac_get_model2_sym(RangeCoder *c, Model2 *m)
{
    int bit, helper;

    helper = m->zero_freq * (c->range >> MODEL2_SCALE);
    bit    = (c->low >= helper);
    if (bit) {
        c->low   -= helper;
        c->range -= helper;
    } else {
        c->range  = helper;
    }

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model2_update(m, bit);

    return bit;
}
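
/* Multi-symbol decoding searches the cumulative frequency table for the
 * interval containing the current code value: a plain binary search for
 * the small models, and a secondary-table-assisted search for Model256. */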

static int rac_get_model_sym(RangeCoder *c, Model *m)
{
    int val;
    int end, end2;
    unsigned prob, prob2, helper;

    prob  = 0;
    prob2 = c->range;
    c->range >>= MODEL_SCALE;
    val   = 0;
    end   = m->num_syms >> 1;
    end2  = m->num_syms;
    do {
        helper = m->freqs[end] * c->range;
        if (helper <= c->low) {
            val   = end;
            prob  = helper;
        } else {
            end2  = end;
            prob2 = helper;
        }
        end = (end2 + val) >> 1;
    } while (end != val);
    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model_update(m, val);

    return val;
}

static int rac_get_model256_sym(RangeCoder *c, Model256 *m)
{
    int prob, prob2, helper, val;
    int start, end;
    int ssym;

    prob2 = c->range;
    c->range >>= MODEL_SCALE;

    helper = c->low / c->range;
    ssym   = helper >> MODEL256_SEC_SCALE;
    val    = m->secondary[ssym];

    end = start = m->secondary[ssym + 1] + 1;
    while (end > val + 1) {
        ssym = (end + val) >> 1;
        if (m->freqs[ssym] <= helper) {
            end = start;
            val = ssym;
        } else {
            end   = (end + val) >> 1;
            start = ssym;
        }
    }
    prob = m->freqs[val] * c->range;
    if (val != 255)
        prob2 = m->freqs[val + 1] * c->range;

    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model256_update(m, val);

    return val;
}
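
/* Block-level decoding. The block type is coded with a model conditioned on
 * the previous block type; non-zero coefficients are coded as a magnitude
 * class plus a sign bit and, for larger classes, extra magnitude bits. */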

static int decode_block_type(RangeCoder *c, BlockTypeContext *bt)
{
    bt->last_type = rac_get_model_sym(c, &bt->bt_model[bt->last_type]);

    return bt->last_type;
}

static int decode_coeff(RangeCoder *c, Model *m)
{
    int val, sign;

    val = rac_get_model_sym(c, m);
    if (val) {
        sign = rac_get_bit(c);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;
    }

    return val;
}

static void decode_fill_block(RangeCoder *c, FillBlockCoder *fc,
                              uint8_t *dst, int stride, int block_size)
{
    int i;

    fc->fill_val += decode_coeff(c, &fc->coef_model);

    for (i = 0; i < block_size; i++, dst += stride)
        memset(dst, fc->fill_val, block_size);
}
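
/* Image blocks use a small per-block palette of 2-4 entries plus an escape
 * for literal pixel values; palette indices are coded with one of 125
 * context models selected from the left, top and top-left neighbouring
 * indices (A + B * 5 + C * 25). */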

static void decode_image_block(RangeCoder *c, ImageBlockCoder *ic,
                               uint8_t *dst, int stride, int block_size)
{
    int i, j;
    int vec_size;
    int vec[4];
    int prev_line[16];
    int A, B, C;

    vec_size = rac_get_model_sym(c, &ic->vec_size_model) + 2;
    for (i = 0; i < vec_size; i++)
        vec[i] = rac_get_model256_sym(c, &ic->vec_entry_model);
    for (; i < 4; i++)
        vec[i] = 0;
    memset(prev_line, 0, sizeof(prev_line));

    for (j = 0; j < block_size; j++) {
        A = 0;
        B = 0;
        for (i = 0; i < block_size; i++) {
            C = B;
            B = prev_line[i];
            A = rac_get_model_sym(c, &ic->vq_model[A + B * 5 + C * 25]);

            prev_line[i] = A;
            if (A < 4)
                dst[i] = vec[A];
            else
                dst[i] = rac_get_model256_sym(c, &ic->esc_model);
        }
        dst += stride;
    }
}
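
/* DCT blocks: the DC coefficient is coded differentially and predicted from
 * the left or top neighbour, chosen by comparing gradients against the
 * top-left block; AC coefficients use run/level coding (0xF0 is a run of
 * sixteen zeroes) and are dequantised into zigzag positions before the
 * inverse transform (ff_mss34_dct_put()). */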

static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block,
                      int bx, int by)
{
    int skip, val, sign, pos = 1, zz_pos, dc;
    int blk_pos = bx + by * bc->prev_dc_stride;

    memset(block, 0, sizeof(*block) * 64);

    dc = decode_coeff(c, &bc->dc_model);
    if (by) {
        if (bx) {
            int l, tl, t;

            l  = bc->prev_dc[blk_pos - 1];
            tl = bc->prev_dc[blk_pos - 1 - bc->prev_dc_stride];
            t  = bc->prev_dc[blk_pos     - bc->prev_dc_stride];

            if (FFABS(t - tl) <= FFABS(l - tl))
                dc += l;
            else
                dc += t;
        } else {
            dc += bc->prev_dc[blk_pos - bc->prev_dc_stride];
        }
    } else if (bx) {
        dc += bc->prev_dc[bx - 1];
    }
    bc->prev_dc[blk_pos] = dc;
    block[0] = dc * bc->qmat[0];

    while (pos < 64) {
        val = rac_get_model256_sym(c, &bc->ac_model);
        if (!val)
            return 0;
        if (val == 0xF0) {
            pos += 16;
            continue;
        }
        skip = val >> 4;
        val  = val & 0xF;
        if (!val)
            return -1;

        pos += skip;
        if (pos >= 64)
            return -1;

        sign = rac_get_model2_sym(c, &bc->sign_model);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;

        zz_pos = ff_zigzag_direct[pos];
        block[zz_pos] = val * bc->qmat[zz_pos];
        pos++;
    }

    return pos == 64 ? 0 : -1;
}

static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc,
                             uint8_t *dst, int stride, int block_size,
                             int *block, int mb_x, int mb_y)
{
    int i, j;
    int bx, by;
    int nblocks = block_size >> 3;

    bx = mb_x * nblocks;
    by = mb_y * nblocks;

    for (j = 0; j < nblocks; j++) {
        for (i = 0; i < nblocks; i++) {
            if (decode_dct(c, bc, block, bx + i, by + j)) {
                c->got_error = 1;
                return;
            }
            ff_mss34_dct_put(dst + i * 8, stride, block);
        }
        dst += 8 * stride;
    }
}
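
/* Haar blocks: the low-pass quadrant is read with the 256-symbol model and
 * the high-pass coefficients with decode_coeff(); all are scaled by the
 * quality-derived factor and run through one inverse Haar-like step to
 * reconstruct the full-resolution block. */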

static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc,
                              uint8_t *dst, int stride, int block_size,
                              int *block)
{
    const int hsize = block_size >> 1;
    int A, B, C, D, t1, t2, t3, t4;
    int i, j;

    for (j = 0; j < block_size; j++) {
        for (i = 0; i < block_size; i++) {
            if (i < hsize && j < hsize)
                block[i] = rac_get_model256_sym(c, &hc->coef_model);
            else
                block[i] = decode_coeff(c, &hc->coef_hi_model);
            block[i] *= hc->scale;
        }
        block += block_size;
    }
    block -= block_size * block_size;

    for (j = 0; j < hsize; j++) {
        for (i = 0; i < hsize; i++) {
            A = block[i];
            B = block[i + hsize];
            C = block[i + hsize * block_size];
            D = block[i + hsize * block_size + hsize];

            t1 = A - B;
            t2 = C - D;
            t3 = A + B;
            t4 = C + D;
            dst[i * 2]              = av_clip_uint8(t1 - t2);
            dst[i * 2 +     stride] = av_clip_uint8(t1 + t2);
            dst[i * 2 + 1]          = av_clip_uint8(t3 - t4);
            dst[i * 2 + 1 + stride] = av_clip_uint8(t3 + t4);
        }
        block += block_size;
        dst   += stride * 2;
    }
}
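
/* Model state is reset for every frame that carries coded block data; the
 * DCT quantisation matrices and the Haar scale factor are regenerated only
 * when the quality value changes. */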

static void reset_coders(MSS3Context *ctx, int quality)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        ctx->btype[i].last_type = SKIP_BLOCK;
        for (j = 0; j < 5; j++)
            model_reset(&ctx->btype[i].bt_model[j]);
        ctx->fill_coder[i].fill_val = 0;
        model_reset(&ctx->fill_coder[i].coef_model);
        model256_reset(&ctx->image_coder[i].esc_model);
        model256_reset(&ctx->image_coder[i].vec_entry_model);
        model_reset(&ctx->image_coder[i].vec_size_model);
        for (j = 0; j < 125; j++)
            model_reset(&ctx->image_coder[i].vq_model[j]);
        if (ctx->dct_coder[i].quality != quality) {
            ctx->dct_coder[i].quality = quality;
            ff_mss34_gen_quant_mat(ctx->dct_coder[i].qmat, quality, !i);
        }
        memset(ctx->dct_coder[i].prev_dc, 0,
               sizeof(*ctx->dct_coder[i].prev_dc) *
               ctx->dct_coder[i].prev_dc_stride *
               ctx->dct_coder[i].prev_dc_height);
        model_reset(&ctx->dct_coder[i].dc_model);
        model2_reset(&ctx->dct_coder[i].sign_model);
        model256_reset(&ctx->dct_coder[i].ac_model);
        if (ctx->haar_coder[i].quality != quality) {
            ctx->haar_coder[i].quality = quality;
            ctx->haar_coder[i].scale   = 17 - 7 * quality / 50;
        }
        model_reset(&ctx->haar_coder[i].coef_hi_model);
        model256_reset(&ctx->haar_coder[i].coef_model);
    }
}

static av_cold void init_coders(MSS3Context *ctx)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 5; j++)
            model_init(&ctx->btype[i].bt_model[j], 5);
        model_init(&ctx->fill_coder[i].coef_model, 12);
        model256_init(&ctx->image_coder[i].esc_model);
        model256_init(&ctx->image_coder[i].vec_entry_model);
        model_init(&ctx->image_coder[i].vec_size_model, 3);
        for (j = 0; j < 125; j++)
            model_init(&ctx->image_coder[i].vq_model[j], 5);
        model_init(&ctx->dct_coder[i].dc_model, 12);
        model256_init(&ctx->dct_coder[i].ac_model);
        model_init(&ctx->haar_coder[i].coef_hi_model, 12);
        model256_init(&ctx->haar_coder[i].coef_model);
    }
}
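
/* Frame decoding. The 27-byte header stores the frame type, the coded
 * region (x, y, width, height; width and height must be multiples of 16)
 * and the quality (1-100); the range-coded block data follows. A frame
 * without payload just returns the previously decoded picture. */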

static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MSS3Context *c = avctx->priv_data;
    RangeCoder *acoder = &c->coder;
    GetByteContext gb;
    uint8_t *dst[3];
    int dec_width, dec_height, dec_x, dec_y, quality, keyframe;
    int x, y, i, mb_width, mb_height, blk_size, btype;
    int ret;

    if (buf_size < HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least %d bytes, got %d instead\n",
               HEADER_SIZE, buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&gb, buf, buf_size);
    keyframe   = bytestream2_get_be32(&gb);
    if (keyframe & ~0x301) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type %X\n", keyframe);
        return AVERROR_INVALIDDATA;
    }
    keyframe   = !(keyframe & 1);
    bytestream2_skip(&gb, 6);
    dec_x      = bytestream2_get_be16(&gb);
    dec_y      = bytestream2_get_be16(&gb);
    dec_width  = bytestream2_get_be16(&gb);
    dec_height = bytestream2_get_be16(&gb);

    if (dec_x + dec_width > avctx->width ||
        dec_y + dec_height > avctx->height ||
        (dec_width | dec_height) & 0xF) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d +%d,%d\n",
               dec_width, dec_height, dec_x, dec_y);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);
    quality    = bytestream2_get_byte(&gb);
    if (quality < 1 || quality > 100) {
        av_log(avctx, AV_LOG_ERROR, "Invalid quality setting %d\n", quality);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);

    if (keyframe && !bytestream2_get_bytes_left(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "Keyframe without data found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!keyframe && c->got_error)
        return buf_size;
    c->got_error = 0;

    if ((ret = ff_reget_buffer(avctx, c->pic)) < 0)
        return ret;
    c->pic->key_frame = keyframe;
    c->pic->pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    if (!bytestream2_get_bytes_left(&gb)) {
        if ((ret = av_frame_ref(data, c->pic)) < 0)
            return ret;
        *got_frame = 1;

        return buf_size;
    }

    reset_coders(c, quality);

    rac_init(acoder, buf + HEADER_SIZE, buf_size - HEADER_SIZE);

    mb_width  = dec_width  >> 4;
    mb_height = dec_height >> 4;
    dst[0] = c->pic->data[0] + dec_x     +  dec_y      * c->pic->linesize[0];
    dst[1] = c->pic->data[1] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[1];
    dst[2] = c->pic->data[2] + dec_x / 2 + (dec_y / 2) * c->pic->linesize[2];
    for (y = 0; y < mb_height; y++) {
        for (x = 0; x < mb_width; x++) {
            for (i = 0; i < 3; i++) {
                blk_size = 8 << !i;

                btype = decode_block_type(acoder, c->btype + i);
                switch (btype) {
                case FILL_BLOCK:
                    decode_fill_block(acoder, c->fill_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic->linesize[i], blk_size);
                    break;
                case IMAGE_BLOCK:
                    decode_image_block(acoder, c->image_coder + i,
                                       dst[i] + x * blk_size,
                                       c->pic->linesize[i], blk_size);
                    break;
                case DCT_BLOCK:
                    decode_dct_block(acoder, c->dct_coder + i,
                                     dst[i] + x * blk_size,
                                     c->pic->linesize[i], blk_size,
                                     c->dctblock, x, y);
                    break;
                case HAAR_BLOCK:
                    decode_haar_block(acoder, c->haar_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic->linesize[i], blk_size,
                                      c->hblock);
                    break;
                }
                if (c->got_error || acoder->got_error) {
                    av_log(avctx, AV_LOG_ERROR, "Error decoding block %d,%d\n",
                           x, y);
                    c->got_error = 1;
                    return AVERROR_INVALIDDATA;
                }
            }
        }
        dst[0] += c->pic->linesize[0] * 16;
        dst[1] += c->pic->linesize[1] * 8;
        dst[2] += c->pic->linesize[2] * 8;
    }

    if ((ret = av_frame_ref(data, c->pic)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}

static av_cold int mss3_decode_end(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    av_frame_free(&c->pic);
    for (i = 0; i < 3; i++)
        av_freep(&c->dct_coder[i].prev_dc);

    return 0;
}

static av_cold int mss3_decode_init(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    c->avctx = avctx;

    if ((avctx->width & 0xF) || (avctx->height & 0xF)) {
        av_log(avctx, AV_LOG_ERROR,
               "Image dimensions should be a multiple of 16.\n");
        return AVERROR_INVALIDDATA;
    }

    c->got_error = 0;
    for (i = 0; i < 3; i++) {
        int b_width  = avctx->width  >> (2 + !!i);
        int b_height = avctx->height >> (2 + !!i);
        c->dct_coder[i].prev_dc_stride = b_width;
        c->dct_coder[i].prev_dc_height = b_height;
        c->dct_coder[i].prev_dc = av_malloc(sizeof(*c->dct_coder[i].prev_dc) *
                                            b_width * b_height);
        if (!c->dct_coder[i].prev_dc) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate buffer\n");
            av_frame_free(&c->pic);
            while (i >= 0) {
                av_freep(&c->dct_coder[i].prev_dc);
                i--;
            }
            return AVERROR(ENOMEM);
        }
    }

    c->pic = av_frame_alloc();
    if (!c->pic) {
        mss3_decode_end(avctx);
        return AVERROR(ENOMEM);
    }

    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    init_coders(c);

    return 0;
}

AVCodec ff_msa1_decoder = {
    .name           = "msa1",
    .long_name      = NULL_IF_CONFIG_SMALL("MS ATC Screen"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSA1,
    .priv_data_size = sizeof(MSS3Context),
    .init           = mss3_decode_init,
    .close          = mss3_decode_end,
    .decode         = mss3_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1,
};