/*
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Microsoft Screen 3 (aka Microsoft ATC Screen) decoder
 */

#include "avcodec.h"
#include "bytestream.h"
#include "dsputil.h"
#include "mss34dsp.h"

#define HEADER_SIZE 27

#define MODEL2_SCALE       13
#define MODEL_SCALE        15
#define MODEL256_SEC_SCALE  9
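
/* Adaptive probability models used by the range decoder:
 * Model2 is a binary (zero/one) model, Model handles up to 16 symbols and
 * Model256 handles a full byte alphabet with a secondary index table for
 * faster symbol lookup. All of them rescale their weights periodically. */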
typedef struct Model2 {
    int      upd_val, till_rescale;
    unsigned zero_freq,  zero_weight;
    unsigned total_freq, total_weight;
} Model2;

typedef struct Model {
    int weights[16], freqs[16];
    int num_syms;
    int tot_weight;
    int upd_val, max_upd_val, till_rescale;
} Model;

typedef struct Model256 {
    int weights[256], freqs[256];
    int tot_weight;
    int secondary[68];
    int sec_size;
    int upd_val, max_upd_val, till_rescale;
} Model256;
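
/* Binary range decoder operating on a 32-bit low/range state; it renormalises
 * by pulling in one source byte at a time whenever range drops below
 * RAC_BOTTOM. */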
#define RAC_BOTTOM 0x01000000
typedef struct RangeCoder {
    const uint8_t *src, *src_end;

    uint32_t range, low;
    int      got_error;
} RangeCoder;

enum BlockType {
    FILL_BLOCK = 0,
    IMAGE_BLOCK,
    DCT_BLOCK,
    HAAR_BLOCK,
    SKIP_BLOCK
};

typedef struct BlockTypeContext {
    int last_type;
    Model bt_model[5];
} BlockTypeContext;

typedef struct FillBlockCoder {
    int fill_val;
    Model coef_model;
} FillBlockCoder;

typedef struct ImageBlockCoder {
    Model256 esc_model, vec_entry_model;
    Model    vec_size_model;
    Model    vq_model[125];
} ImageBlockCoder;

typedef struct DCTBlockCoder {
    int      *prev_dc;
    int      prev_dc_stride;
    int      prev_dc_height;
    int      quality;
    uint16_t qmat[64];
    Model    dc_model;
    Model2   sign_model;
    Model256 ac_model;
} DCTBlockCoder;

typedef struct HaarBlockCoder {
    int      quality, scale;
    Model256 coef_model;
    Model    coef_hi_model;
} HaarBlockCoder;

typedef struct MSS3Context {
    AVCodecContext   *avctx;
    AVFrame          pic;

    int              got_error;
    RangeCoder       coder;
    BlockTypeContext btype[3];
    FillBlockCoder   fill_coder[3];
    ImageBlockCoder  image_coder[3];
    DCTBlockCoder    dct_coder[3];
    HaarBlockCoder   haar_coder[3];

    int              dctblock[64];
    int              hblock[16 * 16];
} MSS3Context;

static void model2_reset(Model2 *m)
{
    m->zero_weight  = 1;
    m->total_weight = 2;
    m->zero_freq    = 0x1000;
    m->total_freq   = 0x2000;
    m->upd_val      = 4;
    m->till_rescale = 4;
}

static void model2_update(Model2 *m, int bit)
{
    unsigned scale;

    if (!bit)
        m->zero_weight++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->total_weight += m->upd_val;
    if (m->total_weight > 0x2000) {
        m->total_weight = (m->total_weight + 1) >> 1;
        m->zero_weight  = (m->zero_weight  + 1) >> 1;
        if (m->total_weight == m->zero_weight)
            m->total_weight = m->zero_weight + 1;
    }
    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > 64)
        m->upd_val = 64;
    scale = 0x80000000u / m->total_weight;
    m->zero_freq    = m->zero_weight  * scale >> 18;
    m->total_freq   = m->total_weight * scale >> 18;
    m->till_rescale = m->upd_val;
}

static void model_update(Model *m, int val)
{
    int i, sum = 0;
    unsigned scale;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < m->num_syms; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight +=  m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    for (i = 0; i < m->num_syms; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum        += m->weights[i];
    }

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model_reset(Model *m)
{
    int i;

    m->tot_weight = 0;
    for (i = 0; i < m->num_syms - 1; i++)
        m->weights[i] = 1;
    m->weights[m->num_syms - 1] = 0;

    m->upd_val      = m->num_syms;
    m->till_rescale = 1;
    model_update(m, m->num_syms - 1);
    m->till_rescale =
    m->upd_val      = (m->num_syms + 6) >> 1;
}

static av_cold void model_init(Model *m, int num_syms)
{
    m->num_syms    = num_syms;
    m->max_upd_val = 8 * num_syms + 48;

    model_reset(m);
}

static void model256_update(Model256 *m, int val)
{
    int i, sum = 0;
    unsigned scale;
    int send, sidx = 1;

    m->weights[val]++;
    m->till_rescale--;
    if (m->till_rescale)
        return;

    m->tot_weight += m->upd_val;
    if (m->tot_weight > 0x8000) {
        m->tot_weight = 0;
        for (i = 0; i < 256; i++) {
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            m->tot_weight +=  m->weights[i];
        }
    }
    scale = 0x80000000u / m->tot_weight;
    m->secondary[0] = 0;
    for (i = 0; i < 256; i++) {
        m->freqs[i] = sum * scale >> 16;
        sum        += m->weights[i];
        send = m->freqs[i] >> MODEL256_SEC_SCALE;
        while (sidx <= send)
            m->secondary[sidx++] = i - 1;
    }
    while (sidx < m->sec_size)
        m->secondary[sidx++] = 255;

    m->upd_val = m->upd_val * 5 >> 2;
    if (m->upd_val > m->max_upd_val)
        m->upd_val = m->max_upd_val;
    m->till_rescale = m->upd_val;
}

static void model256_reset(Model256 *m)
{
    int i;

    for (i = 0; i < 255; i++)
        m->weights[i] = 1;
    m->weights[255] = 0;

    m->tot_weight   = 0;
    m->upd_val      = 256;
    m->till_rescale = 1;
    model256_update(m, 255);
    m->till_rescale =
    m->upd_val      = (256 + 6) >> 1;
}

static av_cold void model256_init(Model256 *m)
{
    m->max_upd_val = 8 * 256 + 48;
    m->sec_size    = (1 << 6) + 2;

    model256_reset(m);
}

static void rac_init(RangeCoder *c, const uint8_t *src, int size)
{
    int i;

    c->src     = src;
    c->src_end = src + size;
    c->low     = 0;
    for (i = 0; i < FFMIN(size, 4); i++)
        c->low = (c->low << 8) | *c->src++;
    c->range     = 0xFFFFFFFF;
    c->got_error = 0;
}

static void rac_normalise(RangeCoder *c)
{
    for (;;) {
        c->range <<= 8;
        c->low   <<= 8;
        if (c->src < c->src_end) {
            c->low |= *c->src++;
        } else if (!c->low) {
            c->got_error = 1;
            return;
        }
        if (c->range >= RAC_BOTTOM)
            return;
    }
}

static int rac_get_bit(RangeCoder *c)
{
    int bit;

    c->range >>= 1;

    bit = (c->range <= c->low);
    if (bit)
        c->low -= c->range;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return bit;
}

static int rac_get_bits(RangeCoder *c, int nbits)
{
    int val;

    c->range >>= nbits;
    val = c->low / c->range;
    c->low -= c->range * val;

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    return val;
}
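
/* Decode one bit using the binary Model2 probabilities and update the model. */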
static int rac_get_model2_sym(RangeCoder *c, Model2 *m)
{
    int bit, helper;

    helper = m->zero_freq * (c->range >> MODEL2_SCALE);
    bit    = (c->low >= helper);
    if (bit) {
        c->low   -= helper;
        c->range -= helper;
    } else {
        c->range  = helper;
    }

    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model2_update(m, bit);

    return bit;
}
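
/* Decode a symbol from a small Model by binary search over its cumulative
 * frequency table, then update the model. */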
static int rac_get_model_sym(RangeCoder *c, Model *m)
{
    int prob, prob2, helper, val;
    int end, end2;

    prob       = 0;
    prob2      = c->range;
    c->range >>= MODEL_SCALE;
    val        = 0;
    end        = m->num_syms >> 1;
    end2       = m->num_syms;
    do {
        helper = m->freqs[end] * c->range;
        if (helper <= c->low) {
            val   = end;
            prob  = helper;
        } else {
            end2  = end;
            prob2 = helper;
        }
        end = (end2 + val) >> 1;
    } while (end != val);

    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model_update(m, val);

    return val;
}
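
/* Decode a byte-sized symbol: the secondary[] table narrows the search range
 * before the binary search over the 256-entry frequency table. */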
static int rac_get_model256_sym(RangeCoder *c, Model256 *m)
{
    int prob, prob2, helper, val;
    int start, end;
    int ssym;

    prob2      = c->range;
    c->range >>= MODEL_SCALE;

    helper     = c->low / c->range;
    ssym       = helper >> MODEL256_SEC_SCALE;
    val        = m->secondary[ssym];

    end = start = m->secondary[ssym + 1] + 1;
    while (end > val + 1) {
        ssym = (end + val) >> 1;
        if (m->freqs[ssym] <= helper) {
            end = start;
            val = ssym;
        } else {
            end   = (end + val) >> 1;
            start = ssym;
        }
    }
    prob = m->freqs[val] * c->range;
    if (val != 255)
        prob2 = m->freqs[val + 1] * c->range;

    c->low  -= prob;
    c->range = prob2 - prob;
    if (c->range < RAC_BOTTOM)
        rac_normalise(c);

    model256_update(m, val);

    return val;
}

static int decode_block_type(RangeCoder *c, BlockTypeContext *bt)
{
    bt->last_type = rac_get_model_sym(c, &bt->bt_model[bt->last_type]);

    return bt->last_type;
}
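
/* Decode a signed coefficient: a magnitude class from the model, a raw sign
 * bit, and for classes above 1 extra raw bits selecting the value within the
 * class. */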
static int decode_coeff(RangeCoder *c, Model *m)
{
    int val, sign;

    val = rac_get_model_sym(c, m);
    if (val) {
        sign = rac_get_bit(c);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;
    }

    return val;
}

static void decode_fill_block(RangeCoder *c, FillBlockCoder *fc,
                              uint8_t *dst, int stride, int block_size)
{
    int i;

    fc->fill_val += decode_coeff(c, &fc->coef_model);

    for (i = 0; i < block_size; i++, dst += stride)
        memset(dst, fc->fill_val, block_size);
}
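
/* Image (vector-quantised) block: two to four vector entries are read, then
 * each pixel is coded with a VQ model selected from its left, top and
 * top-left neighbours; index 4 escapes to a literal pixel value. */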
static void decode_image_block(RangeCoder *c, ImageBlockCoder *ic,
                               uint8_t *dst, int stride, int block_size)
{
    int i, j;
    int vec_size;
    int vec[4];
    int prev_line[16];
    int A, B, C;

    vec_size = rac_get_model_sym(c, &ic->vec_size_model) + 2;
    for (i = 0; i < vec_size; i++)
        vec[i] = rac_get_model256_sym(c, &ic->vec_entry_model);
    for (; i < 4; i++)
        vec[i] = 0;
    memset(prev_line, 0, sizeof(prev_line));

    for (j = 0; j < block_size; j++) {
        A = 0;
        B = 0;
        for (i = 0; i < block_size; i++) {
            C = B;
            B = prev_line[i];
            A = rac_get_model_sym(c, &ic->vq_model[A + B * 5 + C * 25]);

            prev_line[i] = A;
            if (A < 4)
                dst[i] = vec[A];
            else
                dst[i] = rac_get_model256_sym(c, &ic->esc_model);
        }
        dst += stride;
    }
}
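
/* Decode one 8x8 DCT block: the DC coefficient is predicted from the left and
 * top neighbours, AC coefficients are coded as (run, magnitude class) pairs
 * in zigzag order and dequantised with the per-plane quantisation matrix. */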
static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block,
                      int bx, int by)
{
    int skip, val, sign, pos = 1, zz_pos, dc;
    int blk_pos = bx + by * bc->prev_dc_stride;

    memset(block, 0, sizeof(*block) * 64);

    dc = decode_coeff(c, &bc->dc_model);
    if (by) {
        if (bx) {
            int l, tl, t;

            l  = bc->prev_dc[blk_pos - 1];
            tl = bc->prev_dc[blk_pos - 1 - bc->prev_dc_stride];
            t  = bc->prev_dc[blk_pos     - bc->prev_dc_stride];

            if (FFABS(t - tl) <= FFABS(l - tl))
                dc += l;
            else
                dc += t;
        } else {
            dc += bc->prev_dc[blk_pos - bc->prev_dc_stride];
        }
    } else if (bx) {
        dc += bc->prev_dc[bx - 1];
    }
    bc->prev_dc[blk_pos] = dc;
    block[0] = dc * bc->qmat[0];

    while (pos < 64) {
        val = rac_get_model256_sym(c, &bc->ac_model);
        if (!val)
            return 0;
        if (val == 0xF0) {
            pos += 16;
            continue;
        }
        skip = val >> 4;
        val  = val & 0xF;
        if (!val)
            return -1;
        pos += skip;
        if (pos >= 64)
            return -1;

        sign = rac_get_model2_sym(c, &bc->sign_model);
        if (val > 1) {
            val--;
            val = (1 << val) + rac_get_bits(c, val);
        }
        if (!sign)
            val = -val;

        zz_pos = ff_zigzag_direct[pos];
        block[zz_pos] = val * bc->qmat[zz_pos];
        pos++;
    }

    return pos == 64 ? 0 : -1;
}

static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc,
                             uint8_t *dst, int stride, int block_size,
                             int *block, int mb_x, int mb_y)
{
    int i, j;
    int bx, by;
    int nblocks = block_size >> 3;

    bx = mb_x * nblocks;
    by = mb_y * nblocks;

    for (j = 0; j < nblocks; j++) {
        for (i = 0; i < nblocks; i++) {
            if (decode_dct(c, bc, block, bx + i, by + j)) {
                c->got_error = 1;
                return;
            }
            ff_mss34_dct_put(dst + i * 8, stride, block);
        }
        dst += 8 * stride;
    }
}
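
/* Haar block: coefficients in the top-left quadrant (the low-frequency band)
 * come from the 256-symbol model, the remaining ones from the coefficient
 * model; a one-level inverse Haar transform then reconstructs 2x2 output
 * pixels per coefficient group. */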
static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc,
                              uint8_t *dst, int stride, int block_size,
                              int *block)
{
    const int hsize = block_size >> 1;
    int A, B, C, D, t1, t2, t3, t4;
    int i, j;

    for (j = 0; j < block_size; j++) {
        for (i = 0; i < block_size; i++) {
            if (i < hsize && j < hsize)
                block[i] = rac_get_model256_sym(c, &hc->coef_model);
            else
                block[i] = decode_coeff(c, &hc->coef_hi_model);
            block[i] *= hc->scale;
        }
        block += block_size;
    }
    block -= block_size * block_size;

    for (j = 0; j < hsize; j++) {
        for (i = 0; i < hsize; i++) {
            A = block[i];
            B = block[i + hsize];
            C = block[i + hsize * block_size];
            D = block[i + hsize * block_size + hsize];

            t1 = A - B;
            t2 = C - D;
            t3 = A + B;
            t4 = C + D;
            dst[i * 2]              = av_clip_uint8(t1 - t2);
            dst[i * 2 + stride]     = av_clip_uint8(t1 + t2);
            dst[i * 2 + 1]          = av_clip_uint8(t3 - t4);
            dst[i * 2 + 1 + stride] = av_clip_uint8(t3 + t4);
        }
        block += block_size;
        dst   += stride * 2;
    }
}
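
/* Reset all adaptive models and per-plane state; the quantisation matrices
 * and Haar scale are only regenerated when the quality value changes. */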
static void reset_coders(MSS3Context *ctx, int quality)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        ctx->btype[i].last_type = SKIP_BLOCK;
        for (j = 0; j < 5; j++)
            model_reset(&ctx->btype[i].bt_model[j]);
        ctx->fill_coder[i].fill_val = 0;
        model_reset(&ctx->fill_coder[i].coef_model);
        model256_reset(&ctx->image_coder[i].esc_model);
        model256_reset(&ctx->image_coder[i].vec_entry_model);
        model_reset(&ctx->image_coder[i].vec_size_model);
        for (j = 0; j < 125; j++)
            model_reset(&ctx->image_coder[i].vq_model[j]);
        if (ctx->dct_coder[i].quality != quality) {
            ctx->dct_coder[i].quality = quality;
            ff_mss34_gen_quant_mat(ctx->dct_coder[i].qmat, quality, !i);
        }
        memset(ctx->dct_coder[i].prev_dc, 0,
               sizeof(*ctx->dct_coder[i].prev_dc) *
               ctx->dct_coder[i].prev_dc_stride *
               ctx->dct_coder[i].prev_dc_height);
        model_reset(&ctx->dct_coder[i].dc_model);
        model2_reset(&ctx->dct_coder[i].sign_model);
        model256_reset(&ctx->dct_coder[i].ac_model);
        if (ctx->haar_coder[i].quality != quality) {
            ctx->haar_coder[i].quality = quality;
            ctx->haar_coder[i].scale   = 17 - 7 * quality / 50;
        }
        model_reset(&ctx->haar_coder[i].coef_hi_model);
        model256_reset(&ctx->haar_coder[i].coef_model);
    }
}

static av_cold void init_coders(MSS3Context *ctx)
{
    int i, j;

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 5; j++)
            model_init(&ctx->btype[i].bt_model[j], 5);
        model_init(&ctx->fill_coder[i].coef_model, 12);
        model256_init(&ctx->image_coder[i].esc_model);
        model256_init(&ctx->image_coder[i].vec_entry_model);
        model_init(&ctx->image_coder[i].vec_size_model, 3);
        for (j = 0; j < 125; j++)
            model_init(&ctx->image_coder[i].vq_model[j], 5);
        model_init(&ctx->dct_coder[i].dc_model, 12);
        model256_init(&ctx->dct_coder[i].ac_model);
        model_init(&ctx->haar_coder[i].coef_hi_model, 12);
        model256_init(&ctx->haar_coder[i].coef_model);
    }
}
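
/* Each frame starts with a 27-byte header: a 32-bit frame type word (bit 0
 * clear marks a keyframe), the decoded region position and size, and the
 * quality value (1-100); range-coded block data follows. */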
static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MSS3Context *c = avctx->priv_data;
    RangeCoder *acoder = &c->coder;
    GetByteContext gb;
    uint8_t *dst[3];
    int dec_width, dec_height, dec_x, dec_y, quality, keyframe;
    int x, y, i, mb_width, mb_height, blk_size, btype;
    int ret;

    if (buf_size < HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least %d bytes, got %d instead\n",
               HEADER_SIZE, buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&gb, buf, buf_size);
    keyframe = bytestream2_get_be32(&gb);
    if (keyframe & ~0x301) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type %X\n", keyframe);
        return AVERROR_INVALIDDATA;
    }
    keyframe = !(keyframe & 1);
    bytestream2_skip(&gb, 6);
    dec_x      = bytestream2_get_be16(&gb);
    dec_y      = bytestream2_get_be16(&gb);
    dec_width  = bytestream2_get_be16(&gb);
    dec_height = bytestream2_get_be16(&gb);

    if (dec_x + dec_width > avctx->width ||
        dec_y + dec_height > avctx->height ||
        (dec_width | dec_height) & 0xF) {
        av_log(avctx, AV_LOG_ERROR, "Invalid frame dimensions %dx%d +%d,%d\n",
               dec_width, dec_height, dec_x, dec_y);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);
    quality = bytestream2_get_byte(&gb);
    if (quality < 1 || quality > 100) {
        av_log(avctx, AV_LOG_ERROR, "Invalid quality setting %d\n", quality);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&gb, 4);

    if (keyframe && !bytestream2_get_bytes_left(&gb)) {
        av_log(avctx, AV_LOG_ERROR, "Keyframe without data found\n");
        return AVERROR_INVALIDDATA;
    }
    if (!keyframe && c->got_error)
        return buf_size;
    c->got_error = 0;

    c->pic.reference    = 3;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                          FF_BUFFER_HINTS_REUSABLE;
    if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }
    c->pic.key_frame = keyframe;
    c->pic.pict_type = keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
    if (!bytestream2_get_bytes_left(&gb)) {
        *data_size = sizeof(AVFrame);
        *(AVFrame*)data = c->pic;

        return buf_size;
    }

    reset_coders(c, quality);

    rac_init(acoder, buf + HEADER_SIZE, buf_size - HEADER_SIZE);

    mb_width  = dec_width  >> 4;
    mb_height = dec_height >> 4;
    dst[0] = c->pic.data[0] + dec_x     +  dec_y      * c->pic.linesize[0];
    dst[1] = c->pic.data[1] + dec_x / 2 + (dec_y / 2) * c->pic.linesize[1];
    dst[2] = c->pic.data[2] + dec_x / 2 + (dec_y / 2) * c->pic.linesize[2];
    for (y = 0; y < mb_height; y++) {
        for (x = 0; x < mb_width; x++) {
            for (i = 0; i < 3; i++) {
                blk_size = 8 << !i;

                btype = decode_block_type(acoder, c->btype + i);
                switch (btype) {
                case FILL_BLOCK:
                    decode_fill_block(acoder, c->fill_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic.linesize[i], blk_size);
                    break;
                case IMAGE_BLOCK:
                    decode_image_block(acoder, c->image_coder + i,
                                       dst[i] + x * blk_size,
                                       c->pic.linesize[i], blk_size);
                    break;
                case DCT_BLOCK:
                    decode_dct_block(acoder, c->dct_coder + i,
                                     dst[i] + x * blk_size,
                                     c->pic.linesize[i], blk_size,
                                     c->dctblock, x, y);
                    break;
                case HAAR_BLOCK:
                    decode_haar_block(acoder, c->haar_coder + i,
                                      dst[i] + x * blk_size,
                                      c->pic.linesize[i], blk_size,
                                      c->hblock);
                    break;
                }
                if (c->got_error || acoder->got_error) {
                    av_log(avctx, AV_LOG_ERROR, "Error decoding block %d,%d\n",
                           x, y);
                    c->got_error = 1;
                    return AVERROR_INVALIDDATA;
                }
            }
        }
        dst[0] += c->pic.linesize[0] * 16;
        dst[1] += c->pic.linesize[1] * 8;
        dst[2] += c->pic.linesize[2] * 8;
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    return buf_size;
}

static av_cold int mss3_decode_init(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    c->avctx = avctx;

    if ((avctx->width & 0xF) || (avctx->height & 0xF)) {
        av_log(avctx, AV_LOG_ERROR,
               "Image dimensions should be a multiple of 16.\n");
        return AVERROR_INVALIDDATA;
    }

    c->got_error = 0;
    for (i = 0; i < 3; i++) {
        int b_width  = avctx->width  >> (2 + !!i);
        int b_height = avctx->height >> (2 + !!i);
        c->dct_coder[i].prev_dc_stride = b_width;
        c->dct_coder[i].prev_dc_height = b_height;
        c->dct_coder[i].prev_dc = av_malloc(sizeof(*c->dct_coder[i].prev_dc) *
                                            b_width * b_height);
        if (!c->dct_coder[i].prev_dc) {
            av_log(avctx, AV_LOG_ERROR, "Cannot allocate buffer\n");
            while (i >= 0) {
                av_freep(&c->dct_coder[i].prev_dc);
                i--;
            }
            return AVERROR(ENOMEM);
        }
    }

    avctx->pix_fmt     = PIX_FMT_YUV420P;
    avctx->coded_frame = &c->pic;

    init_coders(c);

    return 0;
}

static av_cold int mss3_decode_end(AVCodecContext *avctx)
{
    MSS3Context * const c = avctx->priv_data;
    int i;

    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);
    for (i = 0; i < 3; i++)
        av_freep(&c->dct_coder[i].prev_dc);

    return 0;
}

AVCodec ff_msa1_decoder = {
    .name           = "msa1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSA1,
    .priv_data_size = sizeof(MSS3Context),
    .init           = mss3_decode_init,
    .close          = mss3_decode_end,
    .decode         = mss3_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("MS ATC Screen"),
};