You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

852 lines
23KB

  1. /*
  2. * Microsoft Screen 1 (aka Windows Media Video V7 Screen) decoder
  3. * Copyright (c) 2012 Konstantin Shishkov
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Microsoft Screen 1 (aka Windows Media Video V7 Screen) decoder
  24. */
  25. #include "libavutil/intfloat.h"
  26. #include "libavutil/intreadwrite.h"
  27. #include "avcodec.h"
  28. #include "get_bits.h"
/* Direction of a region split in the recursive tree decoding
 * (see decode_intra()/decode_inter()): SPLIT_VERT takes its pivot from
 * the region height, SPLIT_HOR from the width, SPLIT_NONE is a leaf. */
enum SplitMode {
    SPLIT_VERT = 0,
    SPLIT_HOR,
    SPLIT_NONE
};
/* State of the 16-bit arithmetic decoder, fed bit by bit from gb. */
typedef struct ArithCoder {
    int low, high, value;   ///< current coding interval [low, high] and code value
    GetBitContext *gb;
} ArithCoder;
#define MODEL_MIN_SYMS    2   /* smallest model alphabet size */
#define MODEL_MAX_SYMS  256   /* largest model alphabet size (full pixel model) */
#define THRESH_ADAPTIVE  -1   /* rescale threshold derived from current weights */
#define THRESH_LOW       15   /* fixed per-symbol rescale weight (low) */
#define THRESH_HIGH      50   /* fixed per-symbol rescale weight (high) */
/* Adaptive frequency model shared by all arithmetic-coded symbols. */
typedef struct Model {
    int cum_prob[MODEL_MAX_SYMS + 1]; ///< cumulative frequencies; cum_prob[0] is the total
    int weights[MODEL_MAX_SYMS + 1];  ///< per-index symbol weights
    int idx2sym[MODEL_MAX_SYMS + 1];  ///< coding index -> symbol value
    int sym2idx[MODEL_MAX_SYMS + 1];  ///< symbol value -> coding index
    int num_syms;
    int thr_weight, threshold;        ///< rescale trigger; THRESH_ADAPTIVE recomputes threshold
} Model;
  51. static const int sec_order_sizes[4] = { 1, 7, 6, 1 };
/* Indices of the causal neighbour pixels used for context modelling. */
enum ContextDirection {
    TOP_LEFT = 0,
    TOP,
    TOP_RIGHT,
    LEFT
};
/* Per-plane pixel decoding state: colour cache plus all models. */
typedef struct PixContext {
    int cache_size, num_syms;       ///< cache_size = num_syms + 4 (see pixctx_init)
    uint8_t cache[12];              ///< most-recently-used colour cache
    Model cache_model, full_model;  ///< cache-index model and full 256-symbol model
    Model sec_models[4][8][4];      ///< secondary models: [distinct neighbours - 1][layer][sub]
} PixContext;
/* Decoder private context. */
typedef struct MSS1Context {
    AVCodecContext *avctx;
    AVFrame        pic;
    uint8_t        *pic_start;      ///< pointer to the last picture line (decoding is bottom-up)
    int            pic_stride;      ///< negated pic.linesize[0]
    uint8_t        *mask;           ///< inter-frame change mask plane
    int            mask_linesize;
    uint32_t       pal[256];
    int            free_colours;    ///< number of palette entries the stream may update
    Model          intra_region, inter_region;
    Model          pivot, edge_mode, split_mode;
    PixContext     intra_pix_ctx, inter_pix_ctx;
    int            corrupted;       ///< set on decode error; inter frames are then refused
} MSS1Context;
  78. static void arith_init(ArithCoder *c, GetBitContext *gb)
  79. {
  80. c->low = 0;
  81. c->high = 0xFFFF;
  82. c->value = get_bits(gb, 16);
  83. c->gb = gb;
  84. }
/**
 * Renormalise the coding interval after a symbol has been consumed.
 *
 * Rescales [low, high] while it fits in the lower half, the upper half,
 * or the middle quarter of the 16-bit code space, shifting in one fresh
 * bitstream bit per iteration.
 */
static void arith_normalise(ArithCoder *c)
{
    for (;;) {
        if (c->high >= 0x8000) {
            if (c->low < 0x8000) {
                /* interval straddles the midpoint */
                if (c->low >= 0x4000 && c->high < 0xC000) {
                    /* underflow: interval lies in the middle quarter,
                     * expand around the midpoint */
                    c->value -= 0x4000;
                    c->low   -= 0x4000;
                    c->high  -= 0x4000;
                } else {
                    /* interval still wide enough — done */
                    return;
                }
            } else {
                /* interval entirely in the upper half */
                c->value -= 0x8000;
                c->low   -= 0x8000;
                c->high  -= 0x8000;
            }
        }
        /* (interval entirely in the lower half needs no offset) */
        c->value <<= 1;
        c->low   <<= 1;
        c->high  <<= 1;
        c->high   |= 1;
        c->value  |= get_bits1(c->gb);
    }
}
  110. static int arith_get_bit(ArithCoder *c)
  111. {
  112. int range = c->high - c->low + 1;
  113. int bit = (((c->value - c->low) << 1) + 1) / range;
  114. if (bit)
  115. c->low += range >> 1;
  116. else
  117. c->high = c->low + (range >> 1) - 1;
  118. arith_normalise(c);
  119. return bit;
  120. }
/**
 * Decode 'bits' raw (equiprobable) bits from the arithmetic coder.
 * @return decoded value in [0, 2^bits - 1]
 */
static int arith_get_bits(ArithCoder *c, int bits)
{
    int range = c->high - c->low + 1;
    /* scale the code value into 2^bits equal subintervals */
    int val   = (((c->value - c->low + 1) << bits) - 1) / range;
    int prob  = range * val;

    /* shrink the interval to the chosen subinterval */
    c->high = ((prob + range) >> bits) + c->low - 1;
    c->low += prob >> bits;

    arith_normalise(c);

    return val;
}
/**
 * Decode a value uniformly distributed in [0, mod_val - 1].
 *
 * mod_val must be positive; callers pass free_colours + 1 (decode_pal)
 * or the pivot range (decode_pivot).
 */
static int arith_get_number(ArithCoder *c, int mod_val)
{
    int range = c->high - c->low + 1;
    int val   = ((c->value - c->low + 1) * mod_val - 1) / range;
    int prob  = range * val;

    /* narrow the interval to the val-th of mod_val equal subintervals */
    c->high = (prob + range) / mod_val + c->low - 1;
    c->low += prob / mod_val;

    arith_normalise(c);

    return val;
}
/**
 * Decode a symbol index using the cumulative frequency table probs[].
 *
 * probs[0] holds the total frequency and the table is non-increasing,
 * so the scan stops at the first entry not above the scaled value.
 * NOTE(review): there is no explicit bound on sym — termination relies
 * on the table ending in 0 (model_reset()/model_rescale_weights() keep
 * cum_prob[num_syms] == 0); verify for any new caller.
 *
 * The caller must invoke arith_normalise() afterwards.
 */
static int arith_get_prob(ArithCoder *c, int *probs)
{
    int range = c->high - c->low + 1;
    int val   = ((c->value - c->low + 1) * probs[0] - 1) / range;
    int sym   = 1;

    while (probs[sym] > val)
        sym++;

    /* shrink the interval to [probs[sym], probs[sym - 1]) / probs[0] */
    c->high = range * probs[sym - 1] / probs[0] + c->low - 1;
    c->low += range * probs[sym]     / probs[0];

    return sym;
}
  152. static int model_calc_threshold(Model *m)
  153. {
  154. int thr;
  155. if (m->thr_weight == -1) {
  156. thr = 2 * m->weights[m->num_syms] - 1;
  157. thr = ((thr >> 1) + 4 * m->cum_prob[0]) / thr;
  158. } else {
  159. thr = m->num_syms * m->thr_weight;
  160. }
  161. return FFMIN(thr, 0x3FFF);
  162. }
  163. static void model_reset(Model *m)
  164. {
  165. int i;
  166. for (i = 0; i <= m->num_syms; i++) {
  167. m->weights[i] = 1;
  168. m->cum_prob[i] = m->num_syms - i;
  169. }
  170. m->weights[0] = -1;
  171. m->idx2sym[0] = -1;
  172. m->sym2idx[m->num_syms] = -1;
  173. for (i = 0; i < m->num_syms; i++) {
  174. m->sym2idx[i] = i + 1;
  175. m->idx2sym[i + 1] = i;
  176. }
  177. }
  178. static av_cold void model_init(Model *m, int num_syms, int thr_weight)
  179. {
  180. m->num_syms = num_syms;
  181. m->thr_weight = thr_weight;
  182. m->threshold = model_calc_threshold(m);
  183. model_reset(m);
  184. }
/**
 * Halve all symbol weights while the total frequency exceeds the model
 * threshold, rebuilding cum_prob[] from the top index downwards.
 *
 * Adaptive models (thr_weight == THRESH_ADAPTIVE) recompute their
 * threshold from the current weights first.
 */
static void model_rescale_weights(Model *m)
{
    int i;
    int cum_prob;

    if (m->thr_weight == THRESH_ADAPTIVE)
        m->threshold = model_calc_threshold(m);

    while (m->cum_prob[0] > m->threshold) {
        cum_prob = 0;
        /* rebuild cumulative sums while halving each weight */
        for (i = m->num_syms; i >= 0; i--) {
            m->cum_prob[i] = cum_prob;
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            cum_prob      += m->weights[i];
        }
    }
}
/**
 * Update the model after decoding the symbol at coding index 'val'.
 *
 * If the symbol's weight equals its neighbour's, the symbol is first
 * swapped with the lowest index of that run of equal weights so that
 * weights[] stays non-increasing after the increment; both mapping
 * tables are updated accordingly.
 */
static void model_update(Model *m, int val)
{
    int i;

    if (m->weights[val] == m->weights[val - 1]) {
        /* find the first index of the run of equal weights */
        for (i = val; m->weights[i - 1] == m->weights[val]; i--);
        if (i != val) {
            int sym1, sym2;

            /* swap the two symbols in both direction tables */
            sym1 = m->idx2sym[val];
            sym2 = m->idx2sym[i];

            m->idx2sym[val]  = sym2;
            m->idx2sym[i]    = sym1;

            m->sym2idx[sym1] = i;
            m->sym2idx[sym2] = val;

            /* the increment now applies to the new position */
            val = i;
        }
    }
    m->weights[val]++;
    /* every cumulative frequency below the symbol grows by one */
    for (i = val - 1; i >= 0; i--)
        m->cum_prob[i]++;
    model_rescale_weights(m);
}
  221. static int arith_get_model_sym(ArithCoder *c, Model *m)
  222. {
  223. int idx, val;
  224. idx = arith_get_prob(c, m->cum_prob);
  225. val = m->idx2sym[idx];
  226. model_update(m, idx);
  227. arith_normalise(c);
  228. return val;
  229. }
  230. static void pixctx_reset(PixContext *ctx)
  231. {
  232. int i, j, k;
  233. for (i = 0; i < ctx->cache_size; i++)
  234. ctx->cache[i] = i;
  235. model_reset(&ctx->cache_model);
  236. model_reset(&ctx->full_model);
  237. for (i = 0; i < 4; i++)
  238. for (j = 0; j < sec_order_sizes[i]; j++)
  239. for (k = 0; k < 4; k++)
  240. model_reset(&ctx->sec_models[i][j][k]);
  241. }
  242. static av_cold void pixctx_init(PixContext *ctx, int cache_size)
  243. {
  244. int i, j, k;
  245. ctx->cache_size = cache_size + 4;
  246. ctx->num_syms = cache_size;
  247. for (i = 0; i < ctx->cache_size; i++)
  248. ctx->cache[i] = i;
  249. model_init(&ctx->cache_model, ctx->num_syms + 1, THRESH_LOW);
  250. model_init(&ctx->full_model, 256, THRESH_HIGH);
  251. for (i = 0; i < 4; i++) {
  252. for (j = 0; j < sec_order_sizes[i]; j++) {
  253. for (k = 0; k < 4; k++) {
  254. model_init(&ctx->sec_models[i][j][k], 2 + i,
  255. i ? THRESH_LOW : THRESH_ADAPTIVE);
  256. }
  257. }
  258. }
  259. }
/**
 * Decode the first pixel of a region.
 *
 * Tries the colour cache first; on a cache-model escape the colour is
 * decoded with the full 256-symbol model and located in the cache
 * (falling back to the last slot when absent).  The cache is then
 * updated move-to-front.
 */
static int decode_top_left_pixel(ArithCoder *acoder, PixContext *pctx)
{
    int i, val, pix;

    val = arith_get_model_sym(acoder, &pctx->cache_model);
    if (val < pctx->num_syms) {
        /* cache hit: val is a direct cache index */
        pix = pctx->cache[val];
    } else {
        /* cache miss: decode the colour and find its cache position */
        pix = arith_get_model_sym(acoder, &pctx->full_model);
        for (i = 0; i < pctx->cache_size - 1; i++)
            if (pctx->cache[i] == pix)
                break;
        val = i;
    }
    /* move-to-front cache update */
    if (val) {
        for (i = val; i > 0; i--)
            pctx->cache[i] = pctx->cache[i - 1];
        pctx->cache[0] = pix;
    }
    return pix;
}
/**
 * Decode a pixel known to differ from all of its neighbour colours.
 *
 * On a cache-model hit, the decoded value counts cache entries while
 * skipping colours equal to one of ngb[0..num_ngb-1] (those are coded
 * by the secondary models instead).  On an escape the colour comes
 * from the full model.  The cache is updated move-to-front either way.
 */
static int decode_pixel(ArithCoder *acoder, PixContext *pctx,
                        uint8_t *ngb, int num_ngb)
{
    int i, val, pix;

    val = arith_get_model_sym(acoder, &pctx->cache_model);
    if (val < pctx->num_syms) {
        int idx, j;

        /* walk the cache, counting only colours not present among the
         * neighbours, until the val-th such entry is reached */
        idx = 0;
        for (i = 0; i < pctx->cache_size; i++) {
            for (j = 0; j < num_ngb; j++)
                if (pctx->cache[i] == ngb[j])
                    break;
            if (j == num_ngb) {
                if (idx == val)
                    break;
                idx++;
            }
        }
        val = FFMIN(i, pctx->cache_size - 1);
        pix = pctx->cache[val];
    } else {
        /* escape: full model, then locate the colour in the cache
         * (last slot when absent) */
        pix = arith_get_model_sym(acoder, &pctx->full_model);
        for (i = 0; i < pctx->cache_size - 1; i++)
            if (pctx->cache[i] == pix)
                break;
        val = i;
    }
    /* move-to-front cache update */
    if (val) {
        for (i = val; i > 0; i--)
            pctx->cache[i] = pctx->cache[i - 1];
        pctx->cache[0] = pix;
    }
    return pix;
}
/**
 * Decode one pixel from its causal neighbourhood.
 *
 * Gathers the top-left/top/top-right/left neighbours (replicated at the
 * picture borders), derives a secondary-model context from the number
 * of distinct neighbour colours (nlen), their equality pattern (layer)
 * and colour-run continuation two pixels back (sub), then decodes
 * either one of the neighbour colours directly or an escape handled by
 * decode_pixel().
 */
static int decode_pixel_in_context(ArithCoder *acoder, PixContext *pctx,
                                   uint8_t *src, int stride, int x, int y,
                                   int has_right)
{
    uint8_t neighbours[4];
    uint8_t ref_pix[4];
    int nlen;
    int layer = 0, sub;
    int pix;
    int i, j;

    if (!y) {
        /* first row: all neighbours replicate the left pixel */
        memset(neighbours, src[-1], 4);
    } else {
        neighbours[TOP] = src[-stride];
        if (!x) {
            /* first column: left-side neighbours mirror the top pixel */
            neighbours[TOP_LEFT] = neighbours[LEFT] = neighbours[TOP];
        } else {
            neighbours[TOP_LEFT] = src[-stride - 1];
            neighbours[ LEFT]    = src[-1];
        }
        if (has_right)
            neighbours[TOP_RIGHT] = src[-stride + 1];
        else
            neighbours[TOP_RIGHT] = neighbours[TOP];
    }

    /* sub-context: do the left/top colour runs extend two pixels back? */
    sub = 0;
    if (x >= 2 && src[-2] == neighbours[LEFT])
        sub = 1;
    if (y >= 2 && src[-2 * stride] == neighbours[TOP])
        sub |= 2;

    /* collect the distinct neighbour colours into ref_pix[] */
    nlen = 1;
    ref_pix[0] = neighbours[0];
    for (i = 1; i < 4; i++) {
        for (j = 0; j < nlen; j++)
            if (ref_pix[j] == neighbours[i])
                break;
        if (j == nlen)
            ref_pix[nlen++] = neighbours[i];
    }

    /* classify the neighbour equality pattern into a layer context */
    switch (nlen) {
    case 1:
    case 4:
        /* all equal or all different: a single context suffices */
        layer = 0;
        break;
    case 2:
        if (neighbours[TOP] == neighbours[TOP_LEFT]) {
            if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT])
                layer = 3;
            else if (neighbours[LEFT] == neighbours[TOP_LEFT])
                layer = 2;
            else
                layer = 4;
        } else if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT]) {
            if (neighbours[LEFT] == neighbours[TOP_LEFT])
                layer = 1;
            else
                layer = 5;
        } else if (neighbours[LEFT] == neighbours[TOP_LEFT]) {
            layer = 6;
        } else {
            layer = 0;
        }
        break;
    case 3:
        if (neighbours[TOP] == neighbours[TOP_LEFT])
            layer = 0;
        else if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT])
            layer = 1;
        else if (neighbours[LEFT] == neighbours[TOP_LEFT])
            layer = 2;
        else if (neighbours[TOP_RIGHT] == neighbours[TOP])
            layer = 3;
        else if (neighbours[TOP] == neighbours[LEFT])
            layer = 4;
        else
            layer = 5;
        break;
    }

    /* symbol < nlen selects a neighbour colour, otherwise escape */
    pix = arith_get_model_sym(acoder, &pctx->sec_models[nlen - 1][layer][sub]);
    if (pix < nlen)
        return ref_pix[pix];
    else
        return decode_pixel(acoder, pctx, ref_pix, nlen);
}
  398. static int decode_region(MSS1Context *ctx, ArithCoder *acoder, uint8_t *dst,
  399. int x, int y, int width, int height, int stride,
  400. PixContext *pctx)
  401. {
  402. int i, j;
  403. dst += x + y * stride;
  404. dst[0] = decode_top_left_pixel(acoder, pctx);
  405. for (j = 0; j < height; j++) {
  406. for (i = 0; i < width; i++) {
  407. if (!i && !j)
  408. continue;
  409. dst[i] = decode_pixel_in_context(acoder, pctx, dst + i, stride,
  410. i, j, width - i - 1);
  411. }
  412. dst += stride;
  413. }
  414. return 0;
  415. }
  416. static int decode_region_masked(MSS1Context *ctx, ArithCoder *acoder,
  417. uint8_t *dst, int stride, uint8_t *mask,
  418. int mask_stride, int x, int y,
  419. int width, int height,
  420. PixContext *pctx)
  421. {
  422. int i, j;
  423. dst += x + y * stride;
  424. mask += x + y * mask_stride;
  425. if (mask[0] == 0xFF)
  426. dst[0] = decode_top_left_pixel(acoder, pctx);
  427. for (j = 0; j < height; j++) {
  428. for (i = 0; i < width; i++) {
  429. if (!i && !j || mask[i] != 0xFF)
  430. continue;
  431. dst[i] = decode_pixel_in_context(acoder, pctx, dst + i, stride,
  432. i, j, width - i - 1);
  433. }
  434. dst += stride;
  435. mask += mask_stride;
  436. }
  437. return 0;
  438. }
  439. static av_cold void codec_init(MSS1Context *ctx)
  440. {
  441. model_init(&ctx->intra_region, 2, THRESH_ADAPTIVE);
  442. model_init(&ctx->inter_region, 2, THRESH_ADAPTIVE);
  443. model_init(&ctx->split_mode, 3, THRESH_HIGH);
  444. model_init(&ctx->edge_mode, 2, THRESH_HIGH);
  445. model_init(&ctx->pivot, 3, THRESH_LOW);
  446. pixctx_init(&ctx->intra_pix_ctx, 8);
  447. pixctx_init(&ctx->inter_pix_ctx, 2);
  448. ctx->corrupted = 1;
  449. }
  450. static void codec_reset(MSS1Context *ctx)
  451. {
  452. model_reset(&ctx->intra_region);
  453. model_reset(&ctx->inter_region);
  454. model_reset(&ctx->split_mode);
  455. model_reset(&ctx->edge_mode);
  456. model_reset(&ctx->pivot);
  457. pixctx_reset(&ctx->intra_pix_ctx);
  458. pixctx_reset(&ctx->inter_pix_ctx);
  459. ctx->corrupted = 0;
  460. }
  461. static int decode_pal(MSS1Context *ctx, ArithCoder *acoder)
  462. {
  463. int i, ncol, r, g, b;
  464. uint32_t *pal = ctx->pal + 256 - ctx->free_colours;
  465. if (!ctx->free_colours)
  466. return 0;
  467. ncol = arith_get_number(acoder, ctx->free_colours + 1);
  468. for (i = 0; i < ncol; i++) {
  469. r = arith_get_bits(acoder, 8);
  470. g = arith_get_bits(acoder, 8);
  471. b = arith_get_bits(acoder, 8);
  472. *pal++ = (0xFF << 24) | (r << 16) | (g << 8) | b;
  473. }
  474. return !!ncol;
  475. }
/**
 * Decode the split position for a region of extent 'base'.
 *
 * Pivot values 1 and 2 are coded directly by the pivot model; larger
 * pivots are coded as an offset into [3, (base + 1) / 2].  'inv'
 * mirrors the pivot to the other side of the region.  On invalid data
 * ctx->corrupted is set and 0 is returned.
 */
static int decode_pivot(MSS1Context *ctx, ArithCoder *acoder, int base)
{
    int val, inv;

    inv = arith_get_model_sym(acoder, &ctx->edge_mode);
    val = arith_get_model_sym(acoder, &ctx->pivot) + 1;

    if (val > 2) {
        /* escape: the remaining range must be non-empty */
        if ((base + 1) / 2 - 2 <= 0) {
            ctx->corrupted = 1;
            return 0;
        }
        val = arith_get_number(acoder, (base + 1) / 2 - 2) + 3;
    }

    /* a pivot must lie strictly inside the region */
    if ((unsigned)val >= base) {
        ctx->corrupted = 1;
        return 0;
    }

    return inv ? base - val : val;
}
  494. static int decode_region_intra(MSS1Context *ctx, ArithCoder *acoder,
  495. int x, int y, int width, int height)
  496. {
  497. int mode;
  498. mode = arith_get_model_sym(acoder, &ctx->intra_region);
  499. if (!mode) {
  500. int i, pix;
  501. int stride = ctx->pic_stride;
  502. uint8_t *dst = ctx->pic_start + x + y * stride;
  503. pix = decode_top_left_pixel(acoder, &ctx->intra_pix_ctx);
  504. for (i = 0; i < height; i++, dst += stride)
  505. memset(dst, pix, width);
  506. } else {
  507. return decode_region(ctx, acoder, ctx->pic_start,
  508. x, y, width, height, ctx->pic_stride,
  509. &ctx->intra_pix_ctx);
  510. }
  511. return 0;
  512. }
  513. static int decode_intra(MSS1Context *ctx, ArithCoder *acoder,
  514. int x, int y, int width, int height)
  515. {
  516. int mode, pivot;
  517. if (ctx->corrupted)
  518. return -1;
  519. mode = arith_get_model_sym(acoder, &ctx->split_mode);
  520. switch (mode) {
  521. case SPLIT_VERT:
  522. pivot = decode_pivot(ctx, acoder, height);
  523. if (ctx->corrupted)
  524. return -1;
  525. if (decode_intra(ctx, acoder, x, y, width, pivot))
  526. return -1;
  527. if (decode_intra(ctx, acoder, x, y + pivot, width, height - pivot))
  528. return -1;
  529. break;
  530. case SPLIT_HOR:
  531. pivot = decode_pivot(ctx, acoder, width);
  532. if (ctx->corrupted)
  533. return -1;
  534. if (decode_intra(ctx, acoder, x, y, pivot, height))
  535. return -1;
  536. if (decode_intra(ctx, acoder, x + pivot, y, width - pivot, height))
  537. return -1;
  538. break;
  539. case SPLIT_NONE:
  540. return decode_region_intra(ctx, acoder, x, y, width, height);
  541. default:
  542. return -1;
  543. }
  544. return 0;
  545. }
/**
 * Decode an inter leaf region.
 *
 * mode == 0: a single cache symbol signals either "region unchanged"
 * (any value but 0xFF) or "recode the whole region intra" (0xFF).
 * Otherwise a change mask is decoded into ctx->mask and only pixels
 * marked 0xFF there are re-decoded with the intra pixel models.
 */
static int decode_region_inter(MSS1Context *ctx, ArithCoder *acoder,
                               int x, int y, int width, int height)
{
    int mode;

    mode = arith_get_model_sym(acoder, &ctx->inter_region);

    if (!mode) {
        mode = decode_top_left_pixel(acoder, &ctx->inter_pix_ctx);

        if (mode != 0xFF) {
            /* region is unchanged — keep the reference pixels */
            return 0;
        } else {
            return decode_region_intra(ctx, acoder, x, y, width, height);
        }
    } else {
        if (decode_region(ctx, acoder, ctx->mask,
                          x, y, width, height, ctx->mask_linesize,
                          &ctx->inter_pix_ctx) < 0)
            return -1;
        return decode_region_masked(ctx, acoder, ctx->pic_start,
                                    -ctx->pic.linesize[0], ctx->mask,
                                    ctx->mask_linesize,
                                    x, y, width, height,
                                    &ctx->intra_pix_ctx);
    }

    /* not reached: both branches above return */
    return 0;
}
  571. static int decode_inter(MSS1Context *ctx, ArithCoder *acoder,
  572. int x, int y, int width, int height)
  573. {
  574. int mode, pivot;
  575. if (ctx->corrupted)
  576. return -1;
  577. mode = arith_get_model_sym(acoder, &ctx->split_mode);
  578. switch (mode) {
  579. case SPLIT_VERT:
  580. pivot = decode_pivot(ctx, acoder, height);
  581. if (decode_inter(ctx, acoder, x, y, width, pivot))
  582. return -1;
  583. if (decode_inter(ctx, acoder, x, y + pivot, width, height - pivot))
  584. return -1;
  585. break;
  586. case SPLIT_HOR:
  587. pivot = decode_pivot(ctx, acoder, width);
  588. if (decode_inter(ctx, acoder, x, y, pivot, height))
  589. return -1;
  590. if (decode_inter(ctx, acoder, x + pivot, y, width - pivot, height))
  591. return -1;
  592. break;
  593. case SPLIT_NONE:
  594. return decode_region_inter(ctx, acoder, x, y, width, height);
  595. default:
  596. return -1;
  597. }
  598. return 0;
  599. }
/**
 * Decode one MSS1 frame (old decode API: the output AVFrame is copied
 * into *data and *data_size is set to sizeof(AVFrame)).
 *
 * The first arithmetic-coded bit selects intra (0) or inter (1)
 * coding; the picture is stored bottom-up via a negative stride.
 */
static int mss1_decode_frame(AVCodecContext *avctx, void *data, int *data_size,
                             AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    MSS1Context *c = avctx->priv_data;
    GetBitContext gb;
    ArithCoder acoder;
    int pal_changed = 0;
    int ret;

    init_get_bits(&gb, buf, buf_size * 8);
    arith_init(&acoder, &gb);

    /* inter frames predict from the previous picture, so its contents
     * must be preserved across reget_buffer() */
    c->pic.reference    = 3;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                          FF_BUFFER_HINTS_REUSABLE;
    if ((ret = avctx->reget_buffer(avctx, &c->pic)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return ret;
    }

    /* bottom-up decoding: start at the last line with a negated stride */
    c->pic_start  = c->pic.data[0] + c->pic.linesize[0] * (avctx->height - 1);
    c->pic_stride = -c->pic.linesize[0];

    if (!arith_get_bit(&acoder)) {
        /* intra (key) frame: reset all models first */
        codec_reset(c);
        pal_changed  = decode_pal(c, &acoder);
        c->corrupted = decode_intra(c, &acoder, 0, 0,
                                    avctx->width, avctx->height);
        c->pic.key_frame = 1;
        c->pic.pict_type = AV_PICTURE_TYPE_I;
    } else {
        /* refuse to predict from a missing or broken reference */
        if (c->corrupted)
            return AVERROR_INVALIDDATA;
        c->corrupted = decode_inter(c, &acoder, 0, 0,
                                    avctx->width, avctx->height);
        c->pic.key_frame = 0;
        c->pic.pict_type = AV_PICTURE_TYPE_P;
    }
    if (c->corrupted)
        return AVERROR_INVALIDDATA;
    memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
    c->pic.palette_has_changed = pal_changed;

    *data_size      = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
/**
 * Decoder initialisation: validate and parse the extradata (a 52-byte
 * header followed by a 256-entry RGB palette), allocate the inter-frame
 * change mask and set up all models.
 */
static av_cold int mss1_decode_init(AVCodecContext *avctx)
{
    MSS1Context * const c = avctx->priv_data;
    int i;

    c->avctx = avctx;

    if (avctx->extradata_size < 52 + 256 * 3) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient extradata size %d\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }
    /* first header field is the size the header itself declares;
     * NOTE(review): the comparison direction (declared < available)
     * reads oddly against the log message — verify against spec */
    if (AV_RB32(avctx->extradata) < avctx->extradata_size) {
        av_log(avctx, AV_LOG_ERROR,
               "Insufficient extradata size: expected %d got %d\n",
               AV_RB32(avctx->extradata),
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    av_log(avctx, AV_LOG_DEBUG, "Encoder version %d.%d\n",
           AV_RB32(avctx->extradata + 4), AV_RB32(avctx->extradata + 8));
    c->free_colours = AV_RB32(avctx->extradata + 48);
    if ((unsigned)c->free_colours > 256) {
        av_log(avctx, AV_LOG_ERROR,
               "Incorrect number of changeable palette entries: %d\n",
               c->free_colours);
        return AVERROR_INVALIDDATA;
    }
    av_log(avctx, AV_LOG_DEBUG, "%d free colour(s)\n", c->free_colours);
    avctx->coded_width  = AV_RB32(avctx->extradata + 20);
    avctx->coded_height = AV_RB32(avctx->extradata + 24);

    av_log(avctx, AV_LOG_DEBUG, "Display dimensions %dx%d\n",
           AV_RB32(avctx->extradata + 12), AV_RB32(avctx->extradata + 16));
    av_log(avctx, AV_LOG_DEBUG, "Coded dimensions %dx%d\n",
           avctx->coded_width, avctx->coded_height);
    av_log(avctx, AV_LOG_DEBUG, "%g frames per second\n",
           av_int2float(AV_RB32(avctx->extradata + 28)));
    av_log(avctx, AV_LOG_DEBUG, "Bitrate %d bps\n",
           AV_RB32(avctx->extradata + 32));
    av_log(avctx, AV_LOG_DEBUG, "Max. lead time %g ms\n",
           av_int2float(AV_RB32(avctx->extradata + 36)));
    av_log(avctx, AV_LOG_DEBUG, "Max. lag time %g ms\n",
           av_int2float(AV_RB32(avctx->extradata + 40)));
    av_log(avctx, AV_LOG_DEBUG, "Max. seek time %g ms\n",
           av_int2float(AV_RB32(avctx->extradata + 44)));

    /* initial palette from the extradata */
    for (i = 0; i < 256; i++)
        c->pal[i] = 0xFF << 24 | AV_RB24(avctx->extradata + 52 + i * 3);

    avctx->pix_fmt = PIX_FMT_PAL8;

    /* NOTE(review): mask_linesize * height is computed in int; this
     * relies on libavcodec's picture-size limits to avoid overflow —
     * confirm */
    c->mask_linesize = FFALIGN(avctx->width, 16);
    c->mask          = av_malloc(c->mask_linesize * avctx->height);
    if (!c->mask) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate mask plane\n");
        return AVERROR(ENOMEM);
    }

    avctx->coded_frame = &c->pic;

    codec_init(c);

    return 0;
}
  701. static av_cold int mss1_decode_end(AVCodecContext *avctx)
  702. {
  703. MSS1Context * const c = avctx->priv_data;
  704. if (c->pic.data[0])
  705. avctx->release_buffer(avctx, &c->pic);
  706. av_freep(&c->mask);
  707. return 0;
  708. }
/* Decoder registration entry exported to the libavcodec codec list. */
AVCodec ff_mss1_decoder = {
    .name           = "mss1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MSS1,
    .priv_data_size = sizeof(MSS1Context),
    .init           = mss1_decode_init,
    .close          = mss1_decode_end,
    .decode         = mss1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("MS Screen 1"),
};