You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

695 lines
21KB

  1. /*
  2. * Copyright (c) 2012 Konstantin Shishkov
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. /**
  21. * @file
  22. * Common functions for Microsoft Screen 1 and 2
  23. */
  24. #include <inttypes.h>
  25. #include "libavutil/intfloat.h"
  26. #include "libavutil/intreadwrite.h"
  27. #include "avcodec.h"
  28. #include "mss12.h"
/* How a rectangle is recursively split during decoding. */
enum SplitMode {
    SPLIT_VERT = 0,
    SPLIT_HOR,
    SPLIT_NONE
};

/* Number of secondary pixel-context model groups per distinct-neighbour
 * count (1, 2, 3 and 4 distinct neighbours respectively); the sizes sum
 * to the 15 layers used in decode_pixel_in_context(). */
static const int sec_order_sizes[4] = { 1, 7, 6, 1 };

/* Indices of the four causal neighbours used as prediction context. */
enum ContextDirection {
    TOP_LEFT = 0,
    TOP,
    TOP_RIGHT,
    LEFT
};
  41. static int model_calc_threshold(Model *m)
  42. {
  43. int thr;
  44. thr = 2 * m->weights[m->num_syms] - 1;
  45. thr = ((thr >> 1) + 4 * m->cum_prob[0]) / thr;
  46. return FFMIN(thr, 0x3FFF);
  47. }
  48. static void model_reset(Model *m)
  49. {
  50. int i;
  51. for (i = 0; i <= m->num_syms; i++) {
  52. m->weights[i] = 1;
  53. m->cum_prob[i] = m->num_syms - i;
  54. }
  55. m->weights[0] = 0;
  56. for (i = 0; i < m->num_syms; i++)
  57. m->idx2sym[i + 1] = i;
  58. }
  59. static av_cold void model_init(Model *m, int num_syms, int thr_weight)
  60. {
  61. m->num_syms = num_syms;
  62. m->thr_weight = thr_weight;
  63. m->threshold = num_syms * thr_weight;
  64. }
  65. static void model_rescale_weights(Model *m)
  66. {
  67. int i;
  68. int cum_prob;
  69. if (m->thr_weight == THRESH_ADAPTIVE)
  70. m->threshold = model_calc_threshold(m);
  71. while (m->cum_prob[0] > m->threshold) {
  72. cum_prob = 0;
  73. for (i = m->num_syms; i >= 0; i--) {
  74. m->cum_prob[i] = cum_prob;
  75. m->weights[i] = (m->weights[i] + 1) >> 1;
  76. cum_prob += m->weights[i];
  77. }
  78. }
  79. }
/**
 * Update the adaptive model after symbol index @p val was decoded.
 *
 * When the updated entry's weight equals its predecessor's, the entry is
 * swapped with the first entry of that equal-weight run so the weights
 * array stays non-increasing; the weight and cumulative probabilities are
 * then bumped and the model rescaled if needed.
 */
void ff_mss12_model_update(Model *m, int val)
{
    int i;

    if (m->weights[val] == m->weights[val - 1]) {
        /* Find the first entry of the run of equal weights. */
        for (i = val; m->weights[i - 1] == m->weights[val]; i--);
        if (i != val) {
            int sym1, sym2;
            /* Swap the symbols so the incremented entry moves to the
             * head of the run, preserving the sorted-weight invariant. */
            sym1 = m->idx2sym[val];
            sym2 = m->idx2sym[i];
            m->idx2sym[val] = sym2;
            m->idx2sym[i]   = sym1;
            val = i;
        }
    }
    m->weights[val]++;
    for (i = val - 1; i >= 0; i--)
        m->cum_prob[i]++;
    model_rescale_weights(m);
}
  99. static void pixctx_reset(PixContext *ctx)
  100. {
  101. int i, j;
  102. if (!ctx->special_initial_cache)
  103. for (i = 0; i < ctx->cache_size; i++)
  104. ctx->cache[i] = i;
  105. else {
  106. ctx->cache[0] = 1;
  107. ctx->cache[1] = 2;
  108. ctx->cache[2] = 4;
  109. }
  110. model_reset(&ctx->cache_model);
  111. model_reset(&ctx->full_model);
  112. for (i = 0; i < 15; i++)
  113. for (j = 0; j < 4; j++)
  114. model_reset(&ctx->sec_models[i][j]);
  115. }
  116. static av_cold void pixctx_init(PixContext *ctx, int cache_size,
  117. int full_model_syms, int special_initial_cache)
  118. {
  119. int i, j, k, idx;
  120. ctx->cache_size = cache_size + 4;
  121. ctx->num_syms = cache_size;
  122. ctx->special_initial_cache = special_initial_cache;
  123. model_init(&ctx->cache_model, ctx->num_syms + 1, THRESH_LOW);
  124. model_init(&ctx->full_model, full_model_syms, THRESH_HIGH);
  125. for (i = 0, idx = 0; i < 4; i++)
  126. for (j = 0; j < sec_order_sizes[i]; j++, idx++)
  127. for (k = 0; k < 4; k++)
  128. model_init(&ctx->sec_models[idx][k], 2 + i,
  129. i ? THRESH_LOW : THRESH_ADAPTIVE);
  130. }
/**
 * Decode one pixel value through the move-to-front pixel cache.
 *
 * @param acoder  arithmetic decoder state
 * @param pctx    pixel context with cache and models
 * @param ngb     neighbour pixel values to exclude when remapping the
 *                coded cache index (may be NULL when num_ngb is 0)
 * @param num_ngb number of valid entries in ngb
 * @param any_ngb non-zero when neighbour exclusion applies
 * @return decoded pixel value, or AVERROR_INVALIDDATA on decoder overread
 */
static av_always_inline int decode_pixel(ArithCoder *acoder, PixContext *pctx,
                                         uint8_t *ngb, int num_ngb, int any_ngb)
{
    int i, val, pix;

    if (acoder->overread > MAX_OVERREAD)
        return AVERROR_INVALIDDATA;

    val = acoder->get_model_sym(acoder, &pctx->cache_model);
    if (val < pctx->num_syms) {
        if (any_ngb) {
            /* The coded index counts only cache entries that differ from
             * every neighbour; walk the cache skipping matches until the
             * val-th non-matching entry is reached. */
            int idx, j;

            idx = 0;
            for (i = 0; i < pctx->cache_size; i++) {
                for (j = 0; j < num_ngb; j++)
                    if (pctx->cache[i] == ngb[j])
                        break;
                if (j == num_ngb) {
                    if (idx == val)
                        break;
                    idx++;
                }
            }
            val = FFMIN(i, pctx->cache_size - 1);
        }
        pix = pctx->cache[val];
    } else {
        /* Cache escape: decode the pixel from the full-symbol model and
         * locate it in the cache (val falls through to cache_size - 1 if
         * absent). */
        pix = acoder->get_model_sym(acoder, &pctx->full_model);
        for (i = 0; i < pctx->cache_size - 1; i++)
            if (pctx->cache[i] == pix)
                break;
        val = i;
    }

    /* Move-to-front update of the cache. */
    if (val) {
        for (i = val; i > 0; i--)
            pctx->cache[i] = pctx->cache[i - 1];
        pctx->cache[0] = pix;
    }

    return pix;
}
/**
 * Decode one pixel using its four causal neighbours as context.
 *
 * The neighbours (top-left, top, top-right, left) select one of 15
 * "layer" models based on how many distinct values they contain and
 * which of them are equal; a "sub" index refines the context with the
 * pixels two steps away.  A decoded symbol below the distinct-neighbour
 * count selects that neighbour value directly; otherwise the pixel is
 * decoded via decode_pixel() with the neighbours excluded.
 *
 * @param src       pointer to the pixel being decoded inside the plane
 * @param stride    plane stride in bytes
 * @param x, y      coordinates relative to the region origin
 * @param has_right non-zero when a top-right neighbour exists
 */
static int decode_pixel_in_context(ArithCoder *acoder, PixContext *pctx,
                                   uint8_t *src, ptrdiff_t stride, int x, int y,
                                   int has_right)
{
    uint8_t neighbours[4];
    uint8_t ref_pix[4];
    int nlen;
    int layer = 0, sub;
    int pix;
    int i, j;

    /* Gather the four causal neighbours, substituting available ones for
     * those outside the region. */
    if (!y) {
        memset(neighbours, src[-1], 4);
    } else {
        neighbours[TOP] = src[-stride];
        if (!x) {
            neighbours[TOP_LEFT] = neighbours[LEFT] = neighbours[TOP];
        } else {
            neighbours[TOP_LEFT] = src[-stride - 1];
            neighbours[    LEFT] = src[-1];
        }
        if (has_right)
            neighbours[TOP_RIGHT] = src[-stride + 1];
        else
            neighbours[TOP_RIGHT] = neighbours[TOP];
    }

    /* Sub-context: do the pixels two positions left/above repeat the
     * immediate neighbours? */
    sub = 0;
    if (x >= 2 && src[-2] == neighbours[LEFT])
        sub = 1;
    if (y >= 2 && src[-2 * stride] == neighbours[TOP])
        sub |= 2;

    /* Collect the distinct neighbour values, in neighbour order. */
    nlen = 1;
    ref_pix[0] = neighbours[0];
    for (i = 1; i < 4; i++) {
        for (j = 0; j < nlen; j++)
            if (ref_pix[j] == neighbours[i])
                break;
        if (j == nlen)
            ref_pix[nlen++] = neighbours[i];
    }

    /* Map the neighbour-equality pattern to one of the 15 layer models:
     * layer 0 for all-equal, 1-7 for two distinct values, 8-13 for
     * three, 14 for all-different. */
    switch (nlen) {
    case 1:
        layer = 0;
        break;
    case 2:
        if (neighbours[TOP] == neighbours[TOP_LEFT]) {
            if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT])
                layer = 1;
            else if (neighbours[LEFT] == neighbours[TOP_LEFT])
                layer = 2;
            else
                layer = 3;
        } else if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT]) {
            if (neighbours[LEFT] == neighbours[TOP_LEFT])
                layer = 4;
            else
                layer = 5;
        } else if (neighbours[LEFT] == neighbours[TOP_LEFT]) {
            layer = 6;
        } else {
            layer = 7;
        }
        break;
    case 3:
        if (neighbours[TOP] == neighbours[TOP_LEFT])
            layer = 8;
        else if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT])
            layer = 9;
        else if (neighbours[LEFT] == neighbours[TOP_LEFT])
            layer = 10;
        else if (neighbours[TOP_RIGHT] == neighbours[TOP])
            layer = 11;
        else if (neighbours[TOP] == neighbours[LEFT])
            layer = 12;
        else
            layer = 13;
        break;
    case 4:
        layer = 14;
        break;
    }

    pix = acoder->get_model_sym(acoder,
                                &pctx->sec_models[layer][sub]);
    if (pix < nlen)
        return ref_pix[pix];
    else
        return decode_pixel(acoder, pctx, ref_pix, nlen, 1);
}
  256. static int decode_region(ArithCoder *acoder, uint8_t *dst, uint8_t *rgb_pic,
  257. int x, int y, int width, int height, ptrdiff_t stride,
  258. ptrdiff_t rgb_stride, PixContext *pctx,
  259. const uint32_t *pal)
  260. {
  261. int i, j, p;
  262. uint8_t *rgb_dst = rgb_pic + x * 3 + y * rgb_stride;
  263. dst += x + y * stride;
  264. for (j = 0; j < height; j++) {
  265. for (i = 0; i < width; i++) {
  266. if (!i && !j)
  267. p = decode_pixel(acoder, pctx, NULL, 0, 0);
  268. else
  269. p = decode_pixel_in_context(acoder, pctx, dst + i, stride,
  270. i, j, width - i - 1);
  271. if (p < 0)
  272. return p;
  273. dst[i] = p;
  274. if (rgb_pic)
  275. AV_WB24(rgb_dst + i * 3, pal[p]);
  276. }
  277. dst += stride;
  278. rgb_dst += rgb_stride;
  279. }
  280. return 0;
  281. }
  282. static void copy_rectangles(MSS12Context const *c,
  283. int x, int y, int width, int height)
  284. {
  285. int j;
  286. if (c->last_rgb_pic)
  287. for (j = y; j < y + height; j++) {
  288. memcpy(c->rgb_pic + j * c->rgb_stride + x * 3,
  289. c->last_rgb_pic + j * c->rgb_stride + x * 3,
  290. width * 3);
  291. memcpy(c->pal_pic + j * c->pal_stride + x,
  292. c->last_pal_pic + j * c->pal_stride + x,
  293. width);
  294. }
  295. }
/**
 * Motion-compensate a rectangle by the frame-global vector (mvX, mvY).
 *
 * The source is the previous frame when one exists, otherwise the current
 * frame itself (hence memmove, since source and destination may overlap).
 *
 * @return 0 on success, -1 when the displaced rectangle falls outside the
 *         frame or no RGB picture is available
 */
static int motion_compensation(MSS12Context const *c,
                               int x, int y, int width, int height)
{
    if (x + c->mvX < 0 || x + c->mvX + width  > c->avctx->width  ||
        y + c->mvY < 0 || y + c->mvY + height > c->avctx->height ||
        !c->rgb_pic)
        return -1;
    else {
        uint8_t *dst     = c->pal_pic + x     + y * c->pal_stride;
        uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * c->rgb_stride;
        uint8_t *src;
        uint8_t *rgb_src;
        int j;
        x += c->mvX;
        y += c->mvY;
        if (c->last_rgb_pic) {
            src     = c->last_pal_pic + x     + y * c->pal_stride;
            rgb_src = c->last_rgb_pic + x * 3 + y * c->rgb_stride;
        } else {
            /* Self-referential copy within the current frame. */
            src     = c->pal_pic + x     + y * c->pal_stride;
            rgb_src = c->rgb_pic + x * 3 + y * c->rgb_stride;
        }
        for (j = 0; j < height; j++) {
            memmove(dst, src, width);
            memmove(rgb_dst, rgb_src, width * 3);
            dst     += c->pal_stride;
            src     += c->pal_stride;
            rgb_dst += c->rgb_stride;
            rgb_src += c->rgb_stride;
        }
    }

    return 0;
}
  329. static int decode_region_masked(MSS12Context const *c, ArithCoder *acoder,
  330. uint8_t *dst, ptrdiff_t stride, uint8_t *mask,
  331. ptrdiff_t mask_stride, int x, int y,
  332. int width, int height,
  333. PixContext *pctx)
  334. {
  335. int i, j, p;
  336. uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * c->rgb_stride;
  337. dst += x + y * stride;
  338. mask += x + y * mask_stride;
  339. for (j = 0; j < height; j++) {
  340. for (i = 0; i < width; i++) {
  341. if (c->avctx->err_recognition & AV_EF_EXPLODE &&
  342. ( c->rgb_pic && mask[i] != 0x01 && mask[i] != 0x02 && mask[i] != 0x04 ||
  343. !c->rgb_pic && mask[i] != 0x80 && mask[i] != 0xFF))
  344. return -1;
  345. if (mask[i] == 0x02) {
  346. copy_rectangles(c, x + i, y + j, 1, 1);
  347. } else if (mask[i] == 0x04) {
  348. if (motion_compensation(c, x + i, y + j, 1, 1))
  349. return -1;
  350. } else if (mask[i] != 0x80) {
  351. if (!i && !j)
  352. p = decode_pixel(acoder, pctx, NULL, 0, 0);
  353. else
  354. p = decode_pixel_in_context(acoder, pctx, dst + i, stride,
  355. i, j, width - i - 1);
  356. if (p < 0)
  357. return p;
  358. dst[i] = p;
  359. if (c->rgb_pic)
  360. AV_WB24(rgb_dst + i * 3, c->pal[p]);
  361. }
  362. }
  363. dst += stride;
  364. mask += mask_stride;
  365. rgb_dst += c->rgb_stride;
  366. }
  367. return 0;
  368. }
  369. static av_cold void slicecontext_init(SliceContext *sc,
  370. int version, int full_model_syms)
  371. {
  372. model_init(&sc->intra_region, 2, THRESH_ADAPTIVE);
  373. model_init(&sc->inter_region, 2, THRESH_ADAPTIVE);
  374. model_init(&sc->split_mode, 3, THRESH_HIGH);
  375. model_init(&sc->edge_mode, 2, THRESH_HIGH);
  376. model_init(&sc->pivot, 3, THRESH_LOW);
  377. pixctx_init(&sc->intra_pix_ctx, 8, full_model_syms, 0);
  378. pixctx_init(&sc->inter_pix_ctx, version ? 3 : 2,
  379. full_model_syms, version ? 1 : 0);
  380. }
  381. void ff_mss12_slicecontext_reset(SliceContext *sc)
  382. {
  383. model_reset(&sc->intra_region);
  384. model_reset(&sc->inter_region);
  385. model_reset(&sc->split_mode);
  386. model_reset(&sc->edge_mode);
  387. model_reset(&sc->pivot);
  388. pixctx_reset(&sc->intra_pix_ctx);
  389. pixctx_reset(&sc->inter_pix_ctx);
  390. }
  391. static int decode_pivot(SliceContext *sc, ArithCoder *acoder, int base)
  392. {
  393. int val, inv;
  394. inv = acoder->get_model_sym(acoder, &sc->edge_mode);
  395. val = acoder->get_model_sym(acoder, &sc->pivot) + 1;
  396. if (val > 2) {
  397. if ((base + 1) / 2 - 2 <= 0)
  398. return -1;
  399. val = acoder->get_number(acoder, (base + 1) / 2 - 2) + 3;
  400. }
  401. if ((unsigned)val >= base)
  402. return -1;
  403. return inv ? base - val : val;
  404. }
  405. static int decode_region_intra(SliceContext *sc, ArithCoder *acoder,
  406. int x, int y, int width, int height)
  407. {
  408. MSS12Context const *c = sc->c;
  409. int mode;
  410. mode = acoder->get_model_sym(acoder, &sc->intra_region);
  411. if (!mode) {
  412. int i, j, pix, rgb_pix;
  413. ptrdiff_t stride = c->pal_stride;
  414. ptrdiff_t rgb_stride = c->rgb_stride;
  415. uint8_t *dst = c->pal_pic + x + y * stride;
  416. uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * rgb_stride;
  417. pix = decode_pixel(acoder, &sc->intra_pix_ctx, NULL, 0, 0);
  418. if (pix < 0)
  419. return pix;
  420. rgb_pix = c->pal[pix];
  421. for (i = 0; i < height; i++, dst += stride, rgb_dst += rgb_stride) {
  422. memset(dst, pix, width);
  423. if (c->rgb_pic)
  424. for (j = 0; j < width * 3; j += 3)
  425. AV_WB24(rgb_dst + j, rgb_pix);
  426. }
  427. } else {
  428. return decode_region(acoder, c->pal_pic, c->rgb_pic,
  429. x, y, width, height, c->pal_stride, c->rgb_stride,
  430. &sc->intra_pix_ctx, &c->pal[0]);
  431. }
  432. return 0;
  433. }
  434. static int decode_region_inter(SliceContext *sc, ArithCoder *acoder,
  435. int x, int y, int width, int height)
  436. {
  437. MSS12Context const *c = sc->c;
  438. int mode;
  439. mode = acoder->get_model_sym(acoder, &sc->inter_region);
  440. if (!mode) {
  441. mode = decode_pixel(acoder, &sc->inter_pix_ctx, NULL, 0, 0);
  442. if (mode < 0)
  443. return mode;
  444. if (c->avctx->err_recognition & AV_EF_EXPLODE &&
  445. ( c->rgb_pic && mode != 0x01 && mode != 0x02 && mode != 0x04 ||
  446. !c->rgb_pic && mode != 0x80 && mode != 0xFF))
  447. return -1;
  448. if (mode == 0x02)
  449. copy_rectangles(c, x, y, width, height);
  450. else if (mode == 0x04)
  451. return motion_compensation(c, x, y, width, height);
  452. else if (mode != 0x80)
  453. return decode_region_intra(sc, acoder, x, y, width, height);
  454. } else {
  455. if (decode_region(acoder, c->mask, NULL,
  456. x, y, width, height, c->mask_stride, 0,
  457. &sc->inter_pix_ctx, &c->pal[0]) < 0)
  458. return -1;
  459. return decode_region_masked(c, acoder, c->pal_pic,
  460. c->pal_stride, c->mask,
  461. c->mask_stride,
  462. x, y, width, height,
  463. &sc->intra_pix_ctx);
  464. }
  465. return 0;
  466. }
/**
 * Recursively decode a rectangle of the frame.
 *
 * A decoded split mode either divides the rectangle at a pivot
 * (SPLIT_VERT splits along the height, SPLIT_HOR along the width) and
 * recurses into both halves, or decodes it as a leaf region — intra for
 * keyframes, inter otherwise.
 *
 * @return 0 on success, a negative value on error
 */
int ff_mss12_decode_rect(SliceContext *sc, ArithCoder *acoder,
                         int x, int y, int width, int height)
{
    int mode, pivot;

    if (acoder->overread > MAX_OVERREAD)
        return AVERROR_INVALIDDATA;

    mode = acoder->get_model_sym(acoder, &sc->split_mode);

    switch (mode) {
    case SPLIT_VERT:
        if ((pivot = decode_pivot(sc, acoder, height)) < 1)
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x, y, width, pivot))
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x, y + pivot, width, height - pivot))
            return -1;
        break;
    case SPLIT_HOR:
        if ((pivot = decode_pivot(sc, acoder, width)) < 1)
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x, y, pivot, height))
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x + pivot, y, width - pivot, height))
            return -1;
        break;
    case SPLIT_NONE:
        if (sc->c->keyframe)
            return decode_region_intra(sc, acoder, x, y, width, height);
        else
            return decode_region_inter(sc, acoder, x, y, width, height);
    default:
        return -1;
    }

    return 0;
}
  501. av_cold int ff_mss12_decode_init(MSS12Context *c, int version,
  502. SliceContext* sc1, SliceContext *sc2)
  503. {
  504. AVCodecContext *avctx = c->avctx;
  505. int i;
  506. if (avctx->extradata_size < 52 + 256 * 3) {
  507. av_log(avctx, AV_LOG_ERROR, "Insufficient extradata size %d\n",
  508. avctx->extradata_size);
  509. return AVERROR_INVALIDDATA;
  510. }
  511. if (AV_RB32(avctx->extradata) < avctx->extradata_size) {
  512. av_log(avctx, AV_LOG_ERROR,
  513. "Insufficient extradata size: expected %"PRIu32" got %d\n",
  514. AV_RB32(avctx->extradata),
  515. avctx->extradata_size);
  516. return AVERROR_INVALIDDATA;
  517. }
  518. avctx->coded_width = FFMAX(AV_RB32(avctx->extradata + 20), avctx->width);
  519. avctx->coded_height = FFMAX(AV_RB32(avctx->extradata + 24), avctx->height);
  520. if (avctx->coded_width > 4096 || avctx->coded_height > 4096) {
  521. av_log(avctx, AV_LOG_ERROR, "Frame dimensions %dx%d too large",
  522. avctx->coded_width, avctx->coded_height);
  523. return AVERROR_INVALIDDATA;
  524. }
  525. if (avctx->coded_width < 1 || avctx->coded_height < 1) {
  526. av_log(avctx, AV_LOG_ERROR, "Frame dimensions %dx%d too small",
  527. avctx->coded_width, avctx->coded_height);
  528. return AVERROR_INVALIDDATA;
  529. }
  530. av_log(avctx, AV_LOG_DEBUG, "Encoder version %"PRIu32".%"PRIu32"\n",
  531. AV_RB32(avctx->extradata + 4), AV_RB32(avctx->extradata + 8));
  532. if (version != AV_RB32(avctx->extradata + 4) > 1) {
  533. av_log(avctx, AV_LOG_ERROR,
  534. "Header version doesn't match codec tag\n");
  535. return -1;
  536. }
  537. c->free_colours = AV_RB32(avctx->extradata + 48);
  538. if ((unsigned)c->free_colours > 256) {
  539. av_log(avctx, AV_LOG_ERROR,
  540. "Incorrect number of changeable palette entries: %d\n",
  541. c->free_colours);
  542. return AVERROR_INVALIDDATA;
  543. }
  544. av_log(avctx, AV_LOG_DEBUG, "%d free colour(s)\n", c->free_colours);
  545. av_log(avctx, AV_LOG_DEBUG, "Display dimensions %"PRIu32"x%"PRIu32"\n",
  546. AV_RB32(avctx->extradata + 12), AV_RB32(avctx->extradata + 16));
  547. av_log(avctx, AV_LOG_DEBUG, "Coded dimensions %dx%d\n",
  548. avctx->coded_width, avctx->coded_height);
  549. av_log(avctx, AV_LOG_DEBUG, "%g frames per second\n",
  550. av_int2float(AV_RB32(avctx->extradata + 28)));
  551. av_log(avctx, AV_LOG_DEBUG, "Bitrate %"PRIu32" bps\n",
  552. AV_RB32(avctx->extradata + 32));
  553. av_log(avctx, AV_LOG_DEBUG, "Max. lead time %g ms\n",
  554. av_int2float(AV_RB32(avctx->extradata + 36)));
  555. av_log(avctx, AV_LOG_DEBUG, "Max. lag time %g ms\n",
  556. av_int2float(AV_RB32(avctx->extradata + 40)));
  557. av_log(avctx, AV_LOG_DEBUG, "Max. seek time %g ms\n",
  558. av_int2float(AV_RB32(avctx->extradata + 44)));
  559. if (version) {
  560. if (avctx->extradata_size < 60 + 256 * 3) {
  561. av_log(avctx, AV_LOG_ERROR,
  562. "Insufficient extradata size %d for v2\n",
  563. avctx->extradata_size);
  564. return AVERROR_INVALIDDATA;
  565. }
  566. c->slice_split = AV_RB32(avctx->extradata + 52);
  567. av_log(avctx, AV_LOG_DEBUG, "Slice split %d\n", c->slice_split);
  568. c->full_model_syms = AV_RB32(avctx->extradata + 56);
  569. if (c->full_model_syms < 2 || c->full_model_syms > 256) {
  570. av_log(avctx, AV_LOG_ERROR,
  571. "Incorrect number of used colours %d\n",
  572. c->full_model_syms);
  573. return AVERROR_INVALIDDATA;
  574. }
  575. av_log(avctx, AV_LOG_DEBUG, "Used colours %d\n",
  576. c->full_model_syms);
  577. } else {
  578. c->slice_split = 0;
  579. c->full_model_syms = 256;
  580. }
  581. for (i = 0; i < 256; i++)
  582. c->pal[i] = 0xFFU << 24 | AV_RB24(avctx->extradata + 52 +
  583. (version ? 8 : 0) + i * 3);
  584. c->mask_stride = FFALIGN(avctx->width, 16);
  585. c->mask = av_malloc_array(c->mask_stride, avctx->height);
  586. if (!c->mask) {
  587. av_log(avctx, AV_LOG_ERROR, "Cannot allocate mask plane\n");
  588. return AVERROR(ENOMEM);
  589. }
  590. sc1->c = c;
  591. slicecontext_init(sc1, version, c->full_model_syms);
  592. if (c->slice_split) {
  593. sc2->c = c;
  594. slicecontext_init(sc2, version, c->full_model_syms);
  595. }
  596. c->corrupted = 1;
  597. return 0;
  598. }
  599. av_cold int ff_mss12_decode_end(MSS12Context *c)
  600. {
  601. av_freep(&c->mask);
  602. return 0;
  603. }