/*
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Common functions for Microsoft Screen 1 and 2
 */

#include <inttypes.h>

#include "libavutil/intfloat.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "mss12.h"

enum SplitMode {
    SPLIT_VERT = 0,
    SPLIT_HOR,
    SPLIT_NONE
};

static const int sec_order_sizes[4] = { 1, 7, 6, 1 };

enum ContextDirection {
    TOP_LEFT = 0,
    TOP,
    TOP_RIGHT,
    LEFT
};
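
/*
 * Adaptive model used by the arithmetic coder: weights[] holds per-symbol
 * occurrence counts and cum_prob[] the matching cumulative totals, with
 * cum_prob[0] being the overall total. Once cum_prob[0] exceeds the model
 * threshold all weights are halved; THRESH_ADAPTIVE models recompute that
 * threshold from the current statistics in model_calc_threshold().
 */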
static int model_calc_threshold(Model *m)
{
    int thr;

    thr = 2 * m->weights[m->num_syms] - 1;
    thr = ((thr >> 1) + 4 * m->cum_prob[0]) / thr;

    return FFMIN(thr, 0x3FFF);
}

static void model_reset(Model *m)
{
    int i;

    for (i = 0; i <= m->num_syms; i++) {
        m->weights[i]  = 1;
        m->cum_prob[i] = m->num_syms - i;
    }
    m->weights[0] = 0;
    for (i = 0; i < m->num_syms; i++)
        m->idx2sym[i + 1] = i;
}

static av_cold void model_init(Model *m, int num_syms, int thr_weight)
{
    m->num_syms   = num_syms;
    m->thr_weight = thr_weight;
    m->threshold  = num_syms * thr_weight;
}

static void model_rescale_weights(Model *m)
{
    int i;
    int cum_prob;

    if (m->thr_weight == THRESH_ADAPTIVE)
        m->threshold = model_calc_threshold(m);
    while (m->cum_prob[0] > m->threshold) {
        cum_prob = 0;
        for (i = m->num_syms; i >= 0; i--) {
            m->cum_prob[i] = cum_prob;
            m->weights[i]  = (m->weights[i] + 1) >> 1;
            cum_prob      += m->weights[i];
        }
    }
}
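
/*
 * Update the model after decoding symbol index 'val': weights are kept in
 * non-increasing order (weights[0] is a zero sentinel), so if the increment
 * would break the ordering the symbol is first swapped, via idx2sym, with
 * the leftmost entry of equal weight and that entry is incremented instead.
 */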
void ff_mss12_model_update(Model *m, int val)
{
    int i;

    if (m->weights[val] == m->weights[val - 1]) {
        for (i = val; m->weights[i - 1] == m->weights[val]; i--);
        if (i != val) {
            int sym1, sym2;

            sym1 = m->idx2sym[val];
            sym2 = m->idx2sym[i];

            m->idx2sym[val] = sym2;
            m->idx2sym[i]   = sym1;

            val = i;
        }
    }
    m->weights[val]++;
    for (i = val - 1; i >= 0; i--)
        m->cum_prob[i]++;
    model_rescale_weights(m);
}
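
/*
 * A PixContext holds a small most-recently-used cache of pixel values:
 * pixels are coded as an index into the cache when possible and through
 * the full palette model otherwise. With special_initial_cache set (the
 * version != 0, i.e. MSS2, inter context) the cache starts as 1, 2, 4 --
 * the inter mask values -- rather than the identity mapping.
 */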
static void pixctx_reset(PixContext *ctx)
{
    int i, j;

    if (!ctx->special_initial_cache)
        for (i = 0; i < ctx->cache_size; i++)
            ctx->cache[i] = i;
    else {
        ctx->cache[0] = 1;
        ctx->cache[1] = 2;
        ctx->cache[2] = 4;
    }

    model_reset(&ctx->cache_model);
    model_reset(&ctx->full_model);

    for (i = 0; i < 15; i++)
        for (j = 0; j < 4; j++)
            model_reset(&ctx->sec_models[i][j]);
}

static av_cold void pixctx_init(PixContext *ctx, int cache_size,
                                int full_model_syms, int special_initial_cache)
{
    int i, j, k, idx;

    ctx->cache_size            = cache_size + 4;
    ctx->num_syms              = cache_size;
    ctx->special_initial_cache = special_initial_cache;

    model_init(&ctx->cache_model, ctx->num_syms + 1, THRESH_LOW);
    model_init(&ctx->full_model, full_model_syms, THRESH_HIGH);

    for (i = 0, idx = 0; i < 4; i++)
        for (j = 0; j < sec_order_sizes[i]; j++, idx++)
            for (k = 0; k < 4; k++)
                model_init(&ctx->sec_models[idx][k], 2 + i,
                           i ? THRESH_LOW : THRESH_ADAPTIVE);
}
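
/*
 * Decode a pixel that has no decoded neighbours: try the cache model
 * first, fall back to the full palette model on a cache miss, and finally
 * move the decoded value to the front of the cache.
 */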
static int decode_top_left_pixel(ArithCoder *acoder, PixContext *pctx)
{
    int i, val, pix;

    val = acoder->get_model_sym(acoder, &pctx->cache_model);
    if (val < pctx->num_syms) {
        pix = pctx->cache[val];
    } else {
        pix = acoder->get_model_sym(acoder, &pctx->full_model);
        for (i = 0; i < pctx->cache_size - 1; i++)
            if (pctx->cache[i] == pix)
                break;
        val = i;
    }

    if (val) {
        for (i = val; i > 0; i--)
            pctx->cache[i] = pctx->cache[i - 1];
        pctx->cache[0] = pix;
    }

    return pix;
}
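
/*
 * Decode a pixel known to differ from the given neighbour values: cache
 * entries matching a neighbour are skipped while resolving the decoded
 * cache index, so the small cache covers more candidate values.
 */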
static int decode_pixel(ArithCoder *acoder, PixContext *pctx,
                        uint8_t *ngb, int num_ngb)
{
    int i, val, pix;

    val = acoder->get_model_sym(acoder, &pctx->cache_model);
    if (val < pctx->num_syms) {
        int idx, j;

        idx = 0;
        for (i = 0; i < pctx->cache_size; i++) {
            for (j = 0; j < num_ngb; j++)
                if (pctx->cache[i] == ngb[j])
                    break;
            if (j == num_ngb) {
                if (idx == val)
                    break;
                idx++;
            }
        }
        val = FFMIN(i, pctx->cache_size - 1);
        pix = pctx->cache[val];
    } else {
        pix = acoder->get_model_sym(acoder, &pctx->full_model);
        for (i = 0; i < pctx->cache_size - 1; i++)
            if (pctx->cache[i] == pix)
                break;
        val = i;
    }

    if (val) {
        for (i = val; i > 0; i--)
            pctx->cache[i] = pctx->cache[i - 1];
        pctx->cache[0] = pix;
    }

    return pix;
}
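
/*
 * Decode a pixel from its four causal neighbours (top-left, top, top-right,
 * left). The number of distinct neighbour values (1-4) together with the
 * pattern of which neighbours match selects one of 15 secondary models
 * ("layers"); the two bits of 'sub' refine the context with whether the
 * pixels two steps left and up repeat the immediate neighbours. A decoded
 * symbol below nlen picks one of the distinct neighbour values directly,
 * anything else is an escape handled by decode_pixel().
 */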
static int decode_pixel_in_context(ArithCoder *acoder, PixContext *pctx,
                                   uint8_t *src, int stride, int x, int y,
                                   int has_right)
{
    uint8_t neighbours[4];
    uint8_t ref_pix[4];
    int nlen;
    int layer = 0, sub;
    int pix;
    int i, j;

    if (!y) {
        memset(neighbours, src[-1], 4);
    } else {
        neighbours[TOP] = src[-stride];
        if (!x) {
            neighbours[TOP_LEFT] = neighbours[LEFT] = neighbours[TOP];
        } else {
            neighbours[TOP_LEFT] = src[-stride - 1];
            neighbours[    LEFT] = src[-1];
        }
        if (has_right)
            neighbours[TOP_RIGHT] = src[-stride + 1];
        else
            neighbours[TOP_RIGHT] = neighbours[TOP];
    }

    sub = 0;
    if (x >= 2 && src[-2] == neighbours[LEFT])
        sub  = 1;
    if (y >= 2 && src[-2 * stride] == neighbours[TOP])
        sub |= 2;

    nlen       = 1;
    ref_pix[0] = neighbours[0];
    for (i = 1; i < 4; i++) {
        for (j = 0; j < nlen; j++)
            if (ref_pix[j] == neighbours[i])
                break;
        if (j == nlen)
            ref_pix[nlen++] = neighbours[i];
    }

    switch (nlen) {
    case 1:
        layer = 0;
        break;
    case 2:
        if (neighbours[TOP] == neighbours[TOP_LEFT]) {
            if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT])
                layer = 1;
            else if (neighbours[LEFT] == neighbours[TOP_LEFT])
                layer = 2;
            else
                layer = 3;
        } else if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT]) {
            if (neighbours[LEFT] == neighbours[TOP_LEFT])
                layer = 4;
            else
                layer = 5;
        } else if (neighbours[LEFT] == neighbours[TOP_LEFT]) {
            layer = 6;
        } else {
            layer = 7;
        }
        break;
    case 3:
        if (neighbours[TOP] == neighbours[TOP_LEFT])
            layer = 8;
        else if (neighbours[TOP_RIGHT] == neighbours[TOP_LEFT])
            layer = 9;
        else if (neighbours[LEFT] == neighbours[TOP_LEFT])
            layer = 10;
        else if (neighbours[TOP_RIGHT] == neighbours[TOP])
            layer = 11;
        else if (neighbours[TOP] == neighbours[LEFT])
            layer = 12;
        else
            layer = 13;
        break;
    case 4:
        layer = 14;
        break;
    }

    pix = acoder->get_model_sym(acoder,
                                &pctx->sec_models[layer][sub]);
    if (pix < nlen)
        return ref_pix[pix];
    else
        return decode_pixel(acoder, pctx, ref_pix, nlen);
}
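
/*
 * Decode a full rectangle pixel by pixel into the palettized plane and,
 * when an RGB companion picture is present, expand each palette index to
 * packed 24-bit RGB on the fly.
 */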
static int decode_region(ArithCoder *acoder, uint8_t *dst, uint8_t *rgb_pic,
                         int x, int y, int width, int height, int stride,
                         int rgb_stride, PixContext *pctx, const uint32_t *pal)
{
    int i, j, p;
    uint8_t *rgb_dst = rgb_pic + x * 3 + y * rgb_stride;

    dst += x + y * stride;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            if (!i && !j)
                p = decode_top_left_pixel(acoder, pctx);
            else
                p = decode_pixel_in_context(acoder, pctx, dst + i, stride,
                                            i, j, width - i - 1);
            dst[i] = p;
            if (rgb_pic)
                AV_WB24(rgb_dst + i * 3, pal[p]);
        }
        dst     += stride;
        rgb_dst += rgb_stride;
    }

    return 0;
}
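
/* Copy a rectangle unchanged from the previous frame, in both the
 * palettized and, when present, the RGB planes. */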
static void copy_rectangles(MSS12Context const *c,
                            int x, int y, int width, int height)
{
    int j;

    if (c->last_rgb_pic)
        for (j = y; j < y + height; j++) {
            memcpy(c->rgb_pic + j * c->rgb_stride + x * 3,
                   c->last_rgb_pic + j * c->rgb_stride + x * 3,
                   width * 3);
            memcpy(c->pal_pic + j * c->pal_stride + x,
                   c->last_pal_pic + j * c->pal_stride + x,
                   width);
        }
}
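
/*
 * Copy a rectangle displaced by the global motion vector (mvX, mvY) from
 * the previous frame, or from the current frame itself when no previous
 * RGB picture exists; memmove() is used since the areas may then overlap.
 * Fails if the displaced rectangle falls outside the picture.
 */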
static int motion_compensation(MSS12Context const *c,
                               int x, int y, int width, int height)
{
    if (x + c->mvX < 0 || x + c->mvX + width  > c->avctx->width  ||
        y + c->mvY < 0 || y + c->mvY + height > c->avctx->height ||
        !c->rgb_pic)
        return -1;
    else {
        uint8_t *dst     = c->pal_pic + x     + y * c->pal_stride;
        uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * c->rgb_stride;
        uint8_t *src;
        uint8_t *rgb_src;
        int j;

        x += c->mvX;
        y += c->mvY;
        if (c->last_rgb_pic) {
            src     = c->last_pal_pic + x     + y * c->pal_stride;
            rgb_src = c->last_rgb_pic + x * 3 + y * c->rgb_stride;
        } else {
            src     = c->pal_pic + x     + y * c->pal_stride;
            rgb_src = c->rgb_pic + x * 3 + y * c->rgb_stride;
        }
        for (j = 0; j < height; j++) {
            memmove(dst, src, width);
            memmove(rgb_dst, rgb_src, width * 3);
            dst     += c->pal_stride;
            src     += c->pal_stride;
            rgb_dst += c->rgb_stride;
            rgb_src += c->rgb_stride;
        }
    }

    return 0;
}
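
/*
 * Decode a rectangle guided by the per-pixel mask previously decoded into
 * 'mask': 0x02 copies the pixel from the previous frame, 0x04 motion-
 * compensates it, 0x80 leaves it untouched and any other value decodes a
 * fresh pixel. With AV_EF_EXPLODE set, mask values outside the expected
 * set (0x01/0x02/0x04 with RGB output, 0x80/0xFF without) abort decoding.
 */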
static int decode_region_masked(MSS12Context const *c, ArithCoder *acoder,
                                uint8_t *dst, int stride, uint8_t *mask,
                                int mask_stride, int x, int y,
                                int width, int height,
                                PixContext *pctx)
{
    int i, j, p;
    uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * c->rgb_stride;

    dst  += x + y * stride;
    mask += x + y * mask_stride;

    for (j = 0; j < height; j++) {
        for (i = 0; i < width; i++) {
            if (c->avctx->err_recognition & AV_EF_EXPLODE &&
                (( c->rgb_pic && mask[i] != 0x01 && mask[i] != 0x02 && mask[i] != 0x04) ||
                 (!c->rgb_pic && mask[i] != 0x80 && mask[i] != 0xFF)))
                return -1;

            if (mask[i] == 0x02) {
                copy_rectangles(c, x + i, y + j, 1, 1);
            } else if (mask[i] == 0x04) {
                if (motion_compensation(c, x + i, y + j, 1, 1))
                    return -1;
            } else if (mask[i] != 0x80) {
                if (!i && !j)
                    p = decode_top_left_pixel(acoder, pctx);
                else
                    p = decode_pixel_in_context(acoder, pctx, dst + i, stride,
                                                i, j, width - i - 1);
                dst[i] = p;
                if (c->rgb_pic)
                    AV_WB24(rgb_dst + i * 3, c->pal[p]);
            }
        }
        dst     += stride;
        mask    += mask_stride;
        rgb_dst += c->rgb_stride;
    }

    return 0;
}

static av_cold void slicecontext_init(SliceContext *sc,
                                      int version, int full_model_syms)
{
    model_init(&sc->intra_region, 2, THRESH_ADAPTIVE);
    model_init(&sc->inter_region, 2, THRESH_ADAPTIVE);
    model_init(&sc->split_mode,   3, THRESH_HIGH);
    model_init(&sc->edge_mode,    2, THRESH_HIGH);
    model_init(&sc->pivot,        3, THRESH_LOW);

    pixctx_init(&sc->intra_pix_ctx, 8, full_model_syms, 0);
    pixctx_init(&sc->inter_pix_ctx, version ? 3 : 2,
                full_model_syms, version ? 1 : 0);
}

void ff_mss12_slicecontext_reset(SliceContext *sc)
{
    model_reset(&sc->intra_region);
    model_reset(&sc->inter_region);
    model_reset(&sc->split_mode);
    model_reset(&sc->edge_mode);
    model_reset(&sc->pivot);
    pixctx_reset(&sc->intra_pix_ctx);
    pixctx_reset(&sc->inter_pix_ctx);
}
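
/*
 * Decode the coordinate at which a rectangle is split: pivot values 1 and
 * 2 come straight from the pivot model, larger ones are coded as an offset
 * from 3 within roughly half the base length, and the edge_mode bit
 * selects whether the pivot is measured from the near or the far edge.
 */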
static int decode_pivot(SliceContext *sc, ArithCoder *acoder, int base)
{
    int val, inv;

    inv = acoder->get_model_sym(acoder, &sc->edge_mode);
    val = acoder->get_model_sym(acoder, &sc->pivot) + 1;

    if (val > 2) {
        if ((base + 1) / 2 - 2 <= 0)
            return -1;
        val = acoder->get_number(acoder, (base + 1) / 2 - 2) + 3;
    }

    if (val >= base)
        return -1;

    return inv ? base - val : val;
}
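
/*
 * Decode an intra region: mode 0 fills the whole rectangle with a single
 * decoded colour, mode 1 decodes every pixel through the intra context.
 */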
static int decode_region_intra(SliceContext *sc, ArithCoder *acoder,
                               int x, int y, int width, int height)
{
    MSS12Context const *c = sc->c;
    int mode;

    mode = acoder->get_model_sym(acoder, &sc->intra_region);

    if (!mode) {
        int i, j, pix, rgb_pix;
        int stride       = c->pal_stride;
        int rgb_stride   = c->rgb_stride;
        uint8_t *dst     = c->pal_pic + x     + y * stride;
        uint8_t *rgb_dst = c->rgb_pic + x * 3 + y * rgb_stride;

        pix     = decode_top_left_pixel(acoder, &sc->intra_pix_ctx);
        rgb_pix = c->pal[pix];
        for (i = 0; i < height; i++, dst += stride, rgb_dst += rgb_stride) {
            memset(dst, pix, width);
            if (c->rgb_pic)
                for (j = 0; j < width * 3; j += 3)
                    AV_WB24(rgb_dst + j, rgb_pix);
        }
    } else {
        return decode_region(acoder, c->pal_pic, c->rgb_pic,
                             x, y, width, height, c->pal_stride, c->rgb_stride,
                             &sc->intra_pix_ctx, &c->pal[0]);
    }

    return 0;
}
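
/*
 * Decode an inter region: mode 0 decodes one operation code (using the
 * same values as the per-pixel mask: copy, motion compensation or full
 * intra re-decode) applied to the whole rectangle, while mode 1 decodes a
 * pixel mask into c->mask and then only the changed pixels.
 */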
static int decode_region_inter(SliceContext *sc, ArithCoder *acoder,
                               int x, int y, int width, int height)
{
    MSS12Context const *c = sc->c;
    int mode;

    mode = acoder->get_model_sym(acoder, &sc->inter_region);

    if (!mode) {
        mode = decode_top_left_pixel(acoder, &sc->inter_pix_ctx);

        if (c->avctx->err_recognition & AV_EF_EXPLODE &&
            (( c->rgb_pic && mode != 0x01 && mode != 0x02 && mode != 0x04) ||
             (!c->rgb_pic && mode != 0x80 && mode != 0xFF)))
            return -1;

        if (mode == 0x02)
            copy_rectangles(c, x, y, width, height);
        else if (mode == 0x04)
            return motion_compensation(c, x, y, width, height);
        else if (mode != 0x80)
            return decode_region_intra(sc, acoder, x, y, width, height);
    } else {
        if (decode_region(acoder, c->mask, NULL,
                          x, y, width, height, c->mask_stride, 0,
                          &sc->inter_pix_ctx, &c->pal[0]) < 0)
            return -1;
        return decode_region_masked(c, acoder, c->pal_pic,
                                    c->pal_stride, c->mask,
                                    c->mask_stride,
                                    x, y, width, height,
                                    &sc->intra_pix_ctx);
    }

    return 0;
}
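
/*
 * Recursively decode a rectangle: the split_mode model chooses between
 * splitting the height (SPLIT_VERT), splitting the width (SPLIT_HOR) or a
 * leaf (SPLIT_NONE), which is decoded as an intra region on keyframes and
 * as an inter region otherwise.
 */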
int ff_mss12_decode_rect(SliceContext *sc, ArithCoder *acoder,
                         int x, int y, int width, int height)
{
    int mode, pivot;

    mode = acoder->get_model_sym(acoder, &sc->split_mode);

    switch (mode) {
    case SPLIT_VERT:
        if ((pivot = decode_pivot(sc, acoder, height)) < 1)
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x, y, width, pivot))
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x, y + pivot, width, height - pivot))
            return -1;
        break;
    case SPLIT_HOR:
        if ((pivot = decode_pivot(sc, acoder, width)) < 1)
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x, y, pivot, height))
            return -1;
        if (ff_mss12_decode_rect(sc, acoder, x + pivot, y, width - pivot, height))
            return -1;
        break;
    case SPLIT_NONE:
        if (sc->c->keyframe)
            return decode_region_intra(sc, acoder, x, y, width, height);
        else
            return decode_region_inter(sc, acoder, x, y, width, height);
    default:
        return -1;
    }

    return 0;
}
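
/*
 * Initialise the decoder from the codec extradata. Fields read below are
 * 32-bit big-endian at these offsets: 0 header size, 4/8 encoder version,
 * 12/16 display dimensions, 20/24 coded dimensions, 28 frame rate (float),
 * 32 bitrate, 36/40/44 max. lead/lag/seek times (float, ms), 48 number of
 * changeable palette entries; version 2 streams add the slice split (52)
 * and the number of used colours (56). The 256-entry RGB palette follows.
 */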
av_cold int ff_mss12_decode_init(MSS12Context *c, int version,
                                 SliceContext *sc1, SliceContext *sc2)
{
    AVCodecContext *avctx = c->avctx;
    int i;

    if (avctx->extradata_size < 52 + 256 * 3) {
        av_log(avctx, AV_LOG_ERROR, "Insufficient extradata size %d\n",
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    if (AV_RB32(avctx->extradata) < avctx->extradata_size) {
        av_log(avctx, AV_LOG_ERROR,
               "Insufficient extradata size: expected %"PRIu32" got %d\n",
               AV_RB32(avctx->extradata),
               avctx->extradata_size);
        return AVERROR_INVALIDDATA;
    }

    avctx->coded_width  = AV_RB32(avctx->extradata + 20);
    avctx->coded_height = AV_RB32(avctx->extradata + 24);
    if (avctx->coded_width > 4096 || avctx->coded_height > 4096) {
        av_log(avctx, AV_LOG_ERROR, "Frame dimensions %dx%d too large\n",
               avctx->coded_width, avctx->coded_height);
        return AVERROR_INVALIDDATA;
    }

    av_log(avctx, AV_LOG_DEBUG, "Encoder version %"PRIu32".%"PRIu32"\n",
           AV_RB32(avctx->extradata + 4), AV_RB32(avctx->extradata + 8));
    /* note: '>' binds tighter than '!=', so this compares the codec
     * version (0 or 1) with the boolean (header version > 1) */
    if (version != (AV_RB32(avctx->extradata + 4) > 1)) {
        av_log(avctx, AV_LOG_ERROR,
               "Header version doesn't match codec tag\n");
        return -1;
    }

    c->free_colours = AV_RB32(avctx->extradata + 48);
    if ((unsigned)c->free_colours > 256) {
        av_log(avctx, AV_LOG_ERROR,
               "Incorrect number of changeable palette entries: %d\n",
               c->free_colours);
        return AVERROR_INVALIDDATA;
    }
    av_log(avctx, AV_LOG_DEBUG, "%d free colour(s)\n", c->free_colours);

    av_log(avctx, AV_LOG_DEBUG, "Display dimensions %"PRIu32"x%"PRIu32"\n",
           AV_RB32(avctx->extradata + 12), AV_RB32(avctx->extradata + 16));
    av_log(avctx, AV_LOG_DEBUG, "Coded dimensions %dx%d\n",
           avctx->coded_width, avctx->coded_height);
    av_log(avctx, AV_LOG_DEBUG, "%g frames per second\n",
           av_int2float(AV_RB32(avctx->extradata + 28)));
    av_log(avctx, AV_LOG_DEBUG, "Bitrate %"PRIu32" bps\n",
           AV_RB32(avctx->extradata + 32));
    av_log(avctx, AV_LOG_DEBUG, "Max. lead time %g ms\n",
           av_int2float(AV_RB32(avctx->extradata + 36)));
    av_log(avctx, AV_LOG_DEBUG, "Max. lag time %g ms\n",
           av_int2float(AV_RB32(avctx->extradata + 40)));
    av_log(avctx, AV_LOG_DEBUG, "Max. seek time %g ms\n",
           av_int2float(AV_RB32(avctx->extradata + 44)));

    if (version) {
        if (avctx->extradata_size < 60 + 256 * 3) {
            av_log(avctx, AV_LOG_ERROR,
                   "Insufficient extradata size %d for v2\n",
                   avctx->extradata_size);
            return AVERROR_INVALIDDATA;
        }

        c->slice_split = AV_RB32(avctx->extradata + 52);
        av_log(avctx, AV_LOG_DEBUG, "Slice split %d\n", c->slice_split);

        c->full_model_syms = AV_RB32(avctx->extradata + 56);
        if (c->full_model_syms < 2 || c->full_model_syms > 256) {
            av_log(avctx, AV_LOG_ERROR,
                   "Incorrect number of used colours %d\n",
                   c->full_model_syms);
            return AVERROR_INVALIDDATA;
        }
        av_log(avctx, AV_LOG_DEBUG, "Used colours %d\n",
               c->full_model_syms);
    } else {
        c->slice_split     = 0;
        c->full_model_syms = 256;
    }

    for (i = 0; i < 256; i++)
        c->pal[i] = AV_RB24(avctx->extradata + 52 +
                            (version ? 8 : 0) + i * 3);

    c->mask_stride = FFALIGN(avctx->width, 16);
    c->mask        = av_malloc(c->mask_stride * avctx->height);
    if (!c->mask) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate mask plane\n");
        return AVERROR(ENOMEM);
    }

    sc1->c = c;
    slicecontext_init(sc1, version, c->full_model_syms);
    if (c->slice_split) {
        sc2->c = c;
        slicecontext_init(sc2, version, c->full_model_syms);
    }
    c->corrupted = 1;

    return 0;
}

av_cold int ff_mss12_decode_end(MSS12Context *c)
{
    av_freep(&c->mask);

    return 0;
}