You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1312 lines
41KB

  1. /*
  2. * LucasArts Smush video decoder
  3. * Copyright (c) 2006 Cyril Zorin
  4. * Copyright (c) 2011 Konstantin Shishkov
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. // #define DEBUG 1
  23. #include "avcodec.h"
  24. #include "copy_block.h"
  25. #include "bytestream.h"
  26. #include "internal.h"
  27. #include "libavutil/bswap.h"
  28. #include "libavutil/imgutils.h"
  29. #include "sanm_data.h"
  30. #include "libavutil/avassert.h"
  31. #define NGLYPHS 256
/* Decoder state, shared by the old palettised (ANIM) and new 16bpp (SANM)
 * format paths. */
typedef struct {
    AVCodecContext *avctx;
    GetByteContext gb;               /* reader over the current packet */
    int version, subversion;         /* version: nonzero = new 16bpp variant
                                        (no extradata); subversion read from
                                        extradata for the old variant */
    uint32_t pal[256];               /* 0xFF-alpha ARGB palette (old format) */
    int16_t delta_pal[768];          /* palette deltas; consumer not visible
                                        in this chunk — see chunk parser */
    int pitch;                       /* line stride in pixels (== width) */
    int width, height;
    int aligned_width, aligned_height; /* dimensions rounded up to multiples of 8 */
    int prev_seq;                    /* sequence number of previous codec47 frame */
    AVFrame frame, *output;
    uint16_t *frm0, *frm1, *frm2;    /* current frame and two reference frames */
    uint8_t *stored_frame;           /* extra frame store, old format only */
    uint32_t frm0_size, frm1_size, frm2_size;
    uint32_t stored_frame_size;
    uint8_t *rle_buf;                /* scratch buffer for codec 8 RLE output */
    unsigned int rle_buf_size;
    int rotate_code;                 /* buffer rotation to apply after decoding */
    long npixels, buf_size;          /* pixel count / per-buffer byte size */
    uint16_t codebook[256];          /* 16bpp values looked up by palette index */
    uint16_t small_codebook[4];      /* fill colours for opcodes 0xF9..0xFC */
    int8_t p4x4glyphs[NGLYPHS][16];  /* precomputed 4x4 glyph patterns */
    int8_t p8x8glyphs[NGLYPHS][64];  /* precomputed 8x8 glyph patterns */
} SANMVideoContext;
/* Parsed per-frame header of the new (version 1/2) format. */
typedef struct {
    int seq_num, codec, rotate_code, rle_output_size;
    uint16_t bg_color;               /* background fill colour (16bpp) */
    uint32_t width, height;
} SANMFrameHeader;
/* Edge of the glyph bounding box a vector endpoint lies on.
 * Note: which_edge() maps y == 0 to BOTTOM_EDGE, i.e. the glyph
 * coordinate system is bottom-up. */
enum GlyphEdge {
    LEFT_EDGE,
    TOP_EDGE,
    RIGHT_EDGE,
    BOTTOM_EDGE,
    NO_EDGE
};
/* Fill direction used when rasterising a glyph line in make_glyphs(). */
enum GlyphDir {
    DIR_LEFT,
    DIR_UP,
    DIR_RIGHT,
    DIR_DOWN,
    NO_DIR
};
  75. /**
  76. * Return enum GlyphEdge of box where point (x, y) lies.
  77. *
  78. * @param x x point coordinate
  79. * @param y y point coordinate
  80. * @param edge_size box width/height.
  81. */
  82. static enum GlyphEdge which_edge(int x, int y, int edge_size)
  83. {
  84. const int edge_max = edge_size - 1;
  85. if (!y) {
  86. return BOTTOM_EDGE;
  87. } else if (y == edge_max) {
  88. return TOP_EDGE;
  89. } else if (!x) {
  90. return LEFT_EDGE;
  91. } else if (x == edge_max) {
  92. return RIGHT_EDGE;
  93. } else {
  94. return NO_EDGE;
  95. }
  96. }
/**
 * Map a pair of glyph-box edges to the direction a glyph line is
 * extended towards.
 *
 * The clauses overlap and are tested in order, so their precedence is
 * significant: any combination involving BOTTOM_EDGE (other than with
 * TOP_EDGE) resolves to DIR_UP before the later clauses are reached.
 */
static enum GlyphDir which_direction(enum GlyphEdge edge0, enum GlyphEdge edge1)
{
    if ((edge0 == LEFT_EDGE && edge1 == RIGHT_EDGE) ||
        (edge1 == LEFT_EDGE && edge0 == RIGHT_EDGE) ||
        (edge0 == BOTTOM_EDGE && edge1 != TOP_EDGE) ||
        (edge1 == BOTTOM_EDGE && edge0 != TOP_EDGE)) {
        return DIR_UP;
    } else if ((edge0 == TOP_EDGE && edge1 != BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 != BOTTOM_EDGE)) {
        return DIR_DOWN;
    } else if ((edge0 == LEFT_EDGE && edge1 != RIGHT_EDGE) ||
               (edge1 == LEFT_EDGE && edge0 != RIGHT_EDGE)) {
        return DIR_LEFT;
    } else if ((edge0 == TOP_EDGE && edge1 == BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 == BOTTOM_EDGE) ||
               (edge0 == RIGHT_EDGE && edge1 != LEFT_EDGE) ||
               (edge1 == RIGHT_EDGE && edge0 != LEFT_EDGE)) {
        return DIR_RIGHT;
    }
    return NO_DIR;
}
  118. /**
  119. * Interpolate two points.
  120. */
  121. static void interp_point(int8_t *points, int x0, int y0, int x1, int y1,
  122. int pos, int npoints)
  123. {
  124. if (npoints) {
  125. points[0] = (x0 * pos + x1 * (npoints - pos) + (npoints >> 1)) / npoints;
  126. points[1] = (y0 * pos + y1 * (npoints - pos) + (npoints >> 1)) / npoints;
  127. } else {
  128. points[0] = x0;
  129. points[1] = y0;
  130. }
  131. }
/**
 * Construct glyph patterns by iterating through vector coordinates.
 *
 * For every ordered pair of endpoints from the coordinate vectors, a
 * line is interpolated across the glyph and each point on it is
 * extended to the box border in the direction determined by the pair's
 * edges, marking covered cells with 1.
 *
 * @param pglyphs pointer to table where glyphs are stored
 * @param xvec pointer to x component of vectors coordinates
 * @param yvec pointer to y component of vectors coordinates
 * @param side_length glyph width/height.
 */
static void make_glyphs(int8_t *pglyphs, const int8_t *xvec, const int8_t *yvec,
                        const int side_length)
{
    const int glyph_size = side_length * side_length;
    int8_t *pglyph = pglyphs;
    int i, j;

    for (i = 0; i < GLYPH_COORD_VECT_SIZE; i++) {
        int x0 = xvec[i];
        int y0 = yvec[i];
        enum GlyphEdge edge0 = which_edge(x0, y0, side_length);

        for (j = 0; j < GLYPH_COORD_VECT_SIZE; j++, pglyph += glyph_size) {
            int x1 = xvec[j];
            int y1 = yvec[j];
            enum GlyphEdge edge1 = which_edge(x1, y1, side_length);
            enum GlyphDir dir = which_direction(edge0, edge1);
            /* number of interpolation steps = Chebyshev distance */
            int npoints = FFMAX(FFABS(x1 - x0), FFABS(y1 - y0));
            int ipoint;

            for (ipoint = 0; ipoint <= npoints; ipoint++) {
                int8_t point[2];
                int irow, icol;

                interp_point(point, x0, y0, x1, y1, ipoint, npoints);
                /* flood the cell towards the box border; NO_DIR leaves
                 * the glyph untouched for this point */
                switch (dir) {
                case DIR_UP:
                    for (irow = point[1]; irow >= 0; irow--)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;
                case DIR_DOWN:
                    for (irow = point[1]; irow < side_length; irow++)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;
                case DIR_LEFT:
                    for (icol = point[0]; icol >= 0; icol--)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;
                case DIR_RIGHT:
                    for (icol = point[0]; icol < side_length; icol++)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;
                }
            }
        }
    }
}
  183. static void init_sizes(SANMVideoContext *ctx, int width, int height)
  184. {
  185. ctx->width = width;
  186. ctx->height = height;
  187. ctx->npixels = width * height;
  188. ctx->aligned_width = FFALIGN(width, 8);
  189. ctx->aligned_height = FFALIGN(height, 8);
  190. ctx->buf_size = ctx->aligned_width * ctx->aligned_height * sizeof(ctx->frm0[0]);
  191. ctx->pitch = width;
  192. }
  193. static void destroy_buffers(SANMVideoContext *ctx)
  194. {
  195. av_freep(&ctx->frm0);
  196. av_freep(&ctx->frm1);
  197. av_freep(&ctx->frm2);
  198. av_freep(&ctx->stored_frame);
  199. av_freep(&ctx->rle_buf);
  200. ctx->frm0_size =
  201. ctx->frm1_size =
  202. ctx->frm2_size = 0;
  203. }
  204. static av_cold int init_buffers(SANMVideoContext *ctx)
  205. {
  206. av_fast_padded_malloc(&ctx->frm0, &ctx->frm0_size, ctx->buf_size);
  207. av_fast_padded_malloc(&ctx->frm1, &ctx->frm1_size, ctx->buf_size);
  208. av_fast_padded_malloc(&ctx->frm2, &ctx->frm2_size, ctx->buf_size);
  209. if (!ctx->version)
  210. av_fast_padded_malloc(&ctx->stored_frame, &ctx->stored_frame_size, ctx->buf_size);
  211. if (!ctx->frm0 || !ctx->frm1 || !ctx->frm2 || (!ctx->stored_frame && !ctx->version)) {
  212. destroy_buffers(ctx);
  213. return AVERROR(ENOMEM);
  214. }
  215. return 0;
  216. }
/* Rotate the frame buffers after decoding.
 * rotate_code 2: cycle all three (new frm0 = old frm1, frm1 = old frm2,
 * frm2 = old frm0); any other code: swap frm0 and frm2 only. */
static void rotate_bufs(SANMVideoContext *ctx, int rotate_code)
{
    av_dlog(ctx->avctx, "rotate %d\n", rotate_code);
    if (rotate_code == 2)
        FFSWAP(uint16_t*, ctx->frm1, ctx->frm2);
    FFSWAP(uint16_t*, ctx->frm2, ctx->frm0);
}
/* Decoder init: pick the format variant, allocate buffers, precompute
 * the glyph tables and, for the old variant, load the initial palette
 * from extradata. */
static av_cold int decode_init(AVCodecContext *avctx)
{
    SANMVideoContext *ctx = avctx->priv_data;
    ctx->avctx = avctx;
    /* no extradata means the new 16bpp SANM variant */
    ctx->version = !avctx->extradata_size;
    avctx->pix_fmt = ctx->version ? AV_PIX_FMT_RGB565 : AV_PIX_FMT_PAL8;
    init_sizes(ctx, avctx->width, avctx->height);
    if (init_buffers(ctx)) {
        av_log(avctx, AV_LOG_ERROR, "error allocating buffers\n");
        return AVERROR(ENOMEM);
    }
    ctx->output = &ctx->frame;
    ctx->output->data[0] = 0;
    /* precompute the 4x4 and 8x8 glyph rasters used by the pattern opcodes */
    make_glyphs(ctx->p4x4glyphs[0], glyph4_x, glyph4_y, 4);
    make_glyphs(ctx->p8x8glyphs[0], glyph8_x, glyph8_y, 8);
    if (!ctx->version) {
        int i;
        /* extradata layout: 2 bytes subversion + 256 * 4 bytes palette */
        if (avctx->extradata_size < 1026) {
            av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
            return AVERROR_INVALIDDATA;
        }
        ctx->subversion = AV_RL16(avctx->extradata);
        for (i = 0; i < 256; i++)
            ctx->pal[i] = 0xFFU << 24 | AV_RL32(avctx->extradata + 2 + i * 4);
    }
    return 0;
}
  251. static av_cold int decode_end(AVCodecContext *avctx)
  252. {
  253. SANMVideoContext *ctx = avctx->priv_data;
  254. destroy_buffers(ctx);
  255. if (ctx->frame.data[0]) {
  256. avctx->release_buffer(avctx, &ctx->frame);
  257. ctx->frame.data[0] = 0;
  258. }
  259. return 0;
  260. }
  261. static int rle_decode(SANMVideoContext *ctx, uint8_t *dst, const int out_size)
  262. {
  263. int opcode, color, run_len, left = out_size;
  264. while (left > 0) {
  265. opcode = bytestream2_get_byte(&ctx->gb);
  266. run_len = (opcode >> 1) + 1;
  267. if (run_len > left || bytestream2_get_bytes_left(&ctx->gb) <= 0)
  268. return AVERROR_INVALIDDATA;
  269. if (opcode & 1) {
  270. color = bytestream2_get_byte(&ctx->gb);
  271. memset(dst, color, run_len);
  272. } else {
  273. if (bytestream2_get_bytes_left(&ctx->gb) < run_len)
  274. return AVERROR_INVALIDDATA;
  275. bytestream2_get_bufferu(&ctx->gb, dst, run_len);
  276. }
  277. dst += run_len;
  278. left -= run_len;
  279. }
  280. return 0;
  281. }
/**
 * Decode an old-format codec 1/3 object: per-line RLE where the value 0
 * is transparent (the existing buffer contents are left in place).
 *
 * @return 0 on success, AVERROR_INVALIDDATA on malformed input
 */
static int old_codec1(SANMVideoContext *ctx, int top,
                      int left, int width, int height)
{
    uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * ctx->pitch;
    int i, j, len, flag, code, val, pos, end;

    for (i = 0; i < height; i++) {
        pos = 0;

        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;

        /* every line is prefixed with its encoded byte length */
        len = bytestream2_get_le16u(&ctx->gb);
        end = bytestream2_tell(&ctx->gb) + len;
        while (bytestream2_tell(&ctx->gb) < end) {
            if (bytestream2_get_bytes_left(&ctx->gb) < 2)
                return AVERROR_INVALIDDATA;

            /* low bit selects run-fill vs literal copy; the remaining
             * bits encode (length - 1) */
            code = bytestream2_get_byteu(&ctx->gb);
            flag = code & 1;
            code = (code >> 1) + 1;
            if (pos + code > width)
                return AVERROR_INVALIDDATA;
            if (flag) {
                /* run of one value; 0 keeps existing pixels */
                val = bytestream2_get_byteu(&ctx->gb);
                if (val)
                    memset(dst + pos, val, code);
                pos += code;
            } else {
                if (bytestream2_get_bytes_left(&ctx->gb) < code)
                    return AVERROR_INVALIDDATA;
                /* literal pixels; 0 again acts as transparency */
                for (j = 0; j < code; j++) {
                    val = bytestream2_get_byteu(&ctx->gb);
                    if (val)
                        dst[pos] = val;
                    pos++;
                }
            }
        }
        dst += ctx->pitch;
    }
    ctx->rotate_code = 0;

    return 0;
}
  322. static inline void codec37_mv(uint8_t *dst, const uint8_t *src,
  323. int height, int stride, int x, int y)
  324. {
  325. int pos, i, j;
  326. pos = x + y * stride;
  327. for (j = 0; j < 4; j++) {
  328. for (i = 0; i < 4; i++) {
  329. if ((pos + i) < 0 || (pos + i) >= height * stride)
  330. dst[i] = 0;
  331. else
  332. dst[i] = src[i];
  333. }
  334. dst += stride;
  335. src += stride;
  336. pos += stride;
  337. }
  338. }
/**
 * Decode an old-format codec 37 frame.
 *
 * The payload starts with a 16-byte subheader (compression mode, motion
 * table index, sequence number, decoded size, flags) followed by the
 * compressed data. Modes 0 and 2 produce a full frame; modes 3 and 4
 * are 4x4 block motion compensation against the previous frame, mode 4
 * additionally supporting skip runs.
 */
static int old_codec37(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
{
    int stride = ctx->pitch;
    int i, j, k, t;
    int skip_run = 0;
    int compr, mvoff, seq, flags;
    uint32_t decoded_size;
    uint8_t *dst, *prev;

    compr = bytestream2_get_byte(&ctx->gb);
    mvoff = bytestream2_get_byte(&ctx->gb);
    seq = bytestream2_get_le16(&ctx->gb);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 4);
    flags = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 3);

    /* clamp the advertised output size to the remaining buffer space */
    if (decoded_size > ctx->height * stride - left - top * stride) {
        decoded_size = ctx->height * stride - left - top * stride;
        av_log(ctx->avctx, AV_LOG_WARNING, "decoded size is too large\n");
    }

    ctx->rotate_code = 0;

    /* rotate reference frames for the MC modes depending on sequence
     * parity and the "keep buffers" flag */
    if (((seq & 1) || !(flags & 1)) && (compr && compr != 2))
        rotate_bufs(ctx, 1);

    dst = ((uint8_t*)ctx->frm0) + left + top * stride;
    prev = ((uint8_t*)ctx->frm2) + left + top * stride;

    /* mvoff selects one of three 255-entry motion-vector tables */
    if (mvoff > 2) {
        av_log(ctx->avctx, AV_LOG_ERROR, "invalid motion base value %d\n", mvoff);
        return AVERROR_INVALIDDATA;
    }
    av_dlog(ctx->avctx, "compression %d\n", compr);
    switch (compr) {
    case 0: /* raw frame; reference frames are reset */
        for (i = 0; i < height; i++) {
            bytestream2_get_buffer(&ctx->gb, dst, width);
            dst += stride;
        }
        memset(ctx->frm1, 0, ctx->height * stride);
        memset(ctx->frm2, 0, ctx->height * stride);
        break;
    case 2: /* whole-frame RLE; reference frames are reset */
        if (rle_decode(ctx, dst, decoded_size))
            return AVERROR_INVALIDDATA;
        memset(ctx->frm1, 0, ctx->frm1_size);
        memset(ctx->frm2, 0, ctx->frm2_size);
        break;
    case 3:
    case 4:
        if (flags & 4) {
            /* variant with 0xFF/0xFE/0xFD escape codes */
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;

                    if (skip_run) {
                        /* pending skip run: copy block from previous frame */
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }

                    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                        return AVERROR_INVALIDDATA;
                    code = bytestream2_get_byteu(&ctx->gb);
                    switch (code) {
                    case 0xFF: /* 16 literal pixels */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                        break;
                    case 0xFE: /* one fill colour per row */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, bytestream2_get_byteu(&ctx->gb), 4);
                        break;
                    case 0xFD: /* single fill colour for the whole block */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        t = bytestream2_get_byteu(&ctx->gb);
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, t, 4);
                        break;
                    default:
                        if (compr == 4 && !code) {
                            /* code 0 in mode 4 starts a skip run; i is
                             * rewound so this block is processed as part
                             * of the run */
                            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                                return AVERROR_INVALIDDATA;
                            skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                            i -= 4;
                        } else {
                            int mx, my;
                            /* motion vector from the selected 255-entry table */
                            mx = c37_mv[(mvoff * 255 + code) * 2];
                            my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                            codec37_mv(dst + i, prev + i + mx + my * stride,
                                       ctx->height, stride, i + mx, j + my);
                        }
                    }
                }
                dst += stride * 4;
                prev += stride * 4;
            }
        } else {
            /* variant with only the 0xFF (literal) escape code */
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;

                    if (skip_run) {
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }

                    code = bytestream2_get_byte(&ctx->gb);
                    if (code == 0xFF) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                    } else if (compr == 4 && !code) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                        i -= 4;
                    } else {
                        int mx, my;

                        mx = c37_mv[(mvoff * 255 + code) * 2];
                        my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                        codec37_mv(dst + i, prev + i + mx + my * stride,
                                   ctx->height, stride, i + mx, j + my);
                    }
                }
                dst += stride * 4;
                prev += stride * 4;
            }
        }
        break;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR,
               "subcodec 37 compression %d not implemented\n", compr);
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
/**
 * Decode one codec 47 block, recursing quadtree-style on opcode 0xFF.
 *
 * @param dst   destination in the current frame
 * @param prev1 co-located pixels in the previous frame
 * @param prev2 co-located pixels in the frame before that
 * @param tbl   bitstream offset of the frame's 8-entry colour table
 * @param size  block side length (8, 4 or 2)
 */
static int process_block(SANMVideoContext *ctx, uint8_t *dst, uint8_t *prev1,
                         uint8_t *prev2, int stride, int tbl, int size)
{
    int code, k, t;
    uint8_t colors[2];
    int8_t *pglyph;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    code = bytestream2_get_byteu(&ctx->gb);
    if (code >= 0xF8) {
        switch (code) {
        case 0xFF: /* 4 literal pixels at size 2, else recurse into quadrants */
            if (size == 2) {
                if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                    return AVERROR_INVALIDDATA;
                dst[0] = bytestream2_get_byteu(&ctx->gb);
                dst[1] = bytestream2_get_byteu(&ctx->gb);
                dst[0+stride] = bytestream2_get_byteu(&ctx->gb);
                dst[1+stride] = bytestream2_get_byteu(&ctx->gb);
            } else {
                size >>= 1;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                dst += size * stride;
                prev1 += size * stride;
                prev2 += size * stride;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
            }
            break;
        case 0xFE: /* solid fill with one literal colour byte */
            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                return AVERROR_INVALIDDATA;

            t = bytestream2_get_byteu(&ctx->gb);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
            break;
        case 0xFD: /* two-colour glyph pattern */
            if (bytestream2_get_bytes_left(&ctx->gb) < 3)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&ctx->gb);
            pglyph = (size == 8) ? ctx->p8x8glyphs[code] : ctx->p4x4glyphs[code];
            bytestream2_get_bufferu(&ctx->gb, colors, 2);

            for (k = 0; k < size; k++)
                for (t = 0; t < size; t++)
                    dst[t + k * stride] = colors[!*pglyph++];
            break;
        case 0xFC: /* copy block from the previous frame */
            for (k = 0; k < size; k++)
                memcpy(dst + k * stride, prev1 + k * stride, size);
            break;
        default: /* 0xF8..0xFB: fill with a colour from the frame's table */
            k = bytestream2_tell(&ctx->gb);
            bytestream2_seek(&ctx->gb, tbl + (code & 7), SEEK_SET);
            t = bytestream2_get_byte(&ctx->gb);
            bytestream2_seek(&ctx->gb, k, SEEK_SET);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
        }
    } else {
        /* codes < 0xF8 are motion compensation from the second-previous
         * frame; validate the vector against the buffer bounds first */
        int mx = motion_vectors[code][0];
        int my = motion_vectors[code][1];
        int index = prev2 - (const uint8_t*)ctx->frm2;

        av_assert2(index >= 0 && index < (ctx->buf_size>>1));

        if (index < - mx - my*stride ||
            (ctx->buf_size>>1) - index < mx + size + (my + size - 1)*stride) {
            av_log(ctx->avctx, AV_LOG_ERROR, "MV is invalid \n");
            return AVERROR_INVALIDDATA;
        }

        for (k = 0; k < size; k++)
            memcpy(dst + k * stride, prev2 + mx + (my + k) * stride, size);
    }

    return 0;
}
/**
 * Decode an old-format codec 47 frame.
 *
 * Supports raw (0), 2x2 upscaled raw (1), inter coding via
 * process_block (2), copies of the reference frames (3/4) and RLE (5).
 * The inter mode is only applied when the sequence number directly
 * follows the previous one; buffer rotation is likewise gated on that.
 */
static int old_codec47(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
{
    int i, j, seq, compr, new_rot, tbl_pos, skip;
    int stride = ctx->pitch;
    uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * stride;
    uint8_t *prev1 = (uint8_t*)ctx->frm1;
    uint8_t *prev2 = (uint8_t*)ctx->frm2;
    uint32_t decoded_size;

    /* remember the header start; the 8-entry colour table used by
     * process_block lives at tbl_pos + 8 */
    tbl_pos = bytestream2_tell(&ctx->gb);
    seq = bytestream2_get_le16(&ctx->gb);
    compr = bytestream2_get_byte(&ctx->gb);
    new_rot = bytestream2_get_byte(&ctx->gb);
    skip = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 9);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 8);

    /* clamp the advertised output size to the remaining buffer space */
    if (decoded_size > ctx->height * stride - left - top * stride) {
        decoded_size = ctx->height * stride - left - top * stride;
        av_log(ctx->avctx, AV_LOG_WARNING, "decoded size is too large\n");
    }

    if (skip & 1)
        bytestream2_skip(&ctx->gb, 0x8080);
    if (!seq) {
        /* sequence restart: clear both reference frames */
        ctx->prev_seq = -1;
        memset(prev1, 0, ctx->height * stride);
        memset(prev2, 0, ctx->height * stride);
    }
    av_dlog(ctx->avctx, "compression %d\n", compr);
    switch (compr) {
    case 0: /* raw frame */
        if (bytestream2_get_bytes_left(&ctx->gb) < width * height)
            return AVERROR_INVALIDDATA;
        for (j = 0; j < height; j++) {
            bytestream2_get_bufferu(&ctx->gb, dst, width);
            dst += stride;
        }
        break;
    case 1: /* quarter-resolution frame, each byte fills a 2x2 cell */
        if (bytestream2_get_bytes_left(&ctx->gb) < ((width + 1) >> 1) * ((height + 1) >> 1))
            return AVERROR_INVALIDDATA;
        for (j = 0; j < height; j += 2) {
            for (i = 0; i < width; i += 2) {
                dst[i] = dst[i + 1] =
                dst[stride + i] = dst[stride + i + 1] = bytestream2_get_byteu(&ctx->gb);
            }
            dst += stride * 2;
        }
        break;
    case 2: /* inter frame: 8x8 blocks, only if the sequence is contiguous */
        if (seq == ctx->prev_seq + 1) {
            for (j = 0; j < height; j += 8) {
                for (i = 0; i < width; i += 8) {
                    if (process_block(ctx, dst + i, prev1 + i, prev2 + i, stride,
                                      tbl_pos + 8, 8))
                        return AVERROR_INVALIDDATA;
                }
                dst += stride * 8;
                prev1 += stride * 8;
                prev2 += stride * 8;
            }
        }
        break;
    case 3: /* copy second-previous frame */
        memcpy(ctx->frm0, ctx->frm2, ctx->pitch * ctx->height);
        break;
    case 4: /* copy previous frame */
        memcpy(ctx->frm0, ctx->frm1, ctx->pitch * ctx->height);
        break;
    case 5: /* RLE frame */
        if (rle_decode(ctx, dst, decoded_size))
            return AVERROR_INVALIDDATA;
        break;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR,
               "subcodec 47 compression %d not implemented\n", compr);
        return AVERROR_PATCHWELCOME;
    }
    /* only rotate buffers when this frame continued the sequence */
    if (seq == ctx->prev_seq + 1)
        ctx->rotate_code = new_rot;
    else
        ctx->rotate_code = 0;
    ctx->prev_seq = seq;

    return 0;
}
/**
 * Parse a frame-object header and dispatch to the matching old-format
 * subcodec decoder. If the object extends beyond the current frame
 * dimensions, the codec context and work buffers are grown first.
 */
static int process_frame_obj(SANMVideoContext *ctx)
{
    uint16_t codec, top, left, w, h;

    codec = bytestream2_get_le16u(&ctx->gb);
    left  = bytestream2_get_le16u(&ctx->gb);
    top   = bytestream2_get_le16u(&ctx->gb);
    w     = bytestream2_get_le16u(&ctx->gb);
    h     = bytestream2_get_le16u(&ctx->gb);

    if (ctx->width < left + w || ctx->height < top + h) {
        if (av_image_check_size(FFMAX(left + w, ctx->width),
                                FFMAX(top + h, ctx->height), 0, ctx->avctx) < 0)
            return AVERROR_INVALIDDATA;
        avcodec_set_dimensions(ctx->avctx, FFMAX(left + w, ctx->width),
                                           FFMAX(top + h, ctx->height));
        init_sizes(ctx, FFMAX(left + w, ctx->width),
                        FFMAX(top + h, ctx->height));
        if (init_buffers(ctx)) {
            av_log(ctx->avctx, AV_LOG_ERROR, "error resizing buffers\n");
            return AVERROR(ENOMEM);
        }
    }
    bytestream2_skip(&ctx->gb, 4);

    av_dlog(ctx->avctx, "subcodec %d\n", codec);
    switch (codec) {
    case 1:
    case 3:
        return old_codec1(ctx, top, left, w, h);
        break;
    case 37:
        return old_codec37(ctx, top, left, w, h);
        break;
    case 47:
        return old_codec47(ctx, top, left, w, h);
        break;
    default:
        av_log_ask_for_sample(ctx->avctx, "unknown subcodec %d\n", codec);
        return AVERROR_PATCHWELCOME;
    }
}
  680. static int decode_0(SANMVideoContext *ctx)
  681. {
  682. uint16_t *frm = ctx->frm0;
  683. int x, y;
  684. if (bytestream2_get_bytes_left(&ctx->gb) < ctx->width * ctx->height * 2) {
  685. av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for raw frame\n");
  686. return AVERROR_INVALIDDATA;
  687. }
  688. for (y = 0; y < ctx->height; y++) {
  689. for (x = 0; x < ctx->width; x++)
  690. frm[x] = bytestream2_get_le16u(&ctx->gb);
  691. frm += ctx->pitch;
  692. }
  693. return 0;
  694. }
  695. static int decode_nop(SANMVideoContext *ctx)
  696. {
  697. av_log_ask_for_sample(ctx->avctx, "unknown/unsupported compression type\n");
  698. return AVERROR_PATCHWELCOME;
  699. }
  700. static void copy_block(uint16_t *pdest, uint16_t *psrc, int block_size, int pitch)
  701. {
  702. uint8_t *dst = (uint8_t *)pdest;
  703. uint8_t *src = (uint8_t *)psrc;
  704. int stride = pitch * 2;
  705. switch (block_size) {
  706. case 2:
  707. copy_block4(dst, src, stride, stride, 2);
  708. break;
  709. case 4:
  710. copy_block8(dst, src, stride, stride, 4);
  711. break;
  712. case 8:
  713. copy_block16(dst, src, stride, stride, 8);
  714. break;
  715. }
  716. }
  717. static void fill_block(uint16_t *pdest, uint16_t color, int block_size, int pitch)
  718. {
  719. int x, y;
  720. pitch -= block_size;
  721. for (y = 0; y < block_size; y++, pdest += pitch)
  722. for (x = 0; x < block_size; x++)
  723. *pdest++ = color;
  724. }
  725. static int draw_glyph(SANMVideoContext *ctx, uint16_t *dst, int index, uint16_t fg_color,
  726. uint16_t bg_color, int block_size, int pitch)
  727. {
  728. int8_t *pglyph;
  729. uint16_t colors[2] = { fg_color, bg_color };
  730. int x, y;
  731. if (index >= NGLYPHS) {
  732. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring nonexistent glyph #%u\n", index);
  733. return AVERROR_INVALIDDATA;
  734. }
  735. pglyph = block_size == 8 ? ctx->p8x8glyphs[index] : ctx->p4x4glyphs[index];
  736. pitch -= block_size;
  737. for (y = 0; y < block_size; y++, dst += pitch)
  738. for (x = 0; x < block_size; x++)
  739. *dst++ = colors[*pglyph++];
  740. return 0;
  741. }
  742. static int opcode_0xf7(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
  743. {
  744. uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;
  745. if (block_size == 2) {
  746. uint32_t indices;
  747. if (bytestream2_get_bytes_left(&ctx->gb) < 4)
  748. return AVERROR_INVALIDDATA;
  749. indices = bytestream2_get_le32u(&ctx->gb);
  750. dst[0] = ctx->codebook[indices & 0xFF]; indices >>= 8;
  751. dst[1] = ctx->codebook[indices & 0xFF]; indices >>= 8;
  752. dst[pitch] = ctx->codebook[indices & 0xFF]; indices >>= 8;
  753. dst[pitch + 1] = ctx->codebook[indices & 0xFF];
  754. } else {
  755. uint16_t fgcolor, bgcolor;
  756. int glyph;
  757. if (bytestream2_get_bytes_left(&ctx->gb) < 3)
  758. return AVERROR_INVALIDDATA;
  759. glyph = bytestream2_get_byteu(&ctx->gb);
  760. bgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
  761. fgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
  762. draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
  763. }
  764. return 0;
  765. }
  766. static int opcode_0xf8(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
  767. {
  768. uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;
  769. if (block_size == 2) {
  770. if (bytestream2_get_bytes_left(&ctx->gb) < 8)
  771. return AVERROR_INVALIDDATA;
  772. dst[0] = bytestream2_get_le16u(&ctx->gb);
  773. dst[1] = bytestream2_get_le16u(&ctx->gb);
  774. dst[pitch] = bytestream2_get_le16u(&ctx->gb);
  775. dst[pitch + 1] = bytestream2_get_le16u(&ctx->gb);
  776. } else {
  777. uint16_t fgcolor, bgcolor;
  778. int glyph;
  779. if (bytestream2_get_bytes_left(&ctx->gb) < 5)
  780. return AVERROR_INVALIDDATA;
  781. glyph = bytestream2_get_byteu(&ctx->gb);
  782. bgcolor = bytestream2_get_le16u(&ctx->gb);
  783. fgcolor = bytestream2_get_le16u(&ctx->gb);
  784. draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
  785. }
  786. return 0;
  787. }
  788. static int good_mvec(SANMVideoContext *ctx, int cx, int cy, int mx, int my,
  789. int block_size)
  790. {
  791. int start_pos = cx + mx + (cy + my) * ctx->pitch;
  792. int end_pos = start_pos + (block_size - 1) * (ctx->pitch + 1);
  793. int good = start_pos >= 0 && end_pos < (ctx->buf_size >> 1);
  794. if (!good) {
  795. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring invalid motion vector (%i, %i)->(%u, %u), block size = %u\n",
  796. cx + mx, cy + my, cx, cy, block_size);
  797. }
  798. return good;
  799. }
/**
 * Decode one block of a new-format codec 2 frame, recursing quadtree-
 * style on opcode 0xFF.
 *
 * Opcodes below 0xF5 are motion compensation from frm2 using the
 * shared motion-vector table; the 0xF5..0xFF range selects explicit
 * vectors, copies, glyph fills and solid fills.
 */
static int codec2subblock(SANMVideoContext *ctx, int cx, int cy, int blk_size)
{
    int16_t mx, my, index;
    int opcode;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    opcode = bytestream2_get_byteu(&ctx->gb);

    av_dlog(ctx->avctx, "opcode 0x%0X cx %d cy %d blk %d\n", opcode, cx, cy, blk_size);
    switch (opcode) {
    default: /* 0x00..0xF4: table-driven MC from the second-previous frame */
        mx = motion_vectors[opcode][0];
        my = motion_vectors[opcode][1];

        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF5: /* explicit vector encoded as a linear pixel offset */
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        index = bytestream2_get_le16u(&ctx->gb);

        mx = index % ctx->width;
        my = index / ctx->width;

        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF6: /* copy co-located block from the previous frame */
        copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                   ctx->frm1 + cx + ctx->pitch * cy,
                   blk_size, ctx->pitch);
        break;
    case 0xF7: /* codebook-indexed pixels / glyph */
        opcode_0xf7(ctx, cx, cy, blk_size, ctx->pitch);
        break;
    case 0xF8: /* literal pixels / glyph */
        opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        break;
    case 0xF9:
    case 0xFA:
    case 0xFB:
    case 0xFC: /* fill with one of the four small-codebook colours */
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->small_codebook[opcode - 0xf9], blk_size, ctx->pitch);
        break;
    case 0xFD: /* fill with a codebook colour chosen by the next byte */
        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->codebook[bytestream2_get_byteu(&ctx->gb)], blk_size, ctx->pitch);
        break;
    case 0xFE: /* fill with a literal 16bpp colour */
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   bytestream2_get_le16u(&ctx->gb), blk_size, ctx->pitch);
        break;
    case 0xFF: /* subdivide into four quadrants (pixels at size 2) */
        if (blk_size == 2) {
            opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        } else {
            blk_size >>= 1;
            if (codec2subblock(ctx, cx, cy, blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy, blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx, cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
        }
        break;
    }
    return 0;
}
  878. static int decode_2(SANMVideoContext *ctx)
  879. {
  880. int cx, cy, ret;
  881. for (cy = 0; cy < ctx->aligned_height; cy += 8) {
  882. for (cx = 0; cx < ctx->aligned_width; cx += 8) {
  883. if (ret = codec2subblock(ctx, cx, cy, 8))
  884. return ret;
  885. }
  886. }
  887. return 0;
  888. }
  889. static int decode_3(SANMVideoContext *ctx)
  890. {
  891. memcpy(ctx->frm0, ctx->frm2, ctx->frm2_size);
  892. return 0;
  893. }
  894. static int decode_4(SANMVideoContext *ctx)
  895. {
  896. memcpy(ctx->frm0, ctx->frm1, ctx->frm1_size);
  897. return 0;
  898. }
  899. static int decode_5(SANMVideoContext *ctx)
  900. {
  901. #if HAVE_BIGENDIAN
  902. uint16_t *frm;
  903. int npixels;
  904. #endif
  905. uint8_t *dst = (uint8_t*)ctx->frm0;
  906. if (rle_decode(ctx, dst, ctx->buf_size))
  907. return AVERROR_INVALIDDATA;
  908. #if HAVE_BIGENDIAN
  909. npixels = ctx->npixels;
  910. frm = ctx->frm0;
  911. while (npixels--)
  912. *frm++ = av_bswap16(*frm);
  913. #endif
  914. return 0;
  915. }
  916. static int decode_6(SANMVideoContext *ctx)
  917. {
  918. int npixels = ctx->npixels;
  919. uint16_t *frm = ctx->frm0;
  920. if (bytestream2_get_bytes_left(&ctx->gb) < npixels) {
  921. av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for frame\n");
  922. return AVERROR_INVALIDDATA;
  923. }
  924. while (npixels--)
  925. *frm++ = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
  926. return 0;
  927. }
  928. static int decode_8(SANMVideoContext *ctx)
  929. {
  930. uint16_t *pdest = ctx->frm0;
  931. uint8_t *rsrc;
  932. long npixels = ctx->npixels;
  933. av_fast_malloc(&ctx->rle_buf, &ctx->rle_buf_size, npixels);
  934. if (!ctx->rle_buf) {
  935. av_log(ctx->avctx, AV_LOG_ERROR, "RLE buffer allocation failed\n");
  936. return AVERROR(ENOMEM);
  937. }
  938. rsrc = ctx->rle_buf;
  939. if (rle_decode(ctx, rsrc, npixels))
  940. return AVERROR_INVALIDDATA;
  941. while (npixels--)
  942. *pdest++ = ctx->codebook[*rsrc++];
  943. return 0;
  944. }
typedef int (*frm_decoder)(SANMVideoContext *ctx);

/* Dispatch table for v1 (16bpp) frames, indexed by the subcodec id from
 * the frame header; slots 1 and 7 map to decode_nop. */
static const frm_decoder v1_decoders[] = {
    decode_0, decode_nop, decode_2, decode_3, decode_4, decode_5,
    decode_6, decode_nop, decode_8
};
/* Parse the fixed-layout v1 frame header (at least 560 bytes must be
 * available): frame geometry, sequence number, subcodec id, rotate code,
 * the 4-entry small codebook, background color, expected RLE output size
 * and the 256-entry pixel codebook.
 *
 * Returns 0 on success, AVERROR_INVALIDDATA on a truncated header, or
 * AVERROR_PATCHWELCOME if the frame size differs from the stream size
 * (resizing mid-stream is not implemented).
 */
static int read_frame_header(SANMVideoContext *ctx, SANMFrameHeader *hdr)
{
    int i, ret;

    if ((ret = bytestream2_get_bytes_left(&ctx->gb)) < 560) {
        av_log(ctx->avctx, AV_LOG_ERROR, "too short input frame (%d bytes)\n",
               ret);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&ctx->gb, 8); // skip pad

    hdr->width = bytestream2_get_le32u(&ctx->gb);
    hdr->height = bytestream2_get_le32u(&ctx->gb);

    if (hdr->width != ctx->width || hdr->height != ctx->height) {
        av_log(ctx->avctx, AV_LOG_ERROR, "variable size frames are not implemented\n");
        return AVERROR_PATCHWELCOME;
    }

    hdr->seq_num = bytestream2_get_le16u(&ctx->gb);
    hdr->codec = bytestream2_get_byteu(&ctx->gb);
    hdr->rotate_code = bytestream2_get_byteu(&ctx->gb);

    bytestream2_skip(&ctx->gb, 4); // skip pad

    for (i = 0; i < 4; i++)
        ctx->small_codebook[i] = bytestream2_get_le16u(&ctx->gb);
    hdr->bg_color = bytestream2_get_le16u(&ctx->gb);

    bytestream2_skip(&ctx->gb, 2); // skip pad

    hdr->rle_output_size = bytestream2_get_le32u(&ctx->gb);
    for (i = 0; i < 256; i++)
        ctx->codebook[i] = bytestream2_get_le16u(&ctx->gb);

    bytestream2_skip(&ctx->gb, 8); // skip pad

    av_dlog(ctx->avctx, "subcodec %d\n", hdr->codec);

    return 0;
}
  980. static void fill_frame(uint16_t *pbuf, int buf_size, uint16_t color)
  981. {
  982. while (buf_size--)
  983. *pbuf++ = color;
  984. }
  985. static int copy_output(SANMVideoContext *ctx, SANMFrameHeader *hdr)
  986. {
  987. uint8_t *dst;
  988. const uint8_t *src = (uint8_t*) ctx->frm0;
  989. int ret, dstpitch, height = ctx->height;
  990. int srcpitch = ctx->pitch * (hdr ? sizeof(ctx->frm0[0]) : 1);
  991. if ((ret = ff_get_buffer(ctx->avctx, ctx->output)) < 0) {
  992. av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  993. return ret;
  994. }
  995. dst = ctx->output->data[0];
  996. dstpitch = ctx->output->linesize[0];
  997. while (height--) {
  998. memcpy(dst, src, srcpitch);
  999. src += srcpitch;
  1000. dst += dstpitch;
  1001. }
  1002. return 0;
  1003. }
  1004. static int decode_frame(AVCodecContext *avctx, void *data,
  1005. int *got_frame_ptr, AVPacket *pkt)
  1006. {
  1007. SANMVideoContext *ctx = avctx->priv_data;
  1008. int i, ret;
  1009. bytestream2_init(&ctx->gb, pkt->data, pkt->size);
  1010. if (ctx->output->data[0])
  1011. avctx->release_buffer(avctx, ctx->output);
  1012. if (!ctx->version) {
  1013. int to_store = 0;
  1014. while (bytestream2_get_bytes_left(&ctx->gb) >= 8) {
  1015. uint32_t sig, size;
  1016. int pos;
  1017. sig = bytestream2_get_be32u(&ctx->gb);
  1018. size = bytestream2_get_be32u(&ctx->gb);
  1019. pos = bytestream2_tell(&ctx->gb);
  1020. if (bytestream2_get_bytes_left(&ctx->gb) < size) {
  1021. av_log(avctx, AV_LOG_ERROR, "incorrect chunk size %d\n", size);
  1022. break;
  1023. }
  1024. switch (sig) {
  1025. case MKBETAG('N', 'P', 'A', 'L'):
  1026. if (size != 256 * 3) {
  1027. av_log(avctx, AV_LOG_ERROR, "incorrect palette block size %d\n",
  1028. size);
  1029. return AVERROR_INVALIDDATA;
  1030. }
  1031. for (i = 0; i < 256; i++)
  1032. ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
  1033. break;
  1034. case MKBETAG('F', 'O', 'B', 'J'):
  1035. if (size < 16)
  1036. return AVERROR_INVALIDDATA;
  1037. if (ret = process_frame_obj(ctx))
  1038. return ret;
  1039. break;
  1040. case MKBETAG('X', 'P', 'A', 'L'):
  1041. if (size == 6 || size == 4) {
  1042. uint8_t tmp[3];
  1043. int j;
  1044. for (i = 0; i < 256; i++) {
  1045. for (j = 0; j < 3; j++) {
  1046. int t = (ctx->pal[i] >> (16 - j * 8)) & 0xFF;
  1047. tmp[j] = av_clip_uint8((t * 129 + ctx->delta_pal[i * 3 + j]) >> 7);
  1048. }
  1049. ctx->pal[i] = 0xFFU << 24 | AV_RB24(tmp);
  1050. }
  1051. } else {
  1052. if (size < 768 * 2 + 4) {
  1053. av_log(avctx, AV_LOG_ERROR, "incorrect palette change block size %d\n",
  1054. size);
  1055. return AVERROR_INVALIDDATA;
  1056. }
  1057. bytestream2_skipu(&ctx->gb, 4);
  1058. for (i = 0; i < 768; i++)
  1059. ctx->delta_pal[i] = bytestream2_get_le16u(&ctx->gb);
  1060. if (size >= 768 * 5 + 4) {
  1061. for (i = 0; i < 256; i++)
  1062. ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
  1063. } else {
  1064. memset(ctx->pal, 0, sizeof(ctx->pal));
  1065. }
  1066. }
  1067. break;
  1068. case MKBETAG('S', 'T', 'O', 'R'):
  1069. to_store = 1;
  1070. break;
  1071. case MKBETAG('F', 'T', 'C', 'H'):
  1072. memcpy(ctx->frm0, ctx->stored_frame, ctx->buf_size);
  1073. break;
  1074. default:
  1075. bytestream2_skip(&ctx->gb, size);
  1076. av_log(avctx, AV_LOG_DEBUG, "unknown/unsupported chunk %x\n", sig);
  1077. break;
  1078. }
  1079. bytestream2_seek(&ctx->gb, pos + size, SEEK_SET);
  1080. if (size & 1)
  1081. bytestream2_skip(&ctx->gb, 1);
  1082. }
  1083. if (to_store)
  1084. memcpy(ctx->stored_frame, ctx->frm0, ctx->buf_size);
  1085. if ((ret = copy_output(ctx, NULL)))
  1086. return ret;
  1087. memcpy(ctx->output->data[1], ctx->pal, 1024);
  1088. } else {
  1089. SANMFrameHeader header;
  1090. if ((ret = read_frame_header(ctx, &header)))
  1091. return ret;
  1092. ctx->rotate_code = header.rotate_code;
  1093. if ((ctx->output->key_frame = !header.seq_num)) {
  1094. ctx->output->pict_type = AV_PICTURE_TYPE_I;
  1095. fill_frame(ctx->frm1, ctx->npixels, header.bg_color);
  1096. fill_frame(ctx->frm2, ctx->npixels, header.bg_color);
  1097. } else {
  1098. ctx->output->pict_type = AV_PICTURE_TYPE_P;
  1099. }
  1100. if (header.codec < FF_ARRAY_ELEMS(v1_decoders)) {
  1101. if ((ret = v1_decoders[header.codec](ctx))) {
  1102. av_log(avctx, AV_LOG_ERROR,
  1103. "subcodec %d: error decoding frame\n", header.codec);
  1104. return ret;
  1105. }
  1106. } else {
  1107. av_log_ask_for_sample(avctx, "subcodec %d is not implemented\n",
  1108. header.codec);
  1109. return AVERROR_PATCHWELCOME;
  1110. }
  1111. if ((ret = copy_output(ctx, &header)))
  1112. return ret;
  1113. }
  1114. if (ctx->rotate_code)
  1115. rotate_bufs(ctx, ctx->rotate_code);
  1116. *got_frame_ptr = 1;
  1117. *(AVFrame*)data = *ctx->output;
  1118. return pkt->size;
  1119. }
/* Decoder descriptor registered with libavcodec. */
AVCodec ff_sanm_decoder = {
    .name           = "sanm",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SANM,
    .priv_data_size = sizeof(SANMVideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("LucasArts SMUSH video"),
};