You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1292 lines
40KB

  1. /*
  2. * LucasArts Smush video decoder
  3. * Copyright (c) 2006 Cyril Zorin
  4. * Copyright (c) 2011 Konstantin Shishkov
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. // #define DEBUG 1
  23. #include "avcodec.h"
  24. #include "bytestream.h"
  25. #include "internal.h"
  26. #include "libavutil/bswap.h"
  27. #include "libavutil/imgutils.h"
  28. #include "libavcodec/dsputil.h"
  29. #include "sanm_data.h"
  30. #define NGLYPHS 256
typedef struct {
    AVCodecContext *avctx;
    GetByteContext gb;                  // bitstream reader over the current packet
    int version, subversion;            // version: 1 = new RGB565 variant (no extradata), 0 = old PAL8 variant
    uint32_t pal[256];                  // current palette (0xAARRGGBB), old variant only
    int16_t delta_pal[768];             // palette delta table (old variant; applied outside this chunk)
    int pitch;                          // line width in pixels of the work buffers
    int width, height;                  // current frame dimensions
    int aligned_width, aligned_height;  // dimensions rounded up to a multiple of 8
    int prev_seq;                       // sequence number of the previous frame (codec47)
    AVFrame frame, *output;
    uint16_t *frm0, *frm1, *frm2;       // triple-buffered frame storage, rotated by rotate_bufs()
    uint8_t *stored_frame;              // saved frame, allocated for the old variant only
    uint32_t frm0_size, frm1_size, frm2_size;
    uint32_t stored_frame_size;
    uint8_t *rle_buf;                   // scratch buffer for RLE decompression (decode_8)
    unsigned int rle_buf_size;
    int rotate_code;                    // how to rotate the work buffers after the frame
    long npixels, buf_size;             // pixel count and per-buffer byte size
    uint16_t codebook[256];             // RGB565 codebook read from each frame header
    uint16_t small_codebook[4];         // 4-entry codebook used by opcodes 0xF9..0xFC
    int8_t p4x4glyphs[NGLYPHS][16];     // precomputed 4x4 glyph patterns
    int8_t p8x8glyphs[NGLYPHS][64];     // precomputed 8x8 glyph patterns
} SANMVideoContext;
/* Parsed per-frame header of the new (versioned) format, filled by
 * read_frame_header(). */
typedef struct {
    int seq_num, codec, rotate_code, rle_output_size;
    uint16_t bg_color;      // background color from the frame header
    uint32_t width, height; // frame dimensions as stated in the header
} SANMFrameHeader;
/* Which edge of the glyph bounding box a point lies on (see which_edge()). */
enum GlyphEdge {
    LEFT_EDGE,
    TOP_EDGE,
    RIGHT_EDGE,
    BOTTOM_EDGE,
    NO_EDGE
};
/* Fill direction for glyph construction (see which_direction()). */
enum GlyphDir {
    DIR_LEFT,
    DIR_UP,
    DIR_RIGHT,
    DIR_DOWN,
    NO_DIR
};
  74. /**
  75. * Return enum GlyphEdge of box where point (x, y) lies.
  76. *
  77. * @param x x point coordinate
  78. * @param y y point coordinate
  79. * @param edge_size box width/height.
  80. */
  81. static enum GlyphEdge which_edge(int x, int y, int edge_size)
  82. {
  83. const int edge_max = edge_size - 1;
  84. if (!y) {
  85. return BOTTOM_EDGE;
  86. } else if (y == edge_max) {
  87. return TOP_EDGE;
  88. } else if (!x) {
  89. return LEFT_EDGE;
  90. } else if (x == edge_max) {
  91. return RIGHT_EDGE;
  92. } else {
  93. return NO_EDGE;
  94. }
  95. }
  96. static enum GlyphDir which_direction(enum GlyphEdge edge0, enum GlyphEdge edge1)
  97. {
  98. if ((edge0 == LEFT_EDGE && edge1 == RIGHT_EDGE) ||
  99. (edge1 == LEFT_EDGE && edge0 == RIGHT_EDGE) ||
  100. (edge0 == BOTTOM_EDGE && edge1 != TOP_EDGE) ||
  101. (edge1 == BOTTOM_EDGE && edge0 != TOP_EDGE)) {
  102. return DIR_UP;
  103. } else if ((edge0 == TOP_EDGE && edge1 != BOTTOM_EDGE) ||
  104. (edge1 == TOP_EDGE && edge0 != BOTTOM_EDGE)) {
  105. return DIR_DOWN;
  106. } else if ((edge0 == LEFT_EDGE && edge1 != RIGHT_EDGE) ||
  107. (edge1 == LEFT_EDGE && edge0 != RIGHT_EDGE)) {
  108. return DIR_LEFT;
  109. } else if ((edge0 == TOP_EDGE && edge1 == BOTTOM_EDGE) ||
  110. (edge1 == TOP_EDGE && edge0 == BOTTOM_EDGE) ||
  111. (edge0 == RIGHT_EDGE && edge1 != LEFT_EDGE) ||
  112. (edge1 == RIGHT_EDGE && edge0 != LEFT_EDGE)) {
  113. return DIR_RIGHT;
  114. }
  115. return NO_DIR;
  116. }
  117. /**
  118. * Interpolate two points.
  119. */
  120. static void interp_point(int8_t *points, int x0, int y0, int x1, int y1,
  121. int pos, int npoints)
  122. {
  123. if (npoints) {
  124. points[0] = (x0 * pos + x1 * (npoints - pos) + (npoints >> 1)) / npoints;
  125. points[1] = (y0 * pos + y1 * (npoints - pos) + (npoints >> 1)) / npoints;
  126. } else {
  127. points[0] = x0;
  128. points[1] = y0;
  129. }
  130. }
/**
 * Construct glyphs by iterating through vectors coordinates.
 *
 * For every ordered pair of coordinate-vector entries, a line is
 * interpolated between the two points and each interpolated point is
 * extended to the box edge in the direction chosen from the points'
 * edge classification, setting the covered glyph cells to 1.
 *
 * @param pglyphs pointer to table where glyphs are stored
 * @param xvec pointer to x component of vectors coordinates
 * @param yvec pointer to y component of vectors coordinates
 * @param side_length glyph width/height.
 */
static void make_glyphs(int8_t *pglyphs, const int8_t *xvec, const int8_t *yvec,
                        const int side_length)
{
    const int glyph_size = side_length * side_length;
    int8_t *pglyph = pglyphs;

    int i, j;
    for (i = 0; i < GLYPH_COORD_VECT_SIZE; i++) {
        int x0 = xvec[i];
        int y0 = yvec[i];
        enum GlyphEdge edge0 = which_edge(x0, y0, side_length);

        /* one glyph per (i, j) pair; pglyph advances through the table */
        for (j = 0; j < GLYPH_COORD_VECT_SIZE; j++, pglyph += glyph_size) {
            int x1 = xvec[j];
            int y1 = yvec[j];
            enum GlyphEdge edge1 = which_edge(x1, y1, side_length);
            enum GlyphDir dir = which_direction(edge0, edge1);
            /* number of interpolation steps: chebyshev distance */
            int npoints = FFMAX(FFABS(x1 - x0), FFABS(y1 - y0));
            int ipoint;

            for (ipoint = 0; ipoint <= npoints; ipoint++) {
                int8_t point[2];
                int irow, icol;

                interp_point(point, x0, y0, x1, y1, ipoint, npoints);

                /* extend the point to the edge in the fill direction */
                switch (dir) {
                case DIR_UP:
                    for (irow = point[1]; irow >= 0; irow--)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;

                case DIR_DOWN:
                    for (irow = point[1]; irow < side_length; irow++)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;

                case DIR_LEFT:
                    for (icol = point[0]; icol >= 0; icol--)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;

                case DIR_RIGHT:
                    for (icol = point[0]; icol < side_length; icol++)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;
                }
            }
        }
    }
}
  182. static void init_sizes(SANMVideoContext *ctx, int width, int height)
  183. {
  184. ctx->width = width;
  185. ctx->height = height;
  186. ctx->npixels = width * height;
  187. ctx->aligned_width = FFALIGN(width, 8);
  188. ctx->aligned_height = FFALIGN(height, 8);
  189. ctx->buf_size = ctx->aligned_width * ctx->aligned_height * sizeof(ctx->frm0[0]);
  190. ctx->pitch = width;
  191. }
  192. static void destroy_buffers(SANMVideoContext *ctx)
  193. {
  194. av_freep(&ctx->frm0);
  195. av_freep(&ctx->frm1);
  196. av_freep(&ctx->frm2);
  197. av_freep(&ctx->stored_frame);
  198. av_freep(&ctx->rle_buf);
  199. }
  200. static av_cold int init_buffers(SANMVideoContext *ctx)
  201. {
  202. av_fast_padded_malloc(&ctx->frm0, &ctx->frm0_size, ctx->buf_size);
  203. av_fast_padded_malloc(&ctx->frm1, &ctx->frm1_size, ctx->buf_size);
  204. av_fast_padded_malloc(&ctx->frm2, &ctx->frm2_size, ctx->buf_size);
  205. if (!ctx->version)
  206. av_fast_padded_malloc(&ctx->stored_frame, &ctx->stored_frame_size, ctx->buf_size);
  207. if (!ctx->frm0 || !ctx->frm1 || !ctx->frm2 || (!ctx->stored_frame && !ctx->version)) {
  208. destroy_buffers(ctx);
  209. return AVERROR(ENOMEM);
  210. }
  211. return 0;
  212. }
/**
 * Rotate the work buffers after a decoded frame.
 * Always exchanges frm2 and frm0; with rotate_code == 2 frm1 and frm2
 * are exchanged first.
 */
static void rotate_bufs(SANMVideoContext *ctx, int rotate_code)
{
    av_dlog(ctx->avctx, "rotate %d\n", rotate_code);
    if (rotate_code == 2)
        FFSWAP(uint16_t*, ctx->frm1, ctx->frm2);
    FFSWAP(uint16_t*, ctx->frm2, ctx->frm0);
}
static av_cold int decode_init(AVCodecContext *avctx)
{
    SANMVideoContext *ctx = avctx->priv_data;
    ctx->avctx = avctx;
    /* no extradata marks the newer 16-bit variant; extradata carries
     * subversion and initial palette for the old palettized variant */
    ctx->version = !avctx->extradata_size;

    avctx->pix_fmt = ctx->version ? AV_PIX_FMT_RGB565 : AV_PIX_FMT_PAL8;

    init_sizes(ctx, avctx->width, avctx->height);
    if (init_buffers(ctx)) {
        av_log(avctx, AV_LOG_ERROR, "error allocating buffers\n");
        return AVERROR(ENOMEM);
    }
    ctx->output          = &ctx->frame;
    ctx->output->data[0] = 0;

    /* precompute glyph patterns used by the block decoders */
    make_glyphs(ctx->p4x4glyphs[0], glyph4_x, glyph4_y, 4);
    make_glyphs(ctx->p8x8glyphs[0], glyph8_x, glyph8_y, 8);

    if (!ctx->version) {
        int i;

        /* need at least 2 bytes subversion + 256 * 4 palette bytes */
        if (avctx->extradata_size < 1026) {
            av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
            return AVERROR_INVALIDDATA;
        }

        ctx->subversion = AV_RL16(avctx->extradata);
        for (i = 0; i < 256; i++)
            ctx->pal[i] = 0xFFU << 24 | AV_RL32(avctx->extradata + 2 + i * 4);
    }

    return 0;
}
  247. static av_cold int decode_end(AVCodecContext *avctx)
  248. {
  249. SANMVideoContext *ctx = avctx->priv_data;
  250. destroy_buffers(ctx);
  251. if (ctx->frame.data[0]) {
  252. avctx->release_buffer(avctx, &ctx->frame);
  253. ctx->frame.data[0] = 0;
  254. }
  255. return 0;
  256. }
  257. static int rle_decode(SANMVideoContext *ctx, uint8_t *dst, const int out_size)
  258. {
  259. int opcode, color, run_len, left = out_size;
  260. while (left > 0) {
  261. opcode = bytestream2_get_byte(&ctx->gb);
  262. run_len = (opcode >> 1) + 1;
  263. if (run_len > left || bytestream2_get_bytes_left(&ctx->gb) <= 0)
  264. return AVERROR_INVALIDDATA;
  265. if (opcode & 1) {
  266. color = bytestream2_get_byte(&ctx->gb);
  267. memset(dst, color, run_len);
  268. } else {
  269. if (bytestream2_get_bytes_left(&ctx->gb) < run_len)
  270. return AVERROR_INVALIDDATA;
  271. bytestream2_get_bufferu(&ctx->gb, dst, run_len);
  272. }
  273. dst += run_len;
  274. left -= run_len;
  275. }
  276. return 0;
  277. }
/**
 * Decode subcodec 1/3: per-line RLE over 8-bit pixels.
 * Each line starts with a 16-bit chunk length; within the chunk every
 * code byte yields a run of (code >> 1) + 1 pixels, either one repeated
 * color (low bit set) or literal bytes. A pixel value of 0 is
 * transparent: the destination byte is left untouched.
 */
static int old_codec1(SANMVideoContext *ctx, int top,
                      int left, int width, int height)
{
    uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * ctx->pitch;
    int i, j, len, flag, code, val, pos, end;

    for (i = 0; i < height; i++) {
        pos = 0;

        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;

        /* per-line compressed chunk length */
        len = bytestream2_get_le16u(&ctx->gb);
        end = bytestream2_tell(&ctx->gb) + len;
        while (bytestream2_tell(&ctx->gb) < end) {
            if (bytestream2_get_bytes_left(&ctx->gb) < 2)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&ctx->gb);
            flag = code & 1;
            code = (code >> 1) + 1;
            if (pos + code > width)
                return AVERROR_INVALIDDATA;
            if (flag) {
                /* repeated color; 0 keeps the existing pixels */
                val = bytestream2_get_byteu(&ctx->gb);
                if (val)
                    memset(dst + pos, val, code);
                pos += code;
            } else {
                if (bytestream2_get_bytes_left(&ctx->gb) < code)
                    return AVERROR_INVALIDDATA;
                /* literal pixels, again with 0 as transparency */
                for (j = 0; j < code; j++) {
                    val = bytestream2_get_byteu(&ctx->gb);
                    if (val)
                        dst[pos] = val;
                    pos++;
                }
            }
        }
        dst += ctx->pitch;
    }
    ctx->rotate_code = 0;

    return 0;
}
  318. static inline void codec37_mv(uint8_t *dst, const uint8_t *src,
  319. int height, int stride, int x, int y)
  320. {
  321. int pos, i, j;
  322. pos = x + y * stride;
  323. for (j = 0; j < 4; j++) {
  324. for (i = 0; i < 4; i++) {
  325. if ((pos + i) < 0 || (pos + i) >= height * stride)
  326. dst[i] = 0;
  327. else
  328. dst[i] = src[i];
  329. }
  330. dst += stride;
  331. src += stride;
  332. pos += stride;
  333. }
  334. }
/**
 * Decode subcodec 37.
 * Compression 0 is a raw frame, 2 is RLE, 3/4 are 4x4 block-based
 * motion compensation against the previous frame (frm2), where
 * compression 4 additionally supports skip runs (code 0 + run length).
 */
static int old_codec37(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
{
    int stride = ctx->pitch;
    int i, j, k, t;
    int skip_run = 0;
    int compr, mvoff, seq, flags;
    uint32_t decoded_size;
    uint8_t *dst, *prev;

    /* subchunk header: compression type, motion table selector,
     * sequence number, decoded size and flags */
    compr        = bytestream2_get_byte(&ctx->gb);
    mvoff        = bytestream2_get_byte(&ctx->gb);
    seq          = bytestream2_get_le16(&ctx->gb);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 4);
    flags        = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 3);

    ctx->rotate_code = 0;

    if (((seq & 1) || !(flags & 1)) && (compr && compr != 2))
        rotate_bufs(ctx, 1);

    dst  = ((uint8_t*)ctx->frm0) + left + top * stride;
    prev = ((uint8_t*)ctx->frm2) + left + top * stride;

    /* mvoff selects one of three motion-vector tables in c37_mv */
    if (mvoff > 2) {
        av_log(ctx->avctx, AV_LOG_ERROR, "invalid motion base value %d\n", mvoff);
        return AVERROR_INVALIDDATA;
    }
    av_dlog(ctx->avctx, "compression %d\n", compr);
    switch (compr) {
    case 0:
        /* raw frame, line by line; both reference frames are cleared */
        for (i = 0; i < height; i++) {
            bytestream2_get_buffer(&ctx->gb, dst, width);
            dst += stride;
        }
        memset(ctx->frm1, 0, ctx->height * stride);
        memset(ctx->frm2, 0, ctx->height * stride);
        break;
    case 2:
        /* RLE-compressed frame; reference frames cleared as above */
        if (rle_decode(ctx, dst, decoded_size))
            return AVERROR_INVALIDDATA;
        memset(ctx->frm1, 0, ctx->frm1_size);
        memset(ctx->frm2, 0, ctx->frm2_size);
        break;
    case 3:
    case 4:
        if (flags & 4) {
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;

                    /* pending skip run: copy the block from the
                     * previous frame */
                    if (skip_run) {
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }

                    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                        return AVERROR_INVALIDDATA;
                    code = bytestream2_get_byteu(&ctx->gb);
                    switch (code) {
                    case 0xFF:
                        /* 16 raw pixels */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                        break;
                    case 0xFE:
                        /* one color byte per block row */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, bytestream2_get_byteu(&ctx->gb), 4);
                        break;
                    case 0xFD:
                        /* single color for the whole block */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        t = bytestream2_get_byteu(&ctx->gb);
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, t, 4);
                        break;
                    default:
                        if (compr == 4 && !code) {
                            /* start a skip run; i -= 4 replays this
                             * block position */
                            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                                return AVERROR_INVALIDDATA;
                            skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                            i -= 4;
                        } else {
                            /* motion-compensated copy via the selected
                             * c37_mv table */
                            int mx, my;

                            mx = c37_mv[(mvoff * 255 + code) * 2 ];
                            my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                            codec37_mv(dst + i, prev + i + mx + my * stride,
                                       ctx->height, stride, i + mx, j + my);
                        }
                    }
                }
                dst  += stride * 4;
                prev += stride * 4;
            }
        } else {
            /* same scheme without the 0xFE/0xFD block opcodes */
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;

                    if (skip_run) {
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }

                    code = bytestream2_get_byte(&ctx->gb);
                    if (code == 0xFF) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                    } else if (compr == 4 && !code) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                        i -= 4;
                    } else {
                        int mx, my;

                        mx = c37_mv[(mvoff * 255 + code) * 2];
                        my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                        codec37_mv(dst + i, prev + i + mx + my * stride,
                                   ctx->height, stride, i + mx, j + my);
                    }
                }
                dst  += stride * 4;
                prev += stride * 4;
            }
        }
        break;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR,
               "subcodec 37 compression %d not implemented\n", compr);
        return AVERROR_PATCHWELCOME;
    }

    return 0;
}
/**
 * Recursively decode one size x size block for subcodec 47.
 * Opcodes below 0xF8 are motion-compensated copies from prev2 using the
 * shared motion_vectors table. 0xFF subdivides into four half-size
 * blocks (or reads four raw pixels at size 2), 0xFE fills the block
 * with one color byte, 0xFD draws a two-color glyph, 0xFC copies from
 * prev1, and 0xF8..0xFB fill with a color byte fetched from the
 * parameter table at stream offset tbl.
 */
static int process_block(SANMVideoContext *ctx, uint8_t *dst, uint8_t *prev1,
                         uint8_t *prev2, int stride, int tbl, int size)
{
    int code, k, t;
    uint8_t colors[2];
    int8_t *pglyph;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    code = bytestream2_get_byteu(&ctx->gb);
    if (code >= 0xF8) {
        switch (code) {
        case 0xFF:
            if (size == 2) {
                /* terminal case: four raw pixels */
                if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                    return AVERROR_INVALIDDATA;
                dst[0]          = bytestream2_get_byteu(&ctx->gb);
                dst[1]          = bytestream2_get_byteu(&ctx->gb);
                dst[0 + stride] = bytestream2_get_byteu(&ctx->gb);
                dst[1 + stride] = bytestream2_get_byteu(&ctx->gb);
            } else {
                /* recurse into the four quadrants */
                size >>= 1;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                dst   += size * stride;
                prev1 += size * stride;
                prev2 += size * stride;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
            }
            break;
        case 0xFE:
            /* solid fill with one color byte */
            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                return AVERROR_INVALIDDATA;

            t = bytestream2_get_byteu(&ctx->gb);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
            break;
        case 0xFD:
            /* two-color glyph: glyph index + fg/bg colors */
            if (bytestream2_get_bytes_left(&ctx->gb) < 3)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&ctx->gb);
            pglyph = (size == 8) ? ctx->p8x8glyphs[code] : ctx->p4x4glyphs[code];
            bytestream2_get_bufferu(&ctx->gb, colors, 2);

            for (k = 0; k < size; k++)
                for (t = 0; t < size; t++)
                    dst[t + k * stride] = colors[!*pglyph++];
            break;
        case 0xFC:
            /* copy the block from prev1 */
            for (k = 0; k < size; k++)
                memcpy(dst + k * stride, prev1 + k * stride, size);
            break;
        default:
            /* 0xF8..0xFB: fill with a color from the header parameter
             * table; seek there, read it, and restore the position */
            k = bytestream2_tell(&ctx->gb);
            bytestream2_seek(&ctx->gb, tbl + (code & 7), SEEK_SET);
            t = bytestream2_get_byte(&ctx->gb);
            bytestream2_seek(&ctx->gb, k, SEEK_SET);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
        }
    } else {
        /* motion-compensated copy from prev2 */
        int mx = motion_vectors[code][0];
        int my = motion_vectors[code][1];

        for (k = 0; k < size; k++)
            memcpy(dst + k * stride, prev2 + mx + (my + k) * stride, size);
    }

    return 0;
}
/**
 * Decode subcodec 47.
 * Compression 0 = raw frame, 1 = 2x2-upscaled frame, 2 = 8x8
 * block-based interframe coding (only when the sequence number follows
 * the previous one), 3/4 = copy of frm2/frm1, 5 = RLE.
 */
static int old_codec47(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
{
    int i, j, seq, compr, new_rot, tbl_pos, skip;
    int stride     = ctx->pitch;
    uint8_t *dst   = ((uint8_t*)ctx->frm0) + left + top * stride;
    uint8_t *prev1 = (uint8_t*)ctx->frm1;
    uint8_t *prev2 = (uint8_t*)ctx->frm2;
    uint32_t decoded_size;

    /* tbl_pos marks the start of this subchunk; the parameter table
     * used by process_block() sits at tbl_pos + 8 */
    tbl_pos = bytestream2_tell(&ctx->gb);
    seq     = bytestream2_get_le16(&ctx->gb);
    compr   = bytestream2_get_byte(&ctx->gb);
    new_rot = bytestream2_get_byte(&ctx->gb);
    skip    = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 9);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 8);

    /* clamp the advertised size to what the buffer can hold */
    if (decoded_size > height * stride - left - top * stride) {
        decoded_size = height * stride - left - top * stride;
        av_log(ctx->avctx, AV_LOG_WARNING, "decoded size is too large\n");
    }

    if (skip & 1)
        bytestream2_skip(&ctx->gb, 0x8080);
    if (!seq) {
        /* sequence restart: reference frames are cleared */
        ctx->prev_seq = -1;
        memset(prev1, 0, ctx->height * stride);
        memset(prev2, 0, ctx->height * stride);
    }
    av_dlog(ctx->avctx, "compression %d\n", compr);

    switch (compr) {
    case 0:
        /* raw frame */
        if (bytestream2_get_bytes_left(&ctx->gb) < width * height)
            return AVERROR_INVALIDDATA;
        for (j = 0; j < height; j++) {
            bytestream2_get_bufferu(&ctx->gb, dst, width);
            dst += stride;
        }
        break;
    case 1:
        /* half-resolution frame, each byte expanded to a 2x2 block */
        if (bytestream2_get_bytes_left(&ctx->gb) < ((width + 1) >> 1) * ((height + 1) >> 1))
            return AVERROR_INVALIDDATA;
        for (j = 0; j < height; j += 2) {
            for (i = 0; i < width; i += 2) {
                dst[i] = dst[i + 1] =
                dst[stride + i] = dst[stride + i + 1] = bytestream2_get_byteu(&ctx->gb);
            }
            dst += stride * 2;
        }
        break;
    case 2:
        /* interframe coding, valid only on consecutive sequence
         * numbers */
        if (seq == ctx->prev_seq + 1) {
            for (j = 0; j < height; j += 8) {
                for (i = 0; i < width; i += 8) {
                    if (process_block(ctx, dst + i, prev1 + i, prev2 + i, stride,
                                      tbl_pos + 8, 8))
                        return AVERROR_INVALIDDATA;
                }
                dst   += stride * 8;
                prev1 += stride * 8;
                prev2 += stride * 8;
            }
        }
        break;
    case 3:
        memcpy(ctx->frm0, ctx->frm2, ctx->pitch * ctx->height);
        break;
    case 4:
        memcpy(ctx->frm0, ctx->frm1, ctx->pitch * ctx->height);
        break;
    case 5:
        if (rle_decode(ctx, dst, decoded_size))
            return AVERROR_INVALIDDATA;
        break;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR,
               "subcodec 47 compression %d not implemented\n", compr);
        return AVERROR_PATCHWELCOME;
    }
    /* buffer rotation only applies when this frame continued the
     * sequence */
    if (seq == ctx->prev_seq + 1)
        ctx->rotate_code = new_rot;
    else
        ctx->rotate_code = 0;
    ctx->prev_seq = seq;

    return 0;
}
  626. static int process_frame_obj(SANMVideoContext *ctx)
  627. {
  628. uint16_t codec, top, left, w, h;
  629. codec = bytestream2_get_le16u(&ctx->gb);
  630. left = bytestream2_get_le16u(&ctx->gb);
  631. top = bytestream2_get_le16u(&ctx->gb);
  632. w = bytestream2_get_le16u(&ctx->gb);
  633. h = bytestream2_get_le16u(&ctx->gb);
  634. if (ctx->width < left + w || ctx->height < top + h) {
  635. if (av_image_check_size(FFMAX(left + w, ctx->width),
  636. FFMAX(top + h, ctx->height), 0, ctx->avctx) < 0)
  637. return AVERROR_INVALIDDATA;
  638. avcodec_set_dimensions(ctx->avctx, FFMAX(left + w, ctx->width),
  639. FFMAX(top + h, ctx->height));
  640. init_sizes(ctx, left + w, top + h);
  641. if (init_buffers(ctx)) {
  642. av_log(ctx->avctx, AV_LOG_ERROR, "error resizing buffers\n");
  643. return AVERROR(ENOMEM);
  644. }
  645. }
  646. bytestream2_skip(&ctx->gb, 4);
  647. av_dlog(ctx->avctx, "subcodec %d\n", codec);
  648. switch (codec) {
  649. case 1:
  650. case 3:
  651. return old_codec1(ctx, top, left, w, h);
  652. break;
  653. case 37:
  654. return old_codec37(ctx, top, left, w, h);
  655. break;
  656. case 47:
  657. return old_codec47(ctx, top, left, w, h);
  658. break;
  659. default:
  660. av_log_ask_for_sample(ctx->avctx, "unknown subcodec %d\n", codec);
  661. return AVERROR_PATCHWELCOME;
  662. }
  663. }
  664. static int decode_0(SANMVideoContext *ctx)
  665. {
  666. uint16_t *frm = ctx->frm0;
  667. int x, y;
  668. if (bytestream2_get_bytes_left(&ctx->gb) < ctx->width * ctx->height * 2) {
  669. av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for raw frame\n");
  670. return AVERROR_INVALIDDATA;
  671. }
  672. for (y = 0; y < ctx->height; y++) {
  673. for (x = 0; x < ctx->width; x++)
  674. frm[x] = bytestream2_get_le16u(&ctx->gb);
  675. frm += ctx->pitch;
  676. }
  677. return 0;
  678. }
/* Placeholder for unimplemented compression types: ask for a sample
 * and report the frame as unsupported. */
static int decode_nop(SANMVideoContext *ctx)
{
    av_log_ask_for_sample(ctx->avctx, "unknown/unsupported compression type\n");
    return AVERROR_PATCHWELCOME;
}
  684. static void copy_block(uint16_t *pdest, uint16_t *psrc, int block_size, int pitch)
  685. {
  686. uint8_t *dst = (uint8_t *)pdest;
  687. uint8_t *src = (uint8_t *)psrc;
  688. int stride = pitch * 2;
  689. switch (block_size) {
  690. case 2:
  691. copy_block4(dst, src, stride, stride, 2);
  692. break;
  693. case 4:
  694. copy_block8(dst, src, stride, stride, 4);
  695. break;
  696. case 8:
  697. copy_block16(dst, src, stride, stride, 8);
  698. break;
  699. }
  700. }
  701. static void fill_block(uint16_t *pdest, uint16_t color, int block_size, int pitch)
  702. {
  703. int x, y;
  704. pitch -= block_size;
  705. for (y = 0; y < block_size; y++, pdest += pitch)
  706. for (x = 0; x < block_size; x++)
  707. *pdest++ = color;
  708. }
  709. static int draw_glyph(SANMVideoContext *ctx, uint16_t *dst, int index, uint16_t fg_color,
  710. uint16_t bg_color, int block_size, int pitch)
  711. {
  712. int8_t *pglyph;
  713. uint16_t colors[2] = { fg_color, bg_color };
  714. int x, y;
  715. if (index >= NGLYPHS) {
  716. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring nonexistent glyph #%u\n", index);
  717. return AVERROR_INVALIDDATA;
  718. }
  719. pglyph = block_size == 8 ? ctx->p8x8glyphs[index] : ctx->p4x4glyphs[index];
  720. pitch -= block_size;
  721. for (y = 0; y < block_size; y++, dst += pitch)
  722. for (x = 0; x < block_size; x++)
  723. *dst++ = colors[*pglyph++];
  724. return 0;
  725. }
/**
 * Codec 2 opcode 0xF7: codebook-indexed block.
 * 2x2 blocks read four codebook indices packed in a 32-bit word;
 * larger blocks read a glyph index plus bg/fg codebook indices and
 * render the glyph.
 */
static int opcode_0xf7(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
{
    uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;

    if (block_size == 2) {
        uint32_t indices;

        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
            return AVERROR_INVALIDDATA;

        /* four packed codebook indices, low byte first */
        indices        = bytestream2_get_le32u(&ctx->gb);
        dst[0]         = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[1]         = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch]     = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch + 1] = ctx->codebook[indices & 0xFF];
    } else {
        uint16_t fgcolor, bgcolor;
        int glyph;

        if (bytestream2_get_bytes_left(&ctx->gb) < 3)
            return AVERROR_INVALIDDATA;

        glyph   = bytestream2_get_byteu(&ctx->gb);
        bgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
        fgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];

        draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
    }
    return 0;
}
  750. static int opcode_0xf8(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
  751. {
  752. uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;
  753. if (block_size == 2) {
  754. if (bytestream2_get_bytes_left(&ctx->gb) < 8)
  755. return AVERROR_INVALIDDATA;
  756. dst[0] = bytestream2_get_le16u(&ctx->gb);
  757. dst[1] = bytestream2_get_le16u(&ctx->gb);
  758. dst[pitch] = bytestream2_get_le16u(&ctx->gb);
  759. dst[pitch + 1] = bytestream2_get_le16u(&ctx->gb);
  760. } else {
  761. uint16_t fgcolor, bgcolor;
  762. int glyph;
  763. if (bytestream2_get_bytes_left(&ctx->gb) < 5)
  764. return AVERROR_INVALIDDATA;
  765. glyph = bytestream2_get_byteu(&ctx->gb);
  766. bgcolor = bytestream2_get_le16u(&ctx->gb);
  767. fgcolor = bytestream2_get_le16u(&ctx->gb);
  768. draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
  769. }
  770. return 0;
  771. }
  772. static int good_mvec(SANMVideoContext *ctx, int cx, int cy, int mx, int my,
  773. int block_size)
  774. {
  775. int start_pos = cx + mx + (cy + my) * ctx->pitch;
  776. int end_pos = start_pos + (block_size - 1) * (ctx->pitch + 1);
  777. int good = start_pos >= 0 && end_pos < (ctx->buf_size >> 1);
  778. if (!good) {
  779. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring invalid motion vector (%i, %i)->(%u, %u), block size = %u\n",
  780. cx + mx, cy + my, cx, cy, block_size);
  781. }
  782. return good;
  783. }
/**
 * Decode one codec 2 block at (cx, cy).
 * Opcodes below 0xF5 are motion-compensated copies from frm2; 0xF5
 * carries an explicit vector, 0xF6 copies from frm1, 0xF7/0xF8 are
 * codebook/raw color blocks, 0xF9..0xFC fill from the small codebook,
 * 0xFD fills via the 256-entry codebook, 0xFE fills with a raw color,
 * and 0xFF subdivides into four half-size blocks.
 */
static int codec2subblock(SANMVideoContext *ctx, int cx, int cy, int blk_size)
{
    int16_t mx, my, index;
    int opcode;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    opcode = bytestream2_get_byteu(&ctx->gb);

    av_dlog(ctx->avctx, "opcode 0x%0X cx %d cy %d blk %d\n", opcode, cx, cy, blk_size);
    switch (opcode) {
    default:
        /* table-driven motion compensation from frm2; invalid vectors
         * are skipped, leaving the block untouched */
        mx = motion_vectors[opcode][0];
        my = motion_vectors[opcode][1];

        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx      + ctx->pitch *  cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF5:
        /* explicit vector encoded as a 16-bit buffer offset */
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        index = bytestream2_get_le16u(&ctx->gb);

        mx = index % ctx->width;
        my = index / ctx->width;

        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx      + ctx->pitch *  cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF6:
        /* copy the co-located block from frm1 */
        copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                   ctx->frm1 + cx + ctx->pitch * cy,
                   blk_size, ctx->pitch);
        break;
    case 0xF7:
        opcode_0xf7(ctx, cx, cy, blk_size, ctx->pitch);
        break;

    case 0xF8:
        opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        break;
    case 0xF9:
    case 0xFA:
    case 0xFB:
    case 0xFC:
        /* fill from the 4-entry small codebook */
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->small_codebook[opcode - 0xf9], blk_size, ctx->pitch);
        break;
    case 0xFD:
        /* fill with a color looked up in the 256-entry codebook */
        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->codebook[bytestream2_get_byteu(&ctx->gb)], blk_size, ctx->pitch);
        break;
    case 0xFE:
        /* fill with a raw RGB565 color */
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   bytestream2_get_le16u(&ctx->gb), blk_size, ctx->pitch);
        break;
    case 0xFF:
        /* subdivide into four quadrants; at size 2 fall back to raw
         * pixels */
        if (blk_size == 2) {
            opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        } else {
            blk_size >>= 1;
            if (codec2subblock(ctx, cx           , cy           , blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy           , blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx           , cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
        }
        break;
    }
    return 0;
}
  862. static int decode_2(SANMVideoContext *ctx)
  863. {
  864. int cx, cy, ret;
  865. for (cy = 0; cy < ctx->aligned_height; cy += 8) {
  866. for (cx = 0; cx < ctx->aligned_width; cx += 8) {
  867. if (ret = codec2subblock(ctx, cx, cy, 8))
  868. return ret;
  869. }
  870. }
  871. return 0;
  872. }
/* Codec 3: repeat the frame stored in frm2. */
static int decode_3(SANMVideoContext *ctx)
{
    memcpy(ctx->frm0, ctx->frm2, ctx->frm2_size);
    return 0;
}
/* Codec 4: repeat the frame stored in frm1. */
static int decode_4(SANMVideoContext *ctx)
{
    memcpy(ctx->frm0, ctx->frm1, ctx->frm1_size);
    return 0;
}
  883. static int decode_5(SANMVideoContext *ctx)
  884. {
  885. #if HAVE_BIGENDIAN
  886. uint16_t *frm;
  887. int npixels;
  888. #endif
  889. uint8_t *dst = (uint8_t*)ctx->frm0;
  890. if (rle_decode(ctx, dst, ctx->buf_size))
  891. return AVERROR_INVALIDDATA;
  892. #if HAVE_BIGENDIAN
  893. npixels = ctx->npixels;
  894. frm = ctx->frm0;
  895. while (npixels--)
  896. *frm++ = av_bswap16(*frm);
  897. #endif
  898. return 0;
  899. }
  900. static int decode_6(SANMVideoContext *ctx)
  901. {
  902. int npixels = ctx->npixels;
  903. uint16_t *frm = ctx->frm0;
  904. if (bytestream2_get_bytes_left(&ctx->gb) < npixels) {
  905. av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for frame\n");
  906. return AVERROR_INVALIDDATA;
  907. }
  908. while (npixels--)
  909. *frm++ = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
  910. return 0;
  911. }
  912. static int decode_8(SANMVideoContext *ctx)
  913. {
  914. uint16_t *pdest = ctx->frm0;
  915. uint8_t *rsrc;
  916. long npixels = ctx->npixels;
  917. av_fast_malloc(&ctx->rle_buf, &ctx->rle_buf_size, npixels);
  918. if (!ctx->rle_buf) {
  919. av_log(ctx->avctx, AV_LOG_ERROR, "RLE buffer allocation failed\n");
  920. return AVERROR(ENOMEM);
  921. }
  922. rsrc = ctx->rle_buf;
  923. if (rle_decode(ctx, rsrc, npixels))
  924. return AVERROR_INVALIDDATA;
  925. while (npixels--)
  926. *pdest++ = ctx->codebook[*rsrc++];
  927. return 0;
  928. }
/* Signature shared by all per-subcodec frame decoders. */
typedef int (*frm_decoder)(SANMVideoContext *ctx);

/* Dispatch table for version-1 frames, indexed by the codec id from the
 * frame header. Slots 1 and 7 are no-ops (decode_nop). */
static const frm_decoder v1_decoders[] = {
    decode_0, decode_nop, decode_2, decode_3, decode_4, decode_5,
    decode_6, decode_nop, decode_8
};
  934. static int read_frame_header(SANMVideoContext *ctx, SANMFrameHeader *hdr)
  935. {
  936. int i, ret;
  937. if ((ret = bytestream2_get_bytes_left(&ctx->gb)) < 560) {
  938. av_log(ctx->avctx, AV_LOG_ERROR, "too short input frame (%d bytes)\n",
  939. ret);
  940. return AVERROR_INVALIDDATA;
  941. }
  942. bytestream2_skip(&ctx->gb, 8); // skip pad
  943. hdr->width = bytestream2_get_le32u(&ctx->gb);
  944. hdr->height = bytestream2_get_le32u(&ctx->gb);
  945. if (hdr->width != ctx->width || hdr->height != ctx->height) {
  946. av_log(ctx->avctx, AV_LOG_ERROR, "variable size frames are not implemented\n");
  947. return AVERROR_PATCHWELCOME;
  948. }
  949. hdr->seq_num = bytestream2_get_le16u(&ctx->gb);
  950. hdr->codec = bytestream2_get_byteu(&ctx->gb);
  951. hdr->rotate_code = bytestream2_get_byteu(&ctx->gb);
  952. bytestream2_skip(&ctx->gb, 4); // skip pad
  953. for (i = 0; i < 4; i++)
  954. ctx->small_codebook[i] = bytestream2_get_le16u(&ctx->gb);
  955. hdr->bg_color = bytestream2_get_le16u(&ctx->gb);
  956. bytestream2_skip(&ctx->gb, 2); // skip pad
  957. hdr->rle_output_size = bytestream2_get_le32u(&ctx->gb);
  958. for (i = 0; i < 256; i++)
  959. ctx->codebook[i] = bytestream2_get_le16u(&ctx->gb);
  960. bytestream2_skip(&ctx->gb, 8); // skip pad
  961. av_dlog(ctx->avctx, "subcodec %d\n", hdr->codec);
  962. return 0;
  963. }
  964. static void fill_frame(uint16_t *pbuf, int buf_size, uint16_t color)
  965. {
  966. while (buf_size--)
  967. *pbuf++ = color;
  968. }
  969. static int copy_output(SANMVideoContext *ctx, SANMFrameHeader *hdr)
  970. {
  971. uint8_t *dst;
  972. const uint8_t *src = (uint8_t*) ctx->frm0;
  973. int ret, dstpitch, height = ctx->height;
  974. int srcpitch = ctx->pitch * (hdr ? sizeof(ctx->frm0[0]) : 1);
  975. if ((ret = ff_get_buffer(ctx->avctx, ctx->output)) < 0) {
  976. av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  977. return ret;
  978. }
  979. dst = ctx->output->data[0];
  980. dstpitch = ctx->output->linesize[0];
  981. while (height--) {
  982. memcpy(dst, src, srcpitch);
  983. src += srcpitch;
  984. dst += dstpitch;
  985. }
  986. return 0;
  987. }
/**
 * Decode one SANM/SMUSH packet into an AVFrame.
 *
 * Version 0 packets are a sequence of IFF-style chunks (big-endian tag +
 * size): NPAL/XPAL palette updates, FOBJ frame objects, STOR/FTCH frame
 * store/fetch. Version 1+ packets carry a fixed 560-byte header followed
 * by subcodec data dispatched through v1_decoders[].
 *
 * Returns the number of bytes consumed (pkt->size) or a negative
 * AVERROR code.
 */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *pkt)
{
    SANMVideoContext *ctx = avctx->priv_data;
    int i, ret;

    bytestream2_init(&ctx->gb, pkt->data, pkt->size);
    /* Release the previous output frame before reusing ctx->output. */
    if (ctx->output->data[0])
        avctx->release_buffer(avctx, ctx->output);

    if (!ctx->version) {
        /* Old (version 0) format: iterate over chunks. */
        int to_store = 0;

        while (bytestream2_get_bytes_left(&ctx->gb) >= 8) {
            uint32_t sig, size;
            int pos;

            sig  = bytestream2_get_be32u(&ctx->gb);
            size = bytestream2_get_be32u(&ctx->gb);
            pos  = bytestream2_tell(&ctx->gb); /* start of chunk payload */
            if (bytestream2_get_bytes_left(&ctx->gb) < size) {
                av_log(avctx, AV_LOG_ERROR, "incorrect chunk size %d\n", size);
                break;
            }
            switch (sig) {
            case MKBETAG('N', 'P', 'A', 'L'):
                /* New palette: 256 RGB24 entries. */
                if (size != 256 * 3) {
                    av_log(avctx, AV_LOG_ERROR, "incorrect palette block size %d\n",
                           size);
                    return AVERROR_INVALIDDATA;
                }
                for (i = 0; i < 256; i++)
                    ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
                break;
            case MKBETAG('F', 'O', 'B', 'J'):
                /* Frame object: actual image data. */
                if (size < 16)
                    return AVERROR_INVALIDDATA;
                if (ret = process_frame_obj(ctx))
                    return ret;
                break;
            case MKBETAG('X', 'P', 'A', 'L'):
                if (size == 6 || size == 4) {
                    /* Short form: apply the stored per-component deltas
                     * to the current palette (fixed-point, >> 7). */
                    uint8_t tmp[3];
                    int j;

                    for (i = 0; i < 256; i++) {
                        for (j = 0; j < 3; j++) {
                            int t = (ctx->pal[i] >> (16 - j * 8)) & 0xFF;
                            tmp[j] = av_clip_uint8((t * 129 + ctx->delta_pal[i * 3 + j]) >> 7);
                        }
                        ctx->pal[i] = 0xFFU << 24 | AV_RB24(tmp);
                    }
                } else {
                    /* Long form: load 768 16-bit deltas, optionally
                     * followed by a full replacement palette. */
                    if (size < 768 * 2 + 4) {
                        av_log(avctx, AV_LOG_ERROR, "incorrect palette change block size %d\n",
                               size);
                        return AVERROR_INVALIDDATA;
                    }
                    bytestream2_skipu(&ctx->gb, 4);
                    for (i = 0; i < 768; i++)
                        ctx->delta_pal[i] = bytestream2_get_le16u(&ctx->gb);
                    if (size >= 768 * 5 + 4) {
                        for (i = 0; i < 256; i++)
                            ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
                    } else {
                        memset(ctx->pal, 0, sizeof(ctx->pal));
                    }
                }
                break;
            case MKBETAG('S', 'T', 'O', 'R'):
                /* Remember to snapshot the frame after decoding. */
                to_store = 1;
                break;
            case MKBETAG('F', 'T', 'C', 'H'):
                /* Restore the previously stored frame. */
                memcpy(ctx->frm0, ctx->stored_frame, ctx->buf_size);
                break;
            default:
                bytestream2_skip(&ctx->gb, size);
                av_log(avctx, AV_LOG_DEBUG, "unknown/unsupported chunk %x\n", sig);
                break;
            }

            /* Reposition past the chunk; chunks are 2-byte aligned. */
            bytestream2_seek(&ctx->gb, pos + size, SEEK_SET);
            if (size & 1)
                bytestream2_skip(&ctx->gb, 1);
        }
        if (to_store)
            memcpy(ctx->stored_frame, ctx->frm0, ctx->buf_size);
        if ((ret = copy_output(ctx, NULL)))
            return ret;
        memcpy(ctx->output->data[1], ctx->pal, 1024);
    } else {
        /* New (version 1+) format: fixed header + subcodec payload. */
        SANMFrameHeader header;

        if ((ret = read_frame_header(ctx, &header)))
            return ret;

        ctx->rotate_code = header.rotate_code;
        /* Sequence number 0 marks a keyframe; reset both reference
         * frames to the background color. */
        if ((ctx->output->key_frame = !header.seq_num)) {
            ctx->output->pict_type = AV_PICTURE_TYPE_I;
            fill_frame(ctx->frm1, ctx->npixels, header.bg_color);
            fill_frame(ctx->frm2, ctx->npixels, header.bg_color);
        } else {
            ctx->output->pict_type = AV_PICTURE_TYPE_P;
        }

        if (header.codec < FF_ARRAY_ELEMS(v1_decoders)) {
            if ((ret = v1_decoders[header.codec](ctx))) {
                av_log(avctx, AV_LOG_ERROR,
                       "subcodec %d: error decoding frame\n", header.codec);
                return ret;
            }
        } else {
            av_log_ask_for_sample(avctx, "subcodec %d is not implemented\n",
                                  header.codec);
            return AVERROR_PATCHWELCOME;
        }
        if ((ret = copy_output(ctx, &header)))
            return ret;
    }
    /* Cycle the frame buffers as requested by the header. */
    if (ctx->rotate_code)
        rotate_bufs(ctx, ctx->rotate_code);

    *got_frame_ptr  = 1;
    *(AVFrame*)data = *ctx->output;

    return pkt->size;
}
/* Codec registration entry for the SANM/SMUSH decoder. */
AVCodec ff_sanm_decoder = {
    .name           = "sanm",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SANM,
    .priv_data_size = sizeof(SANMVideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1, // supports direct rendering
    .long_name      = NULL_IF_CONFIG_SMALL("LucasArts SMUSH video"),
};