You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1284 lines
40KB

  1. /*
  2. * LucasArts Smush video decoder
  3. * Copyright (c) 2006 Cyril Zorin
  4. * Copyright (c) 2011 Konstantin Shishkov
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. // #define DEBUG 1
  23. #include "avcodec.h"
  24. #include "bytestream.h"
  25. #include "internal.h"
  26. #include "libavutil/bswap.h"
  27. #include "libavcodec/dsputil.h"
  28. #include "sanm_data.h"
  29. #define NGLYPHS 256
/* Decoder state shared by both the old (palettized SMUSH) and the
 * new (16bpp SANM) code paths. */
typedef struct {
    AVCodecContext *avctx;
    GetByteContext gb;              // bytestream reader over the current packet
    int version, subversion;        // version: nonzero = new 16bpp format (no extradata)
    uint32_t pal[256];              // current palette, 0xFFRRGGBB (old format only)
    int16_t delta_pal[768];         // per-component palette deltas (old format);
                                    // not used in this chunk -- TODO confirm semantics
    int pitch;                      // line width of the work buffers, in pixels
    int width, height;
    int aligned_width, aligned_height; // dimensions rounded up to multiples of 8
    int prev_seq;                   // sequence number of the previously decoded frame
    AVFrame frame, *output;
    uint16_t *frm0, *frm1, *frm2;   // rotating current/previous/older frame buffers
    uint8_t *stored_frame;          // extra frame store, allocated for the old format only
    uint32_t frm0_size, frm1_size, frm2_size;
    uint32_t stored_frame_size;
    uint8_t *rle_buf;               // scratch buffer for RLE index expansion (decode_8)
    unsigned int rle_buf_size;
    int rotate_code;                // how to rotate frm0/frm1/frm2 after this frame
    long npixels, buf_size;         // pixel count; per-buffer size in bytes
    uint16_t codebook[256];         // 16bpp colors indexed by byte codes (frame header)
    uint16_t small_codebook[4];     // 4-entry color table for opcodes 0xF9-0xFC
    int8_t p4x4glyphs[NGLYPHS][16]; // precomputed 4x4 binary glyph patterns
    int8_t p8x8glyphs[NGLYPHS][64]; // precomputed 8x8 binary glyph patterns
} SANMVideoContext;
/* Per-frame parameters parsed from a new-format frame header
 * (see read_frame_header()). */
typedef struct {
    int seq_num, codec, rotate_code, rle_output_size;
    uint16_t bg_color;      // background color; presumably used to clear the
                            // frame -- confirm against the (unseen) decode_frame
    uint32_t width, height; // dimensions as stored in the frame header
} SANMFrameHeader;
/* Which edge of the glyph bounding box a vector endpoint lies on. */
enum GlyphEdge {
    LEFT_EDGE,
    TOP_EDGE,
    RIGHT_EDGE,
    BOTTOM_EDGE,
    NO_EDGE
};
/* Fill direction used when rasterizing a glyph stroke in make_glyphs(). */
enum GlyphDir {
    DIR_LEFT,
    DIR_UP,
    DIR_RIGHT,
    DIR_DOWN,
    NO_DIR
};
  73. /**
  74. * Return enum GlyphEdge of box where point (x, y) lies.
  75. *
  76. * @param x x point coordinate
  77. * @param y y point coordinate
  78. * @param edge_size box width/height.
  79. */
  80. static enum GlyphEdge which_edge(int x, int y, int edge_size)
  81. {
  82. const int edge_max = edge_size - 1;
  83. if (!y) {
  84. return BOTTOM_EDGE;
  85. } else if (y == edge_max) {
  86. return TOP_EDGE;
  87. } else if (!x) {
  88. return LEFT_EDGE;
  89. } else if (x == edge_max) {
  90. return RIGHT_EDGE;
  91. } else {
  92. return NO_EDGE;
  93. }
  94. }
static enum GlyphDir which_direction(enum GlyphEdge edge0, enum GlyphEdge edge1)
{
    /* Map the pair of edges a glyph stroke connects to a fill direction.
     * NOTE: the order of these tests is significant -- earlier branches
     * deliberately capture some edge pairs before later ones can (e.g.
     * BOTTOM paired with anything except TOP is DIR_UP, and the
     * TOP+BOTTOM pair falls all the way through to DIR_RIGHT because
     * DIR_DOWN explicitly excludes it). Do not reorder. */
    if ((edge0 == LEFT_EDGE && edge1 == RIGHT_EDGE) ||
        (edge1 == LEFT_EDGE && edge0 == RIGHT_EDGE) ||
        (edge0 == BOTTOM_EDGE && edge1 != TOP_EDGE) ||
        (edge1 == BOTTOM_EDGE && edge0 != TOP_EDGE)) {
        return DIR_UP;
    } else if ((edge0 == TOP_EDGE && edge1 != BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 != BOTTOM_EDGE)) {
        return DIR_DOWN;
    } else if ((edge0 == LEFT_EDGE && edge1 != RIGHT_EDGE) ||
               (edge1 == LEFT_EDGE && edge0 != RIGHT_EDGE)) {
        return DIR_LEFT;
    } else if ((edge0 == TOP_EDGE && edge1 == BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 == BOTTOM_EDGE) ||
               (edge0 == RIGHT_EDGE && edge1 != LEFT_EDGE) ||
               (edge1 == RIGHT_EDGE && edge0 != LEFT_EDGE)) {
        return DIR_RIGHT;
    }
    return NO_DIR;
}
  116. /**
  117. * Interpolate two points.
  118. */
  119. static void interp_point(int8_t *points, int x0, int y0, int x1, int y1,
  120. int pos, int npoints)
  121. {
  122. if (npoints) {
  123. points[0] = (x0 * pos + x1 * (npoints - pos) + (npoints >> 1)) / npoints;
  124. points[1] = (y0 * pos + y1 * (npoints - pos) + (npoints >> 1)) / npoints;
  125. } else {
  126. points[0] = x0;
  127. points[1] = y0;
  128. }
  129. }
  130. /**
  131. * Construct glyphs by iterating through vectors coordinates.
  132. *
  133. * @param pglyphs pointer to table where glyphs are stored
  134. * @param xvec pointer to x component of vectors coordinates
  135. * @param yvec pointer to y component of vectors coordinates
  136. * @param side_length glyph width/height.
  137. */
static void make_glyphs(int8_t *pglyphs, const int8_t *xvec, const int8_t *yvec,
                        const int side_length)
{
    const int glyph_size = side_length * side_length;
    int8_t *pglyph = pglyphs;
    int i, j;

    /* Every ordered pair of coordinate-vector entries yields one glyph:
     * the line between the two points is walked and, from each point on
     * it, cells are flood-filled towards the edge chosen by
     * which_direction(). Cells are only ever set to 1 -- assumes the
     * glyph arrays start zeroed (context is zero-initialized). */
    for (i = 0; i < GLYPH_COORD_VECT_SIZE; i++) {
        int x0 = xvec[i];
        int y0 = yvec[i];
        enum GlyphEdge edge0 = which_edge(x0, y0, side_length);

        for (j = 0; j < GLYPH_COORD_VECT_SIZE; j++, pglyph += glyph_size) {
            int x1 = xvec[j];
            int y1 = yvec[j];
            enum GlyphEdge edge1 = which_edge(x1, y1, side_length);
            enum GlyphDir dir = which_direction(edge0, edge1);
            int npoints = FFMAX(FFABS(x1 - x0), FFABS(y1 - y0));
            int ipoint;

            for (ipoint = 0; ipoint <= npoints; ipoint++) {
                int8_t point[2];
                int irow, icol;

                interp_point(point, x0, y0, x1, y1, ipoint, npoints);

                switch (dir) {
                case DIR_UP:
                    for (irow = point[1]; irow >= 0; irow--)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;
                case DIR_DOWN:
                    for (irow = point[1]; irow < side_length; irow++)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;
                case DIR_LEFT:
                    for (icol = point[0]; icol >= 0; icol--)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;
                case DIR_RIGHT:
                    for (icol = point[0]; icol < side_length; icol++)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;
                }
            }
        }
    }
}
  181. static void init_sizes(SANMVideoContext *ctx, int width, int height)
  182. {
  183. ctx->width = width;
  184. ctx->height = height;
  185. ctx->npixels = width * height;
  186. ctx->aligned_width = FFALIGN(width, 8);
  187. ctx->aligned_height = FFALIGN(height, 8);
  188. ctx->buf_size = ctx->aligned_width * ctx->aligned_height * sizeof(ctx->frm0[0]);
  189. ctx->pitch = width;
  190. }
  191. static void destroy_buffers(SANMVideoContext *ctx)
  192. {
  193. av_freep(&ctx->frm0);
  194. av_freep(&ctx->frm1);
  195. av_freep(&ctx->frm2);
  196. av_freep(&ctx->stored_frame);
  197. av_freep(&ctx->rle_buf);
  198. }
  199. static av_cold int init_buffers(SANMVideoContext *ctx)
  200. {
  201. av_fast_padded_malloc(&ctx->frm0, &ctx->frm0_size, ctx->buf_size);
  202. av_fast_padded_malloc(&ctx->frm1, &ctx->frm1_size, ctx->buf_size);
  203. av_fast_padded_malloc(&ctx->frm2, &ctx->frm2_size, ctx->buf_size);
  204. if (!ctx->version)
  205. av_fast_padded_malloc(&ctx->stored_frame, &ctx->stored_frame_size, ctx->buf_size);
  206. if (!ctx->frm0 || !ctx->frm1 || !ctx->frm2 || (!ctx->stored_frame && !ctx->version)) {
  207. destroy_buffers(ctx);
  208. return AVERROR(ENOMEM);
  209. }
  210. return 0;
  211. }
static void rotate_bufs(SANMVideoContext *ctx, int rotate_code)
{
    av_dlog(ctx->avctx, "rotate %d\n", rotate_code);
    /* rotate_code == 2: three-way rotation -- afterwards frm0 = old frm1,
     * frm1 = old frm2, frm2 = old frm0. Any other code just exchanges
     * frm0 and frm2 (current <-> older reference). */
    if (rotate_code == 2)
        FFSWAP(uint16_t*, ctx->frm1, ctx->frm2);
    FFSWAP(uint16_t*, ctx->frm2, ctx->frm0);
}
  219. static av_cold int decode_init(AVCodecContext *avctx)
  220. {
  221. SANMVideoContext *ctx = avctx->priv_data;
  222. ctx->avctx = avctx;
  223. ctx->version = !avctx->extradata_size;
  224. avctx->pix_fmt = ctx->version ? AV_PIX_FMT_RGB565 : AV_PIX_FMT_PAL8;
  225. init_sizes(ctx, avctx->width, avctx->height);
  226. if (init_buffers(ctx)) {
  227. av_log(avctx, AV_LOG_ERROR, "error allocating buffers\n");
  228. return AVERROR(ENOMEM);
  229. }
  230. ctx->output = &ctx->frame;
  231. ctx->output->data[0] = 0;
  232. make_glyphs(ctx->p4x4glyphs[0], glyph4_x, glyph4_y, 4);
  233. make_glyphs(ctx->p8x8glyphs[0], glyph8_x, glyph8_y, 8);
  234. if (!ctx->version) {
  235. int i;
  236. if (avctx->extradata_size < 1026) {
  237. av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
  238. return AVERROR_INVALIDDATA;
  239. }
  240. ctx->subversion = AV_RL16(avctx->extradata);
  241. for (i = 0; i < 256; i++)
  242. ctx->pal[i] = 0xFFU << 24 | AV_RL32(avctx->extradata + 2 + i * 4);
  243. }
  244. return 0;
  245. }
static av_cold int decode_end(AVCodecContext *avctx)
{
    /* Free all work buffers and release any frame still held via the
     * (pre-refcounting) release_buffer API. */
    SANMVideoContext *ctx = avctx->priv_data;

    destroy_buffers(ctx);
    if (ctx->frame.data[0]) {
        avctx->release_buffer(avctx, &ctx->frame);
        ctx->frame.data[0] = 0;
    }

    return 0;
}
  256. static int rle_decode(SANMVideoContext *ctx, uint8_t *dst, const int out_size)
  257. {
  258. int opcode, color, run_len, left = out_size;
  259. while (left > 0) {
  260. opcode = bytestream2_get_byte(&ctx->gb);
  261. run_len = (opcode >> 1) + 1;
  262. if (run_len > left || bytestream2_get_bytes_left(&ctx->gb) <= 0)
  263. return AVERROR_INVALIDDATA;
  264. if (opcode & 1) {
  265. color = bytestream2_get_byte(&ctx->gb);
  266. memset(dst, color, run_len);
  267. } else {
  268. if (bytestream2_get_bytes_left(&ctx->gb) < run_len)
  269. return AVERROR_INVALIDDATA;
  270. bytestream2_get_bufferu(&ctx->gb, dst, run_len);
  271. }
  272. dst += run_len;
  273. left -= run_len;
  274. }
  275. return 0;
  276. }
static int old_codec1(SANMVideoContext *ctx, int top,
                      int left, int width, int height)
{
    /* Old SMUSH codec 1/3: per-line RLE over 8-bit pixels. A pixel
     * value of 0 is transparent -- the destination byte is left
     * untouched instead of being written. */
    uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * ctx->pitch;
    int i, j, len, flag, code, val, pos, end;

    for (i = 0; i < height; i++) {
        pos = 0;

        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        /* each line is prefixed with its packed byte length */
        len = bytestream2_get_le16u(&ctx->gb);
        end = bytestream2_tell(&ctx->gb) + len;

        while (bytestream2_tell(&ctx->gb) < end) {
            if (bytestream2_get_bytes_left(&ctx->gb) < 2)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&ctx->gb);
            flag = code & 1;
            code = (code >> 1) + 1;
            if (pos + code > width)
                return AVERROR_INVALIDDATA;
            if (flag) {
                /* run of one value; 0 means "skip" (transparency) */
                val = bytestream2_get_byteu(&ctx->gb);
                if (val)
                    memset(dst + pos, val, code);
                pos += code;
            } else {
                /* literal run, still honoring 0 as transparent */
                if (bytestream2_get_bytes_left(&ctx->gb) < code)
                    return AVERROR_INVALIDDATA;
                for (j = 0; j < code; j++) {
                    val = bytestream2_get_byteu(&ctx->gb);
                    if (val)
                        dst[pos] = val;
                    pos++;
                }
            }
        }
        dst += ctx->pitch;
    }
    ctx->rotate_code = 0;

    return 0;
}
  317. static inline void codec37_mv(uint8_t *dst, const uint8_t *src,
  318. int height, int stride, int x, int y)
  319. {
  320. int pos, i, j;
  321. pos = x + y * stride;
  322. for (j = 0; j < 4; j++) {
  323. for (i = 0; i < 4; i++) {
  324. if ((pos + i) < 0 || (pos + i) >= height * stride)
  325. dst[i] = 0;
  326. else
  327. dst[i] = src[i];
  328. }
  329. dst += stride;
  330. src += stride;
  331. pos += stride;
  332. }
  333. }
static int old_codec37(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
{
    /* Old SMUSH codec 37: 4x4-block codec with motion compensation
     * against the older reference frame (frm2), writing into frm0. */
    int stride = ctx->pitch;
    int i, j, k, t;
    int skip_run = 0;
    int compr, mvoff, seq, flags;
    uint32_t decoded_size;
    uint8_t *dst, *prev;

    compr = bytestream2_get_byte(&ctx->gb);
    mvoff = bytestream2_get_byte(&ctx->gb); /* selects one of 3 MV tables */
    seq = bytestream2_get_le16(&ctx->gb);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 4);
    flags = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 3);

    ctx->rotate_code = 0;

    if (((seq & 1) || !(flags & 1)) && (compr && compr != 2))
        rotate_bufs(ctx, 1);

    dst = ((uint8_t*)ctx->frm0) + left + top * stride;
    prev = ((uint8_t*)ctx->frm2) + left + top * stride;

    if (mvoff > 2) {
        av_log(ctx->avctx, AV_LOG_ERROR, "invalid motion base value %d\n", mvoff);
        return AVERROR_INVALIDDATA;
    }
    av_dlog(ctx->avctx, "compression %d\n", compr);
    switch (compr) {
    case 0:
        /* raw copy of the area; reference frames are cleared */
        for (i = 0; i < height; i++) {
            bytestream2_get_buffer(&ctx->gb, dst, width);
            dst += stride;
        }
        memset(ctx->frm1, 0, ctx->height * stride);
        memset(ctx->frm2, 0, ctx->height * stride);
        break;
    case 2:
        /* whole area RLE-compressed; reference frames are cleared */
        if (rle_decode(ctx, dst, decoded_size))
            return AVERROR_INVALIDDATA;
        memset(ctx->frm1, 0, ctx->frm1_size);
        memset(ctx->frm2, 0, ctx->frm2_size);
        break;
    case 3:
    case 4:
        /* block-coded update; flags bit 2 enables the fill opcodes */
        if (flags & 4) {
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;
                    if (skip_run) {
                        /* continue a pending run of copy-from-previous blocks */
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }
                    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                        return AVERROR_INVALIDDATA;
                    code = bytestream2_get_byteu(&ctx->gb);
                    switch (code) {
                    case 0xFF:
                        /* 16 literal pixels */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                        break;
                    case 0xFE:
                        /* one fill value per row */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, bytestream2_get_byteu(&ctx->gb), 4);
                        break;
                    case 0xFD:
                        /* single fill value for the whole block */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        t = bytestream2_get_byteu(&ctx->gb);
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, t, 4);
                        break;
                    default:
                        if (compr == 4 && !code) {
                            /* start a skip run; i -= 4 replays this block */
                            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                                return AVERROR_INVALIDDATA;
                            skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                            i -= 4;
                        } else {
                            /* motion-compensated copy from frm2 */
                            int mx, my;
                            mx = c37_mv[(mvoff * 255 + code) * 2 ];
                            my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                            codec37_mv(dst + i, prev + i + mx + my * stride,
                                       ctx->height, stride, i + mx, j + my);
                        }
                    }
                }
                dst += stride * 4;
                prev += stride * 4;
            }
        } else {
            /* same block loop without the 0xFE/0xFD fill opcodes */
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;
                    if (skip_run) {
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }
                    code = bytestream2_get_byte(&ctx->gb);
                    if (code == 0xFF) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                    } else if (compr == 4 && !code) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                        i -= 4;
                    } else {
                        int mx, my;
                        mx = c37_mv[(mvoff * 255 + code) * 2];
                        my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                        codec37_mv(dst + i, prev + i + mx + my * stride,
                                   ctx->height, stride, i + mx, j + my);
                    }
                }
                dst += stride * 4;
                prev += stride * 4;
            }
        }
        break;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR,
               "subcodec 37 compression %d not implemented\n", compr);
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
static int process_block(SANMVideoContext *ctx, uint8_t *dst, uint8_t *prev1,
                         uint8_t *prev2, int stride, int tbl, int size)
{
    /* Codec 47 recursive block decoder. Opcodes below 0xF8 are motion
     * vectors into prev2; 0xF8 and above select fills, glyph patterns,
     * copies, or a 4-way subdivision of the block. */
    int code, k, t;
    uint8_t colors[2];
    int8_t *pglyph;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    code = bytestream2_get_byteu(&ctx->gb);
    if (code >= 0xF8) {
        switch (code) {
        case 0xFF:
            /* split into four sub-blocks; 4 literal pixels at size 2 */
            if (size == 2) {
                if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                    return AVERROR_INVALIDDATA;
                dst[0] = bytestream2_get_byteu(&ctx->gb);
                dst[1] = bytestream2_get_byteu(&ctx->gb);
                dst[0+stride] = bytestream2_get_byteu(&ctx->gb);
                dst[1+stride] = bytestream2_get_byteu(&ctx->gb);
            } else {
                size >>= 1;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                dst += size * stride;
                prev1 += size * stride;
                prev2 += size * stride;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
            }
            break;
        case 0xFE:
            /* solid fill with an explicit color byte */
            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                return AVERROR_INVALIDDATA;
            t = bytestream2_get_byteu(&ctx->gb);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
            break;
        case 0xFD:
            /* two-color glyph pattern from the 8x8 or 4x4 table */
            if (bytestream2_get_bytes_left(&ctx->gb) < 3)
                return AVERROR_INVALIDDATA;
            code = bytestream2_get_byteu(&ctx->gb);
            pglyph = (size == 8) ? ctx->p8x8glyphs[code] : ctx->p4x4glyphs[code];
            bytestream2_get_bufferu(&ctx->gb, colors, 2);
            for (k = 0; k < size; k++)
                for (t = 0; t < size; t++)
                    dst[t + k * stride] = colors[!*pglyph++];
            break;
        case 0xFC:
            /* copy the block unchanged from prev1 */
            for (k = 0; k < size; k++)
                memcpy(dst + k * stride, prev1 + k * stride, size);
            break;
        default:
            /* 0xF8-0xFB: fill with a color fetched from the frame-header
             * table at tbl; seek there and back to read it */
            k = bytestream2_tell(&ctx->gb);
            bytestream2_seek(&ctx->gb, tbl + (code & 7), SEEK_SET);
            t = bytestream2_get_byte(&ctx->gb);
            bytestream2_seek(&ctx->gb, k, SEEK_SET);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
        }
    } else {
        /* motion-compensated copy from the older reference frame */
        int mx = motion_vectors[code][0];
        int my = motion_vectors[code][1];
        for (k = 0; k < size; k++)
            memcpy(dst + k * stride, prev2 + mx + (my + k) * stride, size);
    }
    return 0;
}
  540. static int old_codec47(SANMVideoContext *ctx, int top,
  541. int left, int width, int height)
  542. {
  543. int i, j, seq, compr, new_rot, tbl_pos, skip;
  544. int stride = ctx->pitch;
  545. uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * stride;
  546. uint8_t *prev1 = (uint8_t*)ctx->frm1;
  547. uint8_t *prev2 = (uint8_t*)ctx->frm2;
  548. uint32_t decoded_size;
  549. tbl_pos = bytestream2_tell(&ctx->gb);
  550. seq = bytestream2_get_le16(&ctx->gb);
  551. compr = bytestream2_get_byte(&ctx->gb);
  552. new_rot = bytestream2_get_byte(&ctx->gb);
  553. skip = bytestream2_get_byte(&ctx->gb);
  554. bytestream2_skip(&ctx->gb, 9);
  555. decoded_size = bytestream2_get_le32(&ctx->gb);
  556. bytestream2_skip(&ctx->gb, 8);
  557. if (skip & 1)
  558. bytestream2_skip(&ctx->gb, 0x8080);
  559. if (!seq) {
  560. ctx->prev_seq = -1;
  561. memset(prev1, 0, ctx->height * stride);
  562. memset(prev2, 0, ctx->height * stride);
  563. }
  564. av_dlog(ctx->avctx, "compression %d\n", compr);
  565. switch (compr) {
  566. case 0:
  567. if (bytestream2_get_bytes_left(&ctx->gb) < width * height)
  568. return AVERROR_INVALIDDATA;
  569. for (j = 0; j < height; j++) {
  570. for (i = 0; i < width; i++)
  571. bytestream2_get_bufferu(&ctx->gb, dst, width);
  572. dst += stride;
  573. }
  574. break;
  575. case 1:
  576. if (bytestream2_get_bytes_left(&ctx->gb) < ((width + 1) >> 1) * ((height + 1) >> 1))
  577. return AVERROR_INVALIDDATA;
  578. for (j = 0; j < height; j += 2) {
  579. for (i = 0; i < width; i += 2) {
  580. dst[i] = dst[i + 1] =
  581. dst[stride + i] = dst[stride + i + 1] = bytestream2_get_byteu(&ctx->gb);
  582. }
  583. dst += stride * 2;
  584. }
  585. break;
  586. case 2:
  587. if (seq == ctx->prev_seq + 1) {
  588. for (j = 0; j < height; j += 8) {
  589. for (i = 0; i < width; i += 8) {
  590. if (process_block(ctx, dst + i, prev1 + i, prev2 + i, stride,
  591. tbl_pos + 8, 8))
  592. return AVERROR_INVALIDDATA;
  593. }
  594. dst += stride * 8;
  595. prev1 += stride * 8;
  596. prev2 += stride * 8;
  597. }
  598. }
  599. break;
  600. case 3:
  601. memcpy(ctx->frm0, ctx->frm2, ctx->pitch * ctx->height);
  602. break;
  603. case 4:
  604. memcpy(ctx->frm0, ctx->frm1, ctx->pitch * ctx->height);
  605. break;
  606. case 5:
  607. if (rle_decode(ctx, dst, decoded_size))
  608. return AVERROR_INVALIDDATA;
  609. break;
  610. default:
  611. av_log(ctx->avctx, AV_LOG_ERROR,
  612. "subcodec 47 compression %d not implemented\n", compr);
  613. return AVERROR_PATCHWELCOME;
  614. }
  615. if (seq == ctx->prev_seq + 1)
  616. ctx->rotate_code = new_rot;
  617. else
  618. ctx->rotate_code = 0;
  619. ctx->prev_seq = seq;
  620. return 0;
  621. }
  622. static int process_frame_obj(SANMVideoContext *ctx)
  623. {
  624. uint16_t codec, top, left, w, h;
  625. codec = bytestream2_get_le16u(&ctx->gb);
  626. left = bytestream2_get_le16u(&ctx->gb);
  627. top = bytestream2_get_le16u(&ctx->gb);
  628. w = bytestream2_get_le16u(&ctx->gb);
  629. h = bytestream2_get_le16u(&ctx->gb);
  630. if (ctx->width < left + w || ctx->height < top + h) {
  631. ctx->avctx->width = FFMAX(left + w, ctx->width);
  632. ctx->avctx->height = FFMAX(top + h, ctx->height);
  633. init_sizes(ctx, left + w, top + h);
  634. if (init_buffers(ctx)) {
  635. av_log(ctx->avctx, AV_LOG_ERROR, "error resizing buffers\n");
  636. return AVERROR(ENOMEM);
  637. }
  638. }
  639. bytestream2_skip(&ctx->gb, 4);
  640. av_dlog(ctx->avctx, "subcodec %d\n", codec);
  641. switch (codec) {
  642. case 1:
  643. case 3:
  644. return old_codec1(ctx, top, left, w, h);
  645. break;
  646. case 37:
  647. return old_codec37(ctx, top, left, w, h);
  648. break;
  649. case 47:
  650. return old_codec47(ctx, top, left, w, h);
  651. break;
  652. default:
  653. av_log_ask_for_sample(ctx->avctx, "unknown subcodec %d\n", codec);
  654. return AVERROR_PATCHWELCOME;
  655. }
  656. }
  657. static int decode_0(SANMVideoContext *ctx)
  658. {
  659. uint16_t *frm = ctx->frm0;
  660. int x, y;
  661. if (bytestream2_get_bytes_left(&ctx->gb) < ctx->width * ctx->height * 2) {
  662. av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for raw frame\n");
  663. return AVERROR_INVALIDDATA;
  664. }
  665. for (y = 0; y < ctx->height; y++) {
  666. for (x = 0; x < ctx->width; x++)
  667. frm[x] = bytestream2_get_le16u(&ctx->gb);
  668. frm += ctx->pitch;
  669. }
  670. return 0;
  671. }
static int decode_nop(SANMVideoContext *ctx)
{
    /* Placeholder for subcodec slots with no known implementation;
     * always requests a sample and fails. */
    av_log_ask_for_sample(ctx->avctx, "unknown/unsupported compression type\n");
    return AVERROR_PATCHWELCOME;
}
  677. static void copy_block(uint16_t *pdest, uint16_t *psrc, int block_size, int pitch)
  678. {
  679. uint8_t *dst = (uint8_t *)pdest;
  680. uint8_t *src = (uint8_t *)psrc;
  681. int stride = pitch * 2;
  682. switch (block_size) {
  683. case 2:
  684. copy_block4(dst, src, stride, stride, 2);
  685. break;
  686. case 4:
  687. copy_block8(dst, src, stride, stride, 4);
  688. break;
  689. case 8:
  690. copy_block16(dst, src, stride, stride, 8);
  691. break;
  692. }
  693. }
  694. static void fill_block(uint16_t *pdest, uint16_t color, int block_size, int pitch)
  695. {
  696. int x, y;
  697. pitch -= block_size;
  698. for (y = 0; y < block_size; y++, pdest += pitch)
  699. for (x = 0; x < block_size; x++)
  700. *pdest++ = color;
  701. }
  702. static int draw_glyph(SANMVideoContext *ctx, uint16_t *dst, int index, uint16_t fg_color,
  703. uint16_t bg_color, int block_size, int pitch)
  704. {
  705. int8_t *pglyph;
  706. uint16_t colors[2] = { fg_color, bg_color };
  707. int x, y;
  708. if (index >= NGLYPHS) {
  709. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring nonexistent glyph #%u\n", index);
  710. return AVERROR_INVALIDDATA;
  711. }
  712. pglyph = block_size == 8 ? ctx->p8x8glyphs[index] : ctx->p4x4glyphs[index];
  713. pitch -= block_size;
  714. for (y = 0; y < block_size; y++, dst += pitch)
  715. for (x = 0; x < block_size; x++)
  716. *dst++ = colors[*pglyph++];
  717. return 0;
  718. }
static int opcode_0xf7(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
{
    /* Codebook-indexed block: at size 2, four byte indices packed into a
     * LE32 word select codebook colors; at larger sizes a glyph is drawn
     * with two codebook-mapped colors. */
    uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;

    if (block_size == 2) {
        uint32_t indices;

        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
            return AVERROR_INVALIDDATA;

        indices = bytestream2_get_le32u(&ctx->gb);
        dst[0] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[1] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch + 1] = ctx->codebook[indices & 0xFF];
    } else {
        uint16_t fgcolor, bgcolor;
        int glyph;

        if (bytestream2_get_bytes_left(&ctx->gb) < 3)
            return AVERROR_INVALIDDATA;

        /* byte order in the stream: glyph index, bg index, fg index */
        glyph = bytestream2_get_byteu(&ctx->gb);
        bgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
        fgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];

        draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
    }
    return 0;
}
static int opcode_0xf8(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
{
    /* Literal-color block: at size 2, four raw LE16 pixels; at larger
     * sizes a glyph drawn with two raw LE16 colors. */
    uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;

    if (block_size == 2) {
        if (bytestream2_get_bytes_left(&ctx->gb) < 8)
            return AVERROR_INVALIDDATA;

        dst[0] = bytestream2_get_le16u(&ctx->gb);
        dst[1] = bytestream2_get_le16u(&ctx->gb);
        dst[pitch] = bytestream2_get_le16u(&ctx->gb);
        dst[pitch + 1] = bytestream2_get_le16u(&ctx->gb);
    } else {
        uint16_t fgcolor, bgcolor;
        int glyph;

        if (bytestream2_get_bytes_left(&ctx->gb) < 5)
            return AVERROR_INVALIDDATA;

        /* stream order: glyph index, bg color, fg color */
        glyph = bytestream2_get_byteu(&ctx->gb);
        bgcolor = bytestream2_get_le16u(&ctx->gb);
        fgcolor = bytestream2_get_le16u(&ctx->gb);

        draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
    }
    return 0;
}
  765. static int good_mvec(SANMVideoContext *ctx, int cx, int cy, int mx, int my,
  766. int block_size)
  767. {
  768. int start_pos = cx + mx + (cy + my) * ctx->pitch;
  769. int end_pos = start_pos + (block_size - 1) * (ctx->pitch + 1);
  770. int good = start_pos >= 0 && end_pos < (ctx->buf_size >> 1);
  771. if (!good) {
  772. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring invalid motion vector (%i, %i)->(%u, %u), block size = %u\n",
  773. cx + mx, cy + my, cx, cy, block_size);
  774. }
  775. return good;
  776. }
static int codec2subblock(SANMVideoContext *ctx, int cx, int cy, int blk_size)
{
    /* Subcodec 2 block decoder for the block at (cx, cy). Opcodes
     * 0xF5-0xFF are special cases; every other opcode is an index into
     * the motion-vector table. */
    int16_t mx, my, index;
    int opcode;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    opcode = bytestream2_get_byteu(&ctx->gb);

    av_dlog(ctx->avctx, "opcode 0x%0X cx %d cy %d blk %d\n", opcode, cx, cy, blk_size);
    switch (opcode) {
    default:
        /* motion-compensated copy from frm2; invalid vectors are logged
         * and skipped rather than failing the whole frame */
        mx = motion_vectors[opcode][0];
        my = motion_vectors[opcode][1];
        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF5:
        /* explicit motion vector packed as a linear pixel offset */
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        index = bytestream2_get_le16u(&ctx->gb);
        mx = index % ctx->width;
        my = index / ctx->width;
        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF6:
        /* copy the co-located block from the previous frame */
        copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                   ctx->frm1 + cx + ctx->pitch * cy,
                   blk_size, ctx->pitch);
        break;
    case 0xF7:
        opcode_0xf7(ctx, cx, cy, blk_size, ctx->pitch);
        break;
    case 0xF8:
        opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        break;
    case 0xF9:
    case 0xFA:
    case 0xFB:
    case 0xFC:
        /* fill with one of the four "small codebook" colors */
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->small_codebook[opcode - 0xf9], blk_size, ctx->pitch);
        break;
    case 0xFD:
        /* fill with a color from the 256-entry codebook */
        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->codebook[bytestream2_get_byteu(&ctx->gb)], blk_size, ctx->pitch);
        break;
    case 0xFE:
        /* fill with a literal LE16 color */
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   bytestream2_get_le16u(&ctx->gb), blk_size, ctx->pitch);
        break;
    case 0xFF:
        /* recurse into four quadrants (literal pixels at size 2) */
        if (blk_size == 2) {
            opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        } else {
            blk_size >>= 1;
            if (codec2subblock(ctx, cx , cy , blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy , blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx , cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
        }
        break;
    }
    return 0;
}
  855. static int decode_2(SANMVideoContext *ctx)
  856. {
  857. int cx, cy, ret;
  858. for (cy = 0; cy < ctx->aligned_height; cy += 8) {
  859. for (cx = 0; cx < ctx->aligned_width; cx += 8) {
  860. if (ret = codec2subblock(ctx, cx, cy, 8))
  861. return ret;
  862. }
  863. }
  864. return 0;
  865. }
static int decode_3(SANMVideoContext *ctx)
{
    /* Subcodec 3: repeat the frame from two frames back (frm2). */
    memcpy(ctx->frm0, ctx->frm2, ctx->frm2_size);
    return 0;
}
static int decode_4(SANMVideoContext *ctx)
{
    /* Subcodec 4: repeat the previous frame (frm1). */
    memcpy(ctx->frm0, ctx->frm1, ctx->frm1_size);
    return 0;
}
  876. static int decode_5(SANMVideoContext *ctx)
  877. {
  878. #if HAVE_BIGENDIAN
  879. uint16_t *frm;
  880. int npixels;
  881. #endif
  882. uint8_t *dst = (uint8_t*)ctx->frm0;
  883. if (rle_decode(ctx, dst, ctx->buf_size))
  884. return AVERROR_INVALIDDATA;
  885. #if HAVE_BIGENDIAN
  886. npixels = ctx->npixels;
  887. frm = ctx->frm0;
  888. while (npixels--)
  889. *frm++ = av_bswap16(*frm);
  890. #endif
  891. return 0;
  892. }
  893. static int decode_6(SANMVideoContext *ctx)
  894. {
  895. int npixels = ctx->npixels;
  896. uint16_t *frm = ctx->frm0;
  897. if (bytestream2_get_bytes_left(&ctx->gb) < npixels) {
  898. av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for frame\n");
  899. return AVERROR_INVALIDDATA;
  900. }
  901. while (npixels--)
  902. *frm++ = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
  903. return 0;
  904. }
  905. static int decode_8(SANMVideoContext *ctx)
  906. {
  907. uint16_t *pdest = ctx->frm0;
  908. uint8_t *rsrc;
  909. long npixels = ctx->npixels;
  910. av_fast_malloc(&ctx->rle_buf, &ctx->rle_buf_size, npixels);
  911. if (!ctx->rle_buf) {
  912. av_log(ctx->avctx, AV_LOG_ERROR, "RLE buffer allocation failed\n");
  913. return AVERROR(ENOMEM);
  914. }
  915. rsrc = ctx->rle_buf;
  916. if (rle_decode(ctx, rsrc, npixels))
  917. return AVERROR_INVALIDDATA;
  918. while (npixels--)
  919. *pdest++ = ctx->codebook[*rsrc++];
  920. return 0;
  921. }
typedef int (*frm_decoder)(SANMVideoContext *ctx);

/* Subcodec dispatch table for the new (16bpp) format, indexed by the
 * codec byte from the frame header; slots with no known implementation
 * map to decode_nop. */
static const frm_decoder v1_decoders[] = {
    decode_0, decode_nop, decode_2, decode_3, decode_4, decode_5,
    decode_6, decode_nop, decode_8
};
/**
 * Parse the fixed-layout version-1 frame header from the packet.
 *
 * The header is read field-by-field at fixed byte offsets, so the order of
 * the bytestream2 calls below must not change. Besides filling *hdr, this
 * also loads the per-frame small codebook (4 entries) and main codebook
 * (256 entries) into the context.
 *
 * @return 0 on success, AVERROR_INVALIDDATA if the packet is shorter than
 *         the 560-byte minimum, AVERROR_PATCHWELCOME on a size change.
 */
static int read_frame_header(SANMVideoContext *ctx, SANMFrameHeader *hdr)
{
    int i, ret;

    if ((ret = bytestream2_get_bytes_left(&ctx->gb)) < 560) {
        av_log(ctx->avctx, AV_LOG_ERROR, "too short input frame (%d bytes)\n",
               ret);
        return AVERROR_INVALIDDATA;
    }
    bytestream2_skip(&ctx->gb, 8); // skip pad

    hdr->width  = bytestream2_get_le32u(&ctx->gb);
    hdr->height = bytestream2_get_le32u(&ctx->gb);

    /* Mid-stream resolution changes are not supported. */
    if (hdr->width != ctx->width || hdr->height != ctx->height) {
        av_log(ctx->avctx, AV_LOG_ERROR, "variable size frames are not implemented\n");
        return AVERROR_PATCHWELCOME;
    }

    hdr->seq_num     = bytestream2_get_le16u(&ctx->gb);
    hdr->codec       = bytestream2_get_byteu(&ctx->gb);
    hdr->rotate_code = bytestream2_get_byteu(&ctx->gb);

    bytestream2_skip(&ctx->gb, 4); // skip pad

    for (i = 0; i < 4; i++)
        ctx->small_codebook[i] = bytestream2_get_le16u(&ctx->gb);
    hdr->bg_color = bytestream2_get_le16u(&ctx->gb);

    bytestream2_skip(&ctx->gb, 2); // skip pad

    hdr->rle_output_size = bytestream2_get_le32u(&ctx->gb);
    for (i = 0; i < 256; i++)
        ctx->codebook[i] = bytestream2_get_le16u(&ctx->gb);

    bytestream2_skip(&ctx->gb, 8); // skip pad

    av_dlog(ctx->avctx, "subcodec %d\n", hdr->codec);

    return 0;
}
  957. static void fill_frame(uint16_t *pbuf, int buf_size, uint16_t color)
  958. {
  959. while (buf_size--)
  960. *pbuf++ = color;
  961. }
  962. static int copy_output(SANMVideoContext *ctx, SANMFrameHeader *hdr)
  963. {
  964. uint8_t *dst;
  965. const uint8_t *src = (uint8_t*) ctx->frm0;
  966. int ret, dstpitch, height = ctx->height;
  967. int srcpitch = ctx->pitch * (hdr ? sizeof(ctx->frm0[0]) : 1);
  968. if ((ret = ff_get_buffer(ctx->avctx, ctx->output)) < 0) {
  969. av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  970. return ret;
  971. }
  972. dst = ctx->output->data[0];
  973. dstpitch = ctx->output->linesize[0];
  974. while (height--) {
  975. memcpy(dst, src, srcpitch);
  976. src += srcpitch;
  977. dst += dstpitch;
  978. }
  979. return 0;
  980. }
/**
 * Decode one packet into an output frame.
 *
 * Version 0 (ctx->version == 0) packets are a sequence of big-endian
 * tagged chunks; version 1+ packets carry a fixed frame header followed
 * by subcodec payload, dispatched through v1_decoders[].
 *
 * @return pkt->size on success, a negative AVERROR code on failure.
 */
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame_ptr, AVPacket *pkt)
{
    SANMVideoContext *ctx = avctx->priv_data;
    int i, ret;

    bytestream2_init(&ctx->gb, pkt->data, pkt->size);

    /* Release the previous output before reusing ctx->output. */
    if (ctx->output->data[0])
        avctx->release_buffer(avctx, ctx->output);

    if (!ctx->version) {
        int to_store = 0;

        /* Walk the chunk stream: 4-byte tag + 4-byte big-endian size. */
        while (bytestream2_get_bytes_left(&ctx->gb) >= 8) {
            uint32_t sig, size;
            int pos;

            sig  = bytestream2_get_be32u(&ctx->gb);
            size = bytestream2_get_be32u(&ctx->gb);
            pos  = bytestream2_tell(&ctx->gb);

            if (bytestream2_get_bytes_left(&ctx->gb) < size) {
                av_log(avctx, AV_LOG_ERROR, "incorrect chunk size %d\n", size);
                break;
            }
            switch (sig) {
            case MKBETAG('N', 'P', 'A', 'L'):
                /* New palette: 256 RGB24 entries. */
                if (size != 256 * 3) {
                    av_log(avctx, AV_LOG_ERROR, "incorrect palette block size %d\n",
                           size);
                    return AVERROR_INVALIDDATA;
                }
                for (i = 0; i < 256; i++)
                    ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
                break;
            case MKBETAG('F', 'O', 'B', 'J'):
                /* Frame object: the actual image data. */
                if (size < 16)
                    return AVERROR_INVALIDDATA;
                if (ret = process_frame_obj(ctx))
                    return ret;
                break;
            case MKBETAG('X', 'P', 'A', 'L'):
                if (size == 6 || size == 4) {
                    /* Short form: apply the previously stored 7.8
                     * fixed-point deltas to every palette component. */
                    uint8_t tmp[3];
                    int j;

                    for (i = 0; i < 256; i++) {
                        for (j = 0; j < 3; j++) {
                            int t = (ctx->pal[i] >> (16 - j * 8)) & 0xFF;
                            tmp[j] = av_clip_uint8((t * 129 + ctx->delta_pal[i * 3 + j]) >> 7);
                        }
                        ctx->pal[i] = 0xFFU << 24 | AV_RB24(tmp);
                    }
                } else {
                    /* Long form: load 768 new deltas, optionally followed
                     * by a full replacement palette. */
                    if (size < 768 * 2 + 4) {
                        av_log(avctx, AV_LOG_ERROR, "incorrect palette change block size %d\n",
                               size);
                        return AVERROR_INVALIDDATA;
                    }
                    bytestream2_skipu(&ctx->gb, 4);
                    for (i = 0; i < 768; i++)
                        ctx->delta_pal[i] = bytestream2_get_le16u(&ctx->gb);
                    if (size >= 768 * 5 + 4) {
                        for (i = 0; i < 256; i++)
                            ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
                    } else {
                        memset(ctx->pal, 0, sizeof(ctx->pal));
                    }
                }
                break;
            case MKBETAG('S', 'T', 'O', 'R'):
                /* Remember this frame after decoding (see FTCH). */
                to_store = 1;
                break;
            case MKBETAG('F', 'T', 'C', 'H'):
                /* Restore the frame saved by a prior STOR. */
                memcpy(ctx->frm0, ctx->stored_frame, ctx->buf_size);
                break;
            default:
                bytestream2_skip(&ctx->gb, size);
                av_log(avctx, AV_LOG_DEBUG, "unknown/unsupported chunk %x\n", sig);
                break;
            }

            /* Re-anchor after the chunk; chunks are 2-byte aligned. */
            bytestream2_seek(&ctx->gb, pos + size, SEEK_SET);
            if (size & 1)
                bytestream2_skip(&ctx->gb, 1);
        }
        if (to_store)
            memcpy(ctx->stored_frame, ctx->frm0, ctx->buf_size);
        if ((ret = copy_output(ctx, NULL)))
            return ret;
        /* data[1] holds the 256-entry ARGB palette (256 * 4 bytes). */
        memcpy(ctx->output->data[1], ctx->pal, 1024);
    } else {
        SANMFrameHeader header;

        if ((ret = read_frame_header(ctx, &header)))
            return ret;

        ctx->rotate_code = header.rotate_code;

        /* Sequence number 0 marks a keyframe: clear both reference
         * frames to the background color. */
        if ((ctx->output->key_frame = !header.seq_num)) {
            ctx->output->pict_type = AV_PICTURE_TYPE_I;
            fill_frame(ctx->frm1, ctx->npixels, header.bg_color);
            fill_frame(ctx->frm2, ctx->npixels, header.bg_color);
        } else {
            ctx->output->pict_type = AV_PICTURE_TYPE_P;
        }

        if (header.codec < FF_ARRAY_ELEMS(v1_decoders)) {
            if ((ret = v1_decoders[header.codec](ctx))) {
                av_log(avctx, AV_LOG_ERROR,
                       "subcodec %d: error decoding frame\n", header.codec);
                return ret;
            }
        } else {
            av_log_ask_for_sample(avctx, "subcodec %d is not implemented\n",
                                  header.codec);
            return AVERROR_PATCHWELCOME;
        }

        if ((ret = copy_output(ctx, &header)))
            return ret;
    }
    if (ctx->rotate_code)
        rotate_bufs(ctx, ctx->rotate_code);

    *got_frame_ptr  = 1;
    *(AVFrame*)data = *ctx->output;

    return pkt->size;
}
/* Decoder descriptor registered with libavcodec. */
AVCodec ff_sanm_decoder = {
    .name           = "sanm",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SANM,
    .priv_data_size = sizeof(SANMVideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("LucasArts SMUSH video"),
};