You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1283 lines
40KB

  1. /*
  2. * LucasArts Smush video decoder
  3. * Copyright (c) 2006 Cyril Zorin
  4. * Copyright (c) 2011 Konstantin Shishkov
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. // #define DEBUG 1
  23. #include "avcodec.h"
  24. #include "bytestream.h"
  25. #include "libavutil/bswap.h"
  26. #include "libavcodec/dsputil.h"
  27. #include "sanm_data.h"
  28. #define NGLYPHS 256
/* Decoder private state. One instance per codec context. */
typedef struct {
    AVCodecContext *avctx;
    GetByteContext gb;              /* bytestream reader over the current packet      */
    int version, subversion;        /* version != 0: 16bpp variant (no extradata);
                                       0: paletted variant, subversion from extradata */
    uint32_t pal[256];              /* palette, alpha forced opaque in decode_init()  */
    int16_t delta_pal[768];         /* NOTE(review): not referenced in this chunk —
                                       presumably delta-palette data; confirm usage   */
    int pitch;                      /* line stride in pixels (set to width)           */
    int width, height;
    int aligned_width, aligned_height; /* dimensions rounded up to multiples of 8     */
    int prev_seq;                   /* last sequence number seen by codec47           */
    AVFrame frame, *output;
    uint16_t *frm0, *frm1, *frm2;   /* triple frame buffers, cycled by rotate_bufs()  */
    uint8_t *stored_frame;          /* allocated only for the paletted variant        */
    uint32_t frm0_size, frm1_size, frm2_size;
    uint32_t stored_frame_size;
    uint8_t *rle_buf;               /* scratch buffer for decode_8()                  */
    unsigned int rle_buf_size;
    int rotate_code;                /* buffer rotation requested by the last frame    */
    long npixels, buf_size;         /* pixel count / frame buffer size in bytes       */
    uint16_t codebook[256];         /* per-frame color tables read by                 */
    uint16_t small_codebook[4];     /*   read_frame_header()                          */
    int8_t p4x4glyphs[NGLYPHS][16]; /* pattern glyphs built once in decode_init()     */
    int8_t p8x8glyphs[NGLYPHS][64];
} SANMVideoContext;
/* Per-frame header fields parsed by read_frame_header() (16bpp variant). */
typedef struct {
    int seq_num, codec, rotate_code, rle_output_size;
    uint16_t bg_color;
    uint32_t width, height;
} SANMFrameHeader;
/* Edge of the glyph box a vector endpoint lies on; see which_edge(). */
enum GlyphEdge {
    LEFT_EDGE,
    TOP_EDGE,
    RIGHT_EDGE,
    BOTTOM_EDGE,
    NO_EDGE
};
/* Fill direction for glyph rasterization; see which_direction(). */
enum GlyphDir {
    DIR_LEFT,
    DIR_UP,
    DIR_RIGHT,
    DIR_DOWN,
    NO_DIR
};
  72. /**
  73. * Return enum GlyphEdge of box where point (x, y) lies.
  74. *
  75. * @param x x point coordinate
  76. * @param y y point coordinate
  77. * @param edge_size box width/height.
  78. */
  79. static enum GlyphEdge which_edge(int x, int y, int edge_size)
  80. {
  81. const int edge_max = edge_size - 1;
  82. if (!y) {
  83. return BOTTOM_EDGE;
  84. } else if (y == edge_max) {
  85. return TOP_EDGE;
  86. } else if (!x) {
  87. return LEFT_EDGE;
  88. } else if (x == edge_max) {
  89. return RIGHT_EDGE;
  90. } else {
  91. return NO_EDGE;
  92. }
  93. }
/*
 * Derive the fill direction from the two edges a vector connects.
 * NOTE(review): the clauses are order-dependent — e.g. a BOTTOM edge pairing
 * resolves to DIR_UP before the later tests are ever reached, and the
 * TOP/BOTTOM pairing falls through to DIR_RIGHT. This mirrors the original
 * game engine's table; do not "simplify" without reference data.
 */
static enum GlyphDir which_direction(enum GlyphEdge edge0, enum GlyphEdge edge1)
{
    if ((edge0 == LEFT_EDGE && edge1 == RIGHT_EDGE) ||
        (edge1 == LEFT_EDGE && edge0 == RIGHT_EDGE) ||
        (edge0 == BOTTOM_EDGE && edge1 != TOP_EDGE) ||
        (edge1 == BOTTOM_EDGE && edge0 != TOP_EDGE)) {
        return DIR_UP;
    } else if ((edge0 == TOP_EDGE && edge1 != BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 != BOTTOM_EDGE)) {
        return DIR_DOWN;
    } else if ((edge0 == LEFT_EDGE && edge1 != RIGHT_EDGE) ||
               (edge1 == LEFT_EDGE && edge0 != RIGHT_EDGE)) {
        return DIR_LEFT;
    } else if ((edge0 == TOP_EDGE && edge1 == BOTTOM_EDGE) ||
               (edge1 == TOP_EDGE && edge0 == BOTTOM_EDGE) ||
               (edge0 == RIGHT_EDGE && edge1 != LEFT_EDGE) ||
               (edge1 == RIGHT_EDGE && edge0 != LEFT_EDGE)) {
        return DIR_RIGHT;
    }
    return NO_DIR;
}
  115. /**
  116. * Interpolate two points.
  117. */
  118. static void interp_point(int8_t *points, int x0, int y0, int x1, int y1,
  119. int pos, int npoints)
  120. {
  121. if (npoints) {
  122. points[0] = (x0 * pos + x1 * (npoints - pos) + (npoints >> 1)) / npoints;
  123. points[1] = (y0 * pos + y1 * (npoints - pos) + (npoints >> 1)) / npoints;
  124. } else {
  125. points[0] = x0;
  126. points[1] = y0;
  127. }
  128. }
/**
 * Construct glyphs by iterating through vector coordinates.
 *
 * For every ordered pair of endpoints from the coordinate tables a glyph is
 * produced: the line between the endpoints is walked point by point, and from
 * each point the glyph is flood-filled toward one box edge, the direction
 * being chosen from the edges the two endpoints lie on.
 *
 * @param pglyphs pointer to table where glyphs are stored
 * @param xvec pointer to x component of vectors coordinates
 * @param yvec pointer to y component of vectors coordinates
 * @param side_length glyph width/height.
 */
static void make_glyphs(int8_t *pglyphs, const int8_t *xvec, const int8_t *yvec,
                        const int side_length)
{
    const int glyph_size = side_length * side_length;
    int8_t *pglyph = pglyphs;
    int i, j;

    /* one glyph per (i, j) endpoint pair; pglyph advances glyph_size each time */
    for (i = 0; i < GLYPH_COORD_VECT_SIZE; i++) {
        int x0 = xvec[i];
        int y0 = yvec[i];
        enum GlyphEdge edge0 = which_edge(x0, y0, side_length);

        for (j = 0; j < GLYPH_COORD_VECT_SIZE; j++, pglyph += glyph_size) {
            int x1 = xvec[j];
            int y1 = yvec[j];
            enum GlyphEdge edge1 = which_edge(x1, y1, side_length);
            enum GlyphDir dir = which_direction(edge0, edge1);
            /* number of interpolation steps = Chebyshev distance */
            int npoints = FFMAX(FFABS(x1 - x0), FFABS(y1 - y0));
            int ipoint;

            for (ipoint = 0; ipoint <= npoints; ipoint++) {
                int8_t point[2];
                int irow, icol;

                interp_point(point, x0, y0, x1, y1, ipoint, npoints);
                /* fill from the interpolated point out to the chosen edge;
                 * NO_DIR leaves the column/row untouched */
                switch (dir) {
                case DIR_UP:
                    for (irow = point[1]; irow >= 0; irow--)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;
                case DIR_DOWN:
                    for (irow = point[1]; irow < side_length; irow++)
                        pglyph[point[0] + irow * side_length] = 1;
                    break;
                case DIR_LEFT:
                    for (icol = point[0]; icol >= 0; icol--)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;
                case DIR_RIGHT:
                    for (icol = point[0]; icol < side_length; icol++)
                        pglyph[icol + point[1] * side_length] = 1;
                    break;
                }
            }
        }
    }
}
  180. static void init_sizes(SANMVideoContext *ctx, int width, int height)
  181. {
  182. ctx->width = width;
  183. ctx->height = height;
  184. ctx->npixels = width * height;
  185. ctx->aligned_width = FFALIGN(width, 8);
  186. ctx->aligned_height = FFALIGN(height, 8);
  187. ctx->buf_size = ctx->aligned_width * ctx->aligned_height * sizeof(ctx->frm0[0]);
  188. ctx->pitch = width;
  189. }
/* Free all frame and scratch buffers; safe to call on partially allocated state. */
static void destroy_buffers(SANMVideoContext *ctx)
{
    av_freep(&ctx->frm0);
    av_freep(&ctx->frm1);
    av_freep(&ctx->frm2);
    av_freep(&ctx->stored_frame);
    av_freep(&ctx->rle_buf);
}
/*
 * (Re)allocate the three frame buffers (and, for the paletted variant,
 * the stored frame) to ctx->buf_size bytes. On failure everything is
 * released and AVERROR(ENOMEM) is returned.
 */
static av_cold int init_buffers(SANMVideoContext *ctx)
{
    av_fast_padded_malloc(&ctx->frm0, &ctx->frm0_size, ctx->buf_size);
    av_fast_padded_malloc(&ctx->frm1, &ctx->frm1_size, ctx->buf_size);
    av_fast_padded_malloc(&ctx->frm2, &ctx->frm2_size, ctx->buf_size);
    if (!ctx->version)
        /* old paletted codecs keep an extra stored frame */
        av_fast_padded_malloc(&ctx->stored_frame, &ctx->stored_frame_size, ctx->buf_size);

    if (!ctx->frm0 || !ctx->frm1 || !ctx->frm2 || (!ctx->stored_frame && !ctx->version)) {
        destroy_buffers(ctx);
        return AVERROR(ENOMEM);
    }

    return 0;
}
/*
 * Cycle the frame buffers after a decoded frame.
 * rotate_code 1: frm0 -> frm2 (current becomes previous);
 * rotate_code 2: additionally frm1 <-> frm2 first, i.e. a full 3-way rotation.
 */
static void rotate_bufs(SANMVideoContext *ctx, int rotate_code)
{
    av_dlog(ctx->avctx, "rotate %d\n", rotate_code);
    if (rotate_code == 2)
        FFSWAP(uint16_t*, ctx->frm1, ctx->frm2);
    FFSWAP(uint16_t*, ctx->frm2, ctx->frm0);
}
/*
 * Decoder init. The variant is detected from the presence of extradata:
 * no extradata -> 16bpp RGB565 variant; extradata present -> paletted
 * variant, whose extradata carries a 16-bit subversion followed by a
 * 256-entry 32-bit palette (1026 bytes minimum).
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    SANMVideoContext *ctx = avctx->priv_data;
    ctx->avctx = avctx;
    ctx->version = !avctx->extradata_size;

    avctx->pix_fmt = ctx->version ? PIX_FMT_RGB565 : PIX_FMT_PAL8;

    init_sizes(ctx, avctx->width, avctx->height);
    if (init_buffers(ctx)) {
        av_log(avctx, AV_LOG_ERROR, "error allocating buffers\n");
        return AVERROR(ENOMEM);
    }
    ctx->output = &ctx->frame;
    ctx->output->data[0] = 0;

    /* build the two pattern-glyph tables used by the block decoders */
    make_glyphs(ctx->p4x4glyphs[0], glyph4_x, glyph4_y, 4);
    make_glyphs(ctx->p8x8glyphs[0], glyph8_x, glyph8_y, 8);

    if (!ctx->version) {
        int i;

        if (avctx->extradata_size < 1026) {
            av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
            return AVERROR_INVALIDDATA;
        }

        ctx->subversion = AV_RL16(avctx->extradata);
        /* force the alpha channel opaque */
        for (i = 0; i < 256; i++)
            ctx->pal[i] = 0xFF << 24 | AV_RL32(avctx->extradata + 2 + i * 4);
    }

    return 0;
}
/* Decoder teardown: release buffers and any frame still held by us. */
static av_cold int decode_end(AVCodecContext *avctx)
{
    SANMVideoContext *ctx = avctx->priv_data;

    destroy_buffers(ctx);

    if (ctx->frame.data[0]) {
        avctx->release_buffer(avctx, &ctx->frame);
        ctx->frame.data[0] = 0;
    }

    return 0;
}
/*
 * Decode a byte-oriented RLE stream into dst.
 * Each opcode byte encodes a run length of (opcode >> 1) + 1; if the low
 * bit is set, a single following color byte is repeated, otherwise the
 * run is copied literally from the stream. Exactly out_size bytes are
 * produced or AVERROR_INVALIDDATA is returned.
 */
static int rle_decode(SANMVideoContext *ctx, uint8_t *dst, const int out_size)
{
    int opcode, color, run_len, left = out_size;

    while (left > 0) {
        opcode = bytestream2_get_byte(&ctx->gb);
        run_len = (opcode >> 1) + 1;
        /* reject runs that overflow the output or an exhausted stream */
        if (run_len > left || bytestream2_get_bytes_left(&ctx->gb) <= 0)
            return AVERROR_INVALIDDATA;

        if (opcode & 1) {
            color = bytestream2_get_byte(&ctx->gb);
            memset(dst, color, run_len);
        } else {
            if (bytestream2_get_bytes_left(&ctx->gb) < run_len)
                return AVERROR_INVALIDDATA;
            bytestream2_get_bufferu(&ctx->gb, dst, run_len);
        }

        dst += run_len;
        left -= run_len;
    }

    return 0;
}
/*
 * Decode a codec1/codec3 frame object: per-line RLE where zero values are
 * transparent (they leave the destination pixel untouched).
 * Each line starts with a 16-bit byte count for that line's coded data.
 */
static int old_codec1(SANMVideoContext *ctx, int top,
                      int left, int width, int height)
{
    uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * ctx->pitch;
    int i, j, len, flag, code, val, pos, end;

    for (i = 0; i < height; i++) {
        pos = 0;

        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;

        len = bytestream2_get_le16u(&ctx->gb);
        end = bytestream2_tell(&ctx->gb) + len;

        while (bytestream2_tell(&ctx->gb) < end) {
            if (bytestream2_get_bytes_left(&ctx->gb) < 2)
                return AVERROR_INVALIDDATA;

            code = bytestream2_get_byteu(&ctx->gb);
            flag = code & 1;
            code = (code >> 1) + 1;
            if (pos + code > width)
                return AVERROR_INVALIDDATA;
            if (flag) {
                /* run of a single value; 0 means "leave as is" */
                val = bytestream2_get_byteu(&ctx->gb);
                if (val)
                    memset(dst + pos, val, code);
                pos += code;
            } else {
                if (bytestream2_get_bytes_left(&ctx->gb) < code)
                    return AVERROR_INVALIDDATA;
                /* literal bytes, again with 0 as transparency */
                for (j = 0; j < code; j++) {
                    val = bytestream2_get_byteu(&ctx->gb);
                    if (val)
                        dst[pos] = val;
                    pos++;
                }
            }
        }
        dst += ctx->pitch;
    }
    ctx->rotate_code = 0;

    return 0;
}
/*
 * Copy a motion-compensated 4x4 block for codec37, writing 0 for any
 * source position that falls outside the frame buffer.
 * (x, y) is the absolute source position of the block's top-left pixel,
 * used only for the bounds check; src already points at that position.
 */
static inline void codec37_mv(uint8_t *dst, const uint8_t *src,
                              int height, int stride, int x, int y)
{
    int pos, i, j;

    pos = x + y * stride;
    for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++) {
            if ((pos + i) < 0 || (pos + i) >= height * stride)
                dst[i] = 0;
            else
                dst[i] = src[i];
        }
        dst += stride;
        src += stride;
        pos += stride;
    }
}
/*
 * Decode a codec37 frame object (8bpp, 4x4 block based).
 * Compressions: 0 = raw rows, 2 = RLE full frame, 3/4 = per-block coding
 * with motion compensation against the previous frame (frm2); compr 4
 * additionally supports skip runs (code 0).
 */
static int old_codec37(SANMVideoContext *ctx, int top,
                       int left, int width, int height)
{
    int stride = ctx->pitch;
    int i, j, k, t;
    int skip_run = 0;
    int compr, mvoff, seq, flags;
    uint32_t decoded_size;
    uint8_t *dst, *prev;

    /* subchunk header */
    compr = bytestream2_get_byte(&ctx->gb);
    mvoff = bytestream2_get_byte(&ctx->gb);      /* motion table selector (0..2) */
    seq = bytestream2_get_le16(&ctx->gb);
    decoded_size = bytestream2_get_le32(&ctx->gb);
    bytestream2_skip(&ctx->gb, 4);
    flags = bytestream2_get_byte(&ctx->gb);
    bytestream2_skip(&ctx->gb, 3);

    ctx->rotate_code = 0;

    /* inter-coded frames rotate the buffers so frm2 holds the reference */
    if (((seq & 1) || !(flags & 1)) && (compr && compr != 2))
        rotate_bufs(ctx, 1);

    dst = ((uint8_t*)ctx->frm0) + left + top * stride;
    prev = ((uint8_t*)ctx->frm2) + left + top * stride;

    if (mvoff > 2) {
        av_log(ctx->avctx, AV_LOG_ERROR, "invalid motion base value %d\n", mvoff);
        return AVERROR_INVALIDDATA;
    }
    av_dlog(ctx->avctx, "compression %d\n", compr);
    switch (compr) {
    case 0:
        /* raw rows; reference frames are cleared since nothing valid is left */
        for (i = 0; i < height; i++) {
            bytestream2_get_buffer(&ctx->gb, dst, width);
            dst += stride;
        }
        memset(ctx->frm1, 0, ctx->height * stride);
        memset(ctx->frm2, 0, ctx->height * stride);
        break;
    case 2:
        /* RLE-coded full frame */
        if (rle_decode(ctx, dst, decoded_size))
            return AVERROR_INVALIDDATA;
        memset(ctx->frm1, 0, ctx->frm1_size);
        memset(ctx->frm2, 0, ctx->frm2_size);
        break;
    case 3:
    case 4:
        if (flags & 4) {
            /* variant with extra per-block literal/fill opcodes */
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;
                    if (skip_run) {
                        /* copy block from the reference frame */
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }
                    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                        return AVERROR_INVALIDDATA;
                    code = bytestream2_get_byteu(&ctx->gb);
                    switch (code) {
                    case 0xFF:
                        /* 16 literal pixels */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                        break;
                    case 0xFE:
                        /* 4 per-row fill colors */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, bytestream2_get_byteu(&ctx->gb), 4);
                        break;
                    case 0xFD:
                        /* single fill color for the whole block */
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        t = bytestream2_get_byteu(&ctx->gb);
                        for (k = 0; k < 4; k++)
                            memset(dst + i + k * stride, t, 4);
                        break;
                    default:
                        if (compr == 4 && !code) {
                            /* start of a skip run; i -= 4 replays this block */
                            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                                return AVERROR_INVALIDDATA;
                            skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                            i -= 4;
                        } else {
                            /* motion-compensated copy via the selected MV table */
                            int mx, my;
                            mx = c37_mv[(mvoff * 255 + code) * 2 ];
                            my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                            codec37_mv(dst + i, prev + i + mx + my * stride,
                                       ctx->height, stride, i + mx, j + my);
                        }
                    }
                }
                dst += stride * 4;
                prev += stride * 4;
            }
        } else {
            /* variant with only literal (0xFF), skip and MV opcodes */
            for (j = 0; j < height; j += 4) {
                for (i = 0; i < width; i += 4) {
                    int code;
                    if (skip_run) {
                        skip_run--;
                        copy_block4(dst + i, prev + i, stride, stride, 4);
                        continue;
                    }
                    code = bytestream2_get_byte(&ctx->gb);
                    if (code == 0xFF) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 16)
                            return AVERROR_INVALIDDATA;
                        for (k = 0; k < 4; k++)
                            bytestream2_get_bufferu(&ctx->gb, dst + i + k * stride, 4);
                    } else if (compr == 4 && !code) {
                        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                            return AVERROR_INVALIDDATA;
                        skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
                        i -= 4;
                    } else {
                        int mx, my;
                        mx = c37_mv[(mvoff * 255 + code) * 2];
                        my = c37_mv[(mvoff * 255 + code) * 2 + 1];
                        codec37_mv(dst + i, prev + i + mx + my * stride,
                                   ctx->height, stride, i + mx, j + my);
                    }
                }
                dst += stride * 4;
                prev += stride * 4;
            }
        }
        break;
    default:
        av_log(ctx->avctx, AV_LOG_ERROR,
               "subcodec 37 compression %d not implemented\n", compr);
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
/*
 * Decode one codec47 block of the given size, recursing into quadrants
 * for opcode 0xFF. Opcodes below 0xF8 are motion vectors into prev2;
 * 0xFC copies from prev1; 0xFD draws a two-color glyph; 0xFE fills with
 * a literal color; the remaining opcodes fill with a color fetched from
 * the small table at stream position tbl.
 */
static int process_block(SANMVideoContext *ctx, uint8_t *dst, uint8_t *prev1,
                         uint8_t *prev2, int stride, int tbl, int size)
{
    int code, k, t;
    uint8_t colors[2];
    int8_t *pglyph;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    code = bytestream2_get_byteu(&ctx->gb);
    if (code >= 0xF8) {
        switch (code) {
        case 0xFF:
            /* subdivide: 4 raw pixels at size 2, else recurse into quadrants */
            if (size == 2) {
                if (bytestream2_get_bytes_left(&ctx->gb) < 4)
                    return AVERROR_INVALIDDATA;
                dst[0] = bytestream2_get_byteu(&ctx->gb);
                dst[1] = bytestream2_get_byteu(&ctx->gb);
                dst[0+stride] = bytestream2_get_byteu(&ctx->gb);
                dst[1+stride] = bytestream2_get_byteu(&ctx->gb);
            } else {
                size >>= 1;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                dst += size * stride;
                prev1 += size * stride;
                prev2 += size * stride;
                if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
                    return AVERROR_INVALIDDATA;
                if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
                                  stride, tbl, size))
                    return AVERROR_INVALIDDATA;
            }
            break;
        case 0xFE:
            /* solid fill with a literal color */
            if (bytestream2_get_bytes_left(&ctx->gb) < 1)
                return AVERROR_INVALIDDATA;
            t = bytestream2_get_byteu(&ctx->gb);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
            break;
        case 0xFD:
            /* two-color glyph pattern */
            if (bytestream2_get_bytes_left(&ctx->gb) < 3)
                return AVERROR_INVALIDDATA;
            code = bytestream2_get_byteu(&ctx->gb);
            pglyph = (size == 8) ? ctx->p8x8glyphs[code] : ctx->p4x4glyphs[code];
            bytestream2_get_bufferu(&ctx->gb, colors, 2);
            for (k = 0; k < size; k++)
                for (t = 0; t < size; t++)
                    dst[t + k * stride] = colors[!*pglyph++];
            break;
        case 0xFC:
            /* copy co-located block from prev1 */
            for (k = 0; k < size; k++)
                memcpy(dst + k * stride, prev1 + k * stride, size);
            break;
        default:
            /* fill with a color from the 8-entry header table; seek there
             * and back so the main read position is preserved */
            k = bytestream2_tell(&ctx->gb);
            bytestream2_seek(&ctx->gb, tbl + (code & 7), SEEK_SET);
            t = bytestream2_get_byte(&ctx->gb);
            bytestream2_seek(&ctx->gb, k, SEEK_SET);
            for (k = 0; k < size; k++)
                memset(dst + k * stride, t, size);
        }
    } else {
        /* motion-compensated copy from prev2 */
        int mx = motion_vectors[code][0];
        int my = motion_vectors[code][1];
        for (k = 0; k < size; k++)
            memcpy(dst + k * stride, prev2 + mx + (my + k) * stride, size);
    }

    return 0;
}
  539. static int old_codec47(SANMVideoContext *ctx, int top,
  540. int left, int width, int height)
  541. {
  542. int i, j, seq, compr, new_rot, tbl_pos, skip;
  543. int stride = ctx->pitch;
  544. uint8_t *dst = ((uint8_t*)ctx->frm0) + left + top * stride;
  545. uint8_t *prev1 = (uint8_t*)ctx->frm1;
  546. uint8_t *prev2 = (uint8_t*)ctx->frm2;
  547. uint32_t decoded_size;
  548. tbl_pos = bytestream2_tell(&ctx->gb);
  549. seq = bytestream2_get_le16(&ctx->gb);
  550. compr = bytestream2_get_byte(&ctx->gb);
  551. new_rot = bytestream2_get_byte(&ctx->gb);
  552. skip = bytestream2_get_byte(&ctx->gb);
  553. bytestream2_skip(&ctx->gb, 9);
  554. decoded_size = bytestream2_get_le32(&ctx->gb);
  555. bytestream2_skip(&ctx->gb, 8);
  556. if (skip & 1)
  557. bytestream2_skip(&ctx->gb, 0x8080);
  558. if (!seq) {
  559. ctx->prev_seq = -1;
  560. memset(prev1, 0, ctx->height * stride);
  561. memset(prev2, 0, ctx->height * stride);
  562. }
  563. av_dlog(ctx->avctx, "compression %d\n", compr);
  564. switch (compr) {
  565. case 0:
  566. if (bytestream2_get_bytes_left(&ctx->gb) < width * height)
  567. return AVERROR_INVALIDDATA;
  568. for (j = 0; j < height; j++) {
  569. for (i = 0; i < width; i++)
  570. bytestream2_get_bufferu(&ctx->gb, dst, width);
  571. dst += stride;
  572. }
  573. break;
  574. case 1:
  575. if (bytestream2_get_bytes_left(&ctx->gb) < ((width + 1) >> 1) * ((height + 1) >> 1))
  576. return AVERROR_INVALIDDATA;
  577. for (j = 0; j < height; j += 2) {
  578. for (i = 0; i < width; i += 2) {
  579. dst[i] = dst[i + 1] =
  580. dst[stride + i] = dst[stride + i + 1] = bytestream2_get_byteu(&ctx->gb);
  581. }
  582. dst += stride * 2;
  583. }
  584. break;
  585. case 2:
  586. if (seq == ctx->prev_seq + 1) {
  587. for (j = 0; j < height; j += 8) {
  588. for (i = 0; i < width; i += 8) {
  589. if (process_block(ctx, dst + i, prev1 + i, prev2 + i, stride,
  590. tbl_pos + 8, 8))
  591. return AVERROR_INVALIDDATA;
  592. }
  593. dst += stride * 8;
  594. prev1 += stride * 8;
  595. prev2 += stride * 8;
  596. }
  597. }
  598. break;
  599. case 3:
  600. memcpy(ctx->frm0, ctx->frm2, ctx->pitch * ctx->height);
  601. break;
  602. case 4:
  603. memcpy(ctx->frm0, ctx->frm1, ctx->pitch * ctx->height);
  604. break;
  605. case 5:
  606. if (rle_decode(ctx, dst, decoded_size))
  607. return AVERROR_INVALIDDATA;
  608. break;
  609. default:
  610. av_log(ctx->avctx, AV_LOG_ERROR,
  611. "subcodec 47 compression %d not implemented\n", compr);
  612. return AVERROR_PATCHWELCOME;
  613. }
  614. if (seq == ctx->prev_seq + 1)
  615. ctx->rotate_code = new_rot;
  616. else
  617. ctx->rotate_code = 0;
  618. ctx->prev_seq = seq;
  619. return 0;
  620. }
/*
 * Parse a FOBJ header (codec id and placement rectangle), grow the frame
 * buffers if the object extends past the current frame size, then dispatch
 * to the matching subcodec decoder.
 */
static int process_frame_obj(SANMVideoContext *ctx)
{
    uint16_t codec, top, left, w, h;

    codec = bytestream2_get_le16u(&ctx->gb);
    left = bytestream2_get_le16u(&ctx->gb);
    top = bytestream2_get_le16u(&ctx->gb);
    w = bytestream2_get_le16u(&ctx->gb);
    h = bytestream2_get_le16u(&ctx->gb);

    if (ctx->width < left + w || ctx->height < top + h) {
        /* object sticks out of the frame: enlarge picture and buffers */
        ctx->avctx->width = FFMAX(left + w, ctx->width);
        ctx->avctx->height = FFMAX(top + h, ctx->height);
        init_sizes(ctx, left + w, top + h);
        if (init_buffers(ctx)) {
            av_log(ctx->avctx, AV_LOG_ERROR, "error resizing buffers\n");
            return AVERROR(ENOMEM);
        }
    }
    bytestream2_skip(&ctx->gb, 4);

    av_dlog(ctx->avctx, "subcodec %d\n", codec);
    switch (codec) {
    case 1:
    case 3:
        return old_codec1(ctx, top, left, w, h);
        break;
    case 37:
        return old_codec37(ctx, top, left, w, h);
        break;
    case 47:
        return old_codec47(ctx, top, left, w, h);
        break;
    default:
        av_log_ask_for_sample(ctx->avctx, "unknown subcodec %d\n", codec);
        return AVERROR_PATCHWELCOME;
    }
}
/* Subcodec 0 (16bpp variant): raw little-endian 16-bit pixels. */
static int decode_0(SANMVideoContext *ctx)
{
    uint16_t *frm = ctx->frm0;
    int x, y;

    if (bytestream2_get_bytes_left(&ctx->gb) < ctx->width * ctx->height * 2) {
        av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for raw frame\n");
        return AVERROR_INVALIDDATA;
    }
    for (y = 0; y < ctx->height; y++) {
        for (x = 0; x < ctx->width; x++)
            frm[x] = bytestream2_get_le16u(&ctx->gb);
        frm += ctx->pitch;
    }
    return 0;
}
/* Placeholder for subcodec ids with no known semantics (table slots 1 and 7). */
static int decode_nop(SANMVideoContext *ctx)
{
    av_log_ask_for_sample(ctx->avctx, "unknown/unsupported compression type\n");
    return AVERROR_PATCHWELCOME;
}
/*
 * Copy a square block of 16-bit pixels via the dsputil byte-based helpers;
 * block_size is in pixels, so the byte stride and copy widths are doubled.
 */
static void copy_block(uint16_t *pdest, uint16_t *psrc, int block_size, int pitch)
{
    uint8_t *dst = (uint8_t *)pdest;
    uint8_t *src = (uint8_t *)psrc;
    int stride = pitch * 2;

    switch (block_size) {
    case 2:
        copy_block4(dst, src, stride, stride, 2);
        break;
    case 4:
        copy_block8(dst, src, stride, stride, 4);
        break;
    case 8:
        copy_block16(dst, src, stride, stride, 8);
        break;
    }
}
  693. static void fill_block(uint16_t *pdest, uint16_t color, int block_size, int pitch)
  694. {
  695. int x, y;
  696. pitch -= block_size;
  697. for (y = 0; y < block_size; y++, pdest += pitch)
  698. for (x = 0; x < block_size; x++)
  699. *pdest++ = color;
  700. }
  701. static int draw_glyph(SANMVideoContext *ctx, uint16_t *dst, int index, uint16_t fg_color,
  702. uint16_t bg_color, int block_size, int pitch)
  703. {
  704. int8_t *pglyph;
  705. uint16_t colors[2] = { fg_color, bg_color };
  706. int x, y;
  707. if (index > NGLYPHS) {
  708. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring nonexistent glyph #%u\n", index);
  709. return AVERROR_INVALIDDATA;
  710. }
  711. pglyph = block_size == 8 ? ctx->p8x8glyphs[index] : ctx->p4x4glyphs[index];
  712. pitch -= block_size;
  713. for (y = 0; y < block_size; y++, dst += pitch)
  714. for (x = 0; x < block_size; x++)
  715. *dst++ = colors[*pglyph++];
  716. return 0;
  717. }
/*
 * Codec2 opcode 0xF7: codebook-indexed block. At size 2 the four pixels
 * come from four packed codebook indices; larger blocks are drawn as a
 * glyph whose colors are looked up in the codebook.
 * NOTE(review): dst is computed with ctx->pitch while writes use the
 * pitch parameter — callers pass ctx->pitch, so they agree; confirm if
 * a new caller is added.
 */
static int opcode_0xf7(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
{
    uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;

    if (block_size == 2) {
        uint32_t indices;

        if (bytestream2_get_bytes_left(&ctx->gb) < 4)
            return AVERROR_INVALIDDATA;

        indices = bytestream2_get_le32u(&ctx->gb);
        dst[0] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[1] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch] = ctx->codebook[indices & 0xFF]; indices >>= 8;
        dst[pitch + 1] = ctx->codebook[indices & 0xFF];
    } else {
        uint16_t fgcolor, bgcolor;
        int glyph;

        if (bytestream2_get_bytes_left(&ctx->gb) < 3)
            return AVERROR_INVALIDDATA;

        glyph = bytestream2_get_byteu(&ctx->gb);
        bgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
        fgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];

        draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
    }
    return 0;
}
/*
 * Codec2 opcode 0xF8: like 0xF7 but with literal 16-bit colors instead of
 * codebook indices — four raw pixels at size 2, otherwise a glyph with
 * literal foreground/background colors.
 */
static int opcode_0xf8(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
{
    uint16_t *dst = ctx->frm0 + cx + cy * ctx->pitch;

    if (block_size == 2) {
        if (bytestream2_get_bytes_left(&ctx->gb) < 8)
            return AVERROR_INVALIDDATA;

        dst[0] = bytestream2_get_le16u(&ctx->gb);
        dst[1] = bytestream2_get_le16u(&ctx->gb);
        dst[pitch] = bytestream2_get_le16u(&ctx->gb);
        dst[pitch + 1] = bytestream2_get_le16u(&ctx->gb);
    } else {
        uint16_t fgcolor, bgcolor;
        int glyph;

        if (bytestream2_get_bytes_left(&ctx->gb) < 5)
            return AVERROR_INVALIDDATA;

        glyph = bytestream2_get_byteu(&ctx->gb);
        bgcolor = bytestream2_get_le16u(&ctx->gb);
        fgcolor = bytestream2_get_le16u(&ctx->gb);

        draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
    }
    return 0;
}
  764. static int good_mvec(SANMVideoContext *ctx, int cx, int cy, int mx, int my,
  765. int block_size)
  766. {
  767. int start_pos = cx + mx + (cy + my) * ctx->pitch;
  768. int end_pos = start_pos + (block_size - 1) * (ctx->pitch + 1);
  769. int good = start_pos >= 0 && end_pos < (ctx->buf_size >> 1);
  770. if (!good) {
  771. av_log(ctx->avctx, AV_LOG_ERROR, "ignoring invalid motion vector (%i, %i)->(%u, %u), block size = %u\n",
  772. cx + mx, cy + my, cx, cy, block_size);
  773. }
  774. return good;
  775. }
/*
 * Decode one block of the 16bpp subcodec 2, recursing into quadrants for
 * opcode 0xFF. Opcodes < 0xF5 are motion vectors into frm2; 0xF5 is a
 * long-range vector encoded as a 16-bit offset; 0xF6 copies from frm1;
 * 0xF7/0xF8 are handled by their own helpers; 0xF9-0xFC fill from the
 * small codebook; 0xFD fills from the main codebook; 0xFE fills with a
 * literal color.
 */
static int codec2subblock(SANMVideoContext *ctx, int cx, int cy, int blk_size)
{
    int16_t mx, my, index;
    int opcode;

    if (bytestream2_get_bytes_left(&ctx->gb) < 1)
        return AVERROR_INVALIDDATA;

    opcode = bytestream2_get_byteu(&ctx->gb);

    av_dlog(ctx->avctx, "opcode 0x%0X cx %d cy %d blk %d\n", opcode, cx, cy, blk_size);
    switch (opcode) {
    default:
        /* table-coded motion vector; invalid vectors are silently skipped */
        mx = motion_vectors[opcode][0];
        my = motion_vectors[opcode][1];

        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF5:
        /* explicit motion vector as linear offset = my * width + mx */
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        index = bytestream2_get_le16u(&ctx->gb);

        mx = index % ctx->width;
        my = index / ctx->width;

        if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
            copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                       ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
                       blk_size, ctx->pitch);
        }
        break;
    case 0xF6:
        /* copy co-located block from the other reference frame */
        copy_block(ctx->frm0 + cx + ctx->pitch * cy,
                   ctx->frm1 + cx + ctx->pitch * cy,
                   blk_size, ctx->pitch);
        break;
    case 0xF7:
        opcode_0xf7(ctx, cx, cy, blk_size, ctx->pitch);
        break;

    case 0xF8:
        opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        break;
    case 0xF9:
    case 0xFA:
    case 0xFB:
    case 0xFC:
        /* fill with one of the four per-frame small-codebook colors */
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->small_codebook[opcode - 0xf9], blk_size, ctx->pitch);
        break;
    case 0xFD:
        if (bytestream2_get_bytes_left(&ctx->gb) < 1)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   ctx->codebook[bytestream2_get_byteu(&ctx->gb)], blk_size, ctx->pitch);
        break;
    case 0xFE:
        if (bytestream2_get_bytes_left(&ctx->gb) < 2)
            return AVERROR_INVALIDDATA;
        fill_block(ctx->frm0 + cx + cy * ctx->pitch,
                   bytestream2_get_le16u(&ctx->gb), blk_size, ctx->pitch);
        break;
    case 0xFF:
        /* subdivide into quadrants (raw 2x2 handled by opcode_0xf8) */
        if (blk_size == 2) {
            opcode_0xf8(ctx, cx, cy, blk_size, ctx->pitch);
        } else {
            blk_size >>= 1;
            if (codec2subblock(ctx, cx , cy , blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy , blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx , cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
            if (codec2subblock(ctx, cx + blk_size, cy + blk_size, blk_size))
                return AVERROR_INVALIDDATA;
        }
        break;
    }
    return 0;
}
  854. static int decode_2(SANMVideoContext *ctx)
  855. {
  856. int cx, cy, ret;
  857. for (cy = 0; cy < ctx->aligned_height; cy += 8) {
  858. for (cx = 0; cx < ctx->aligned_width; cx += 8) {
  859. if (ret = codec2subblock(ctx, cx, cy, 8))
  860. return ret;
  861. }
  862. }
  863. return 0;
  864. }
/* Subcodec 3: copy the older reference frame (frm2) verbatim. */
static int decode_3(SANMVideoContext *ctx)
{
    memcpy(ctx->frm0, ctx->frm2, ctx->frm2_size);
    return 0;
}
/* Subcodec 4: copy the newer reference frame (frm1) verbatim. */
static int decode_4(SANMVideoContext *ctx)
{
    memcpy(ctx->frm0, ctx->frm1, ctx->frm1_size);
    return 0;
}
  875. static int decode_5(SANMVideoContext *ctx)
  876. {
  877. #if HAVE_BIGENDIAN
  878. uint16_t *frm;
  879. int npixels;
  880. #endif
  881. uint8_t *dst = (uint8_t*)ctx->frm0;
  882. if (rle_decode(ctx, dst, ctx->buf_size))
  883. return AVERROR_INVALIDDATA;
  884. #if HAVE_BIGENDIAN
  885. npixels = ctx->npixels;
  886. frm = ctx->frm0;
  887. while (npixels--)
  888. *frm++ = av_bswap16(*frm);
  889. #endif
  890. return 0;
  891. }
/* Subcodec 6: one codebook index per pixel. */
static int decode_6(SANMVideoContext *ctx)
{
    int npixels = ctx->npixels;
    uint16_t *frm = ctx->frm0;

    if (bytestream2_get_bytes_left(&ctx->gb) < npixels) {
        av_log(ctx->avctx, AV_LOG_ERROR, "insufficient data for frame\n");
        return AVERROR_INVALIDDATA;
    }
    while (npixels--)
        *frm++ = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];

    return 0;
}
/*
 * Subcodec 8: RLE-compressed stream of codebook indices. The indices are
 * first expanded into a scratch buffer, then mapped through the codebook
 * into 16-bit pixels.
 */
static int decode_8(SANMVideoContext *ctx)
{
    uint16_t *pdest = ctx->frm0;
    uint8_t *rsrc;
    long npixels = ctx->npixels;

    av_fast_malloc(&ctx->rle_buf, &ctx->rle_buf_size, npixels);
    if (!ctx->rle_buf) {
        av_log(ctx->avctx, AV_LOG_ERROR, "RLE buffer allocation failed\n");
        return AVERROR(ENOMEM);
    }
    rsrc = ctx->rle_buf;

    if (rle_decode(ctx, rsrc, npixels))
        return AVERROR_INVALIDDATA;

    while (npixels--)
        *pdest++ = ctx->codebook[*rsrc++];

    return 0;
}
/* Dispatch table for the 16bpp variant, indexed by the header codec field;
 * slots 1 and 7 have no known meaning and fall through to decode_nop. */
typedef int (*frm_decoder)(SANMVideoContext *ctx);

static const frm_decoder v1_decoders[] = {
    decode_0, decode_nop, decode_2, decode_3, decode_4, decode_5,
    decode_6, decode_nop, decode_8
};
  926. static int read_frame_header(SANMVideoContext *ctx, SANMFrameHeader *hdr)
  927. {
  928. int i, ret;
  929. if ((ret = bytestream2_get_bytes_left(&ctx->gb)) < 560) {
  930. av_log(ctx->avctx, AV_LOG_ERROR, "too short input frame (%d bytes)\n",
  931. ret);
  932. return AVERROR_INVALIDDATA;
  933. }
  934. bytestream2_skip(&ctx->gb, 8); // skip pad
  935. hdr->width = bytestream2_get_le32u(&ctx->gb);
  936. hdr->height = bytestream2_get_le32u(&ctx->gb);
  937. if (hdr->width != ctx->width || hdr->height != ctx->height) {
  938. av_log(ctx->avctx, AV_LOG_ERROR, "variable size frames are not implemented\n");
  939. return AVERROR_PATCHWELCOME;
  940. }
  941. hdr->seq_num = bytestream2_get_le16u(&ctx->gb);
  942. hdr->codec = bytestream2_get_byteu(&ctx->gb);
  943. hdr->rotate_code = bytestream2_get_byteu(&ctx->gb);
  944. bytestream2_skip(&ctx->gb, 4); // skip pad
  945. for (i = 0; i < 4; i++)
  946. ctx->small_codebook[i] = bytestream2_get_le16u(&ctx->gb);
  947. hdr->bg_color = bytestream2_get_le16u(&ctx->gb);
  948. bytestream2_skip(&ctx->gb, 2); // skip pad
  949. hdr->rle_output_size = bytestream2_get_le32u(&ctx->gb);
  950. for (i = 0; i < 256; i++)
  951. ctx->codebook[i] = bytestream2_get_le16u(&ctx->gb);
  952. bytestream2_skip(&ctx->gb, 8); // skip pad
  953. av_dlog(ctx->avctx, "subcodec %d\n", hdr->codec);
  954. return 0;
  955. }
  956. static void fill_frame(uint16_t *pbuf, int buf_size, uint16_t color)
  957. {
  958. while (buf_size--)
  959. *pbuf++ = color;
  960. }
  961. static int copy_output(SANMVideoContext *ctx, SANMFrameHeader *hdr)
  962. {
  963. uint8_t *dst;
  964. const uint8_t *src = (uint8_t*) ctx->frm0;
  965. int ret, dstpitch, height = ctx->height;
  966. int srcpitch = ctx->pitch * (hdr ? sizeof(ctx->frm0[0]) : 1);
  967. if ((ret = ctx->avctx->get_buffer(ctx->avctx, ctx->output)) < 0) {
  968. av_log(ctx->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
  969. return ret;
  970. }
  971. dst = ctx->output->data[0];
  972. dstpitch = ctx->output->linesize[0];
  973. while (height--) {
  974. memcpy(dst, src, srcpitch);
  975. src += srcpitch;
  976. dst += dstpitch;
  977. }
  978. return 0;
  979. }
  980. static int decode_frame(AVCodecContext *avctx, void *data,
  981. int *got_frame_ptr, AVPacket *pkt)
  982. {
  983. SANMVideoContext *ctx = avctx->priv_data;
  984. int i, ret;
  985. bytestream2_init(&ctx->gb, pkt->data, pkt->size);
  986. if (ctx->output->data[0])
  987. avctx->release_buffer(avctx, ctx->output);
  988. if (!ctx->version) {
  989. int to_store = 0;
  990. while (bytestream2_get_bytes_left(&ctx->gb) >= 8) {
  991. uint32_t sig, size;
  992. int pos;
  993. sig = bytestream2_get_be32u(&ctx->gb);
  994. size = bytestream2_get_be32u(&ctx->gb);
  995. pos = bytestream2_tell(&ctx->gb);
  996. if (bytestream2_get_bytes_left(&ctx->gb) < size) {
  997. av_log(avctx, AV_LOG_ERROR, "incorrect chunk size %d\n", size);
  998. break;
  999. }
  1000. switch (sig) {
  1001. case MKBETAG('N', 'P', 'A', 'L'):
  1002. if (size != 256 * 3) {
  1003. av_log(avctx, AV_LOG_ERROR, "incorrect palette block size %d\n",
  1004. size);
  1005. return AVERROR_INVALIDDATA;
  1006. }
  1007. for (i = 0; i < 256; i++)
  1008. ctx->pal[i] = 0xFF << 24 | bytestream2_get_be24u(&ctx->gb);
  1009. break;
  1010. case MKBETAG('F', 'O', 'B', 'J'):
  1011. if (size < 16)
  1012. return AVERROR_INVALIDDATA;
  1013. if (ret = process_frame_obj(ctx))
  1014. return ret;
  1015. break;
  1016. case MKBETAG('X', 'P', 'A', 'L'):
  1017. if (size == 6 || size == 4) {
  1018. uint8_t tmp[3];
  1019. int j;
  1020. for (i = 0; i < 256; i++) {
  1021. for (j = 0; j < 3; j++) {
  1022. int t = (ctx->pal[i] >> (16 - j * 8)) & 0xFF;
  1023. tmp[j] = av_clip_uint8((t * 129 + ctx->delta_pal[i * 3 + j]) >> 7);
  1024. }
  1025. ctx->pal[i] = 0xFF << 24 | AV_RB24(tmp);
  1026. }
  1027. } else {
  1028. if (size < 768 * 2 + 4) {
  1029. av_log(avctx, AV_LOG_ERROR, "incorrect palette change block size %d\n",
  1030. size);
  1031. return AVERROR_INVALIDDATA;
  1032. }
  1033. bytestream2_skipu(&ctx->gb, 4);
  1034. for (i = 0; i < 768; i++)
  1035. ctx->delta_pal[i] = bytestream2_get_le16u(&ctx->gb);
  1036. if (size >= 768 * 5 + 4) {
  1037. for (i = 0; i < 256; i++)
  1038. ctx->pal[i] = 0xFF << 24 | bytestream2_get_be24u(&ctx->gb);
  1039. } else {
  1040. memset(ctx->pal, 0, sizeof(ctx->pal));
  1041. }
  1042. }
  1043. break;
  1044. case MKBETAG('S', 'T', 'O', 'R'):
  1045. to_store = 1;
  1046. break;
  1047. case MKBETAG('F', 'T', 'C', 'H'):
  1048. memcpy(ctx->frm0, ctx->stored_frame, ctx->buf_size);
  1049. break;
  1050. default:
  1051. bytestream2_skip(&ctx->gb, size);
  1052. av_log(avctx, AV_LOG_DEBUG, "unknown/unsupported chunk %x\n", sig);
  1053. break;
  1054. }
  1055. bytestream2_seek(&ctx->gb, pos + size, SEEK_SET);
  1056. if (size & 1)
  1057. bytestream2_skip(&ctx->gb, 1);
  1058. }
  1059. if (to_store)
  1060. memcpy(ctx->stored_frame, ctx->frm0, ctx->buf_size);
  1061. if ((ret = copy_output(ctx, NULL)))
  1062. return ret;
  1063. memcpy(ctx->output->data[1], ctx->pal, 1024);
  1064. } else {
  1065. SANMFrameHeader header;
  1066. if ((ret = read_frame_header(ctx, &header)))
  1067. return ret;
  1068. ctx->rotate_code = header.rotate_code;
  1069. if ((ctx->output->key_frame = !header.seq_num)) {
  1070. ctx->output->pict_type = AV_PICTURE_TYPE_I;
  1071. fill_frame(ctx->frm1, ctx->npixels, header.bg_color);
  1072. fill_frame(ctx->frm2, ctx->npixels, header.bg_color);
  1073. } else {
  1074. ctx->output->pict_type = AV_PICTURE_TYPE_P;
  1075. }
  1076. if (header.codec < FF_ARRAY_ELEMS(v1_decoders)) {
  1077. if ((ret = v1_decoders[header.codec](ctx))) {
  1078. av_log(avctx, AV_LOG_ERROR,
  1079. "subcodec %d: error decoding frame\n", header.codec);
  1080. return ret;
  1081. }
  1082. } else {
  1083. av_log_ask_for_sample(avctx, "subcodec %d is not implemented\n",
  1084. header.codec);
  1085. return AVERROR_PATCHWELCOME;
  1086. }
  1087. if ((ret = copy_output(ctx, &header)))
  1088. return ret;
  1089. }
  1090. if (ctx->rotate_code)
  1091. rotate_bufs(ctx, ctx->rotate_code);
  1092. *got_frame_ptr = 1;
  1093. *(AVFrame*)data = *ctx->output;
  1094. return pkt->size;
  1095. }
/* Decoder registration: exposes the SANM decoder to libavcodec.
 * CODEC_CAP_DR1 advertises support for direct rendering via get_buffer(). */
AVCodec ff_sanm_decoder = {
    .name           = "sanm",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SANM,
    .priv_data_size = sizeof(SANMVideoContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("LucasArts SMUSH video"),
};