/*
 * Mirillis FIC decoder
 *
 * Copyright (c) 2014 Konstantin Shishkov
 * Copyright (c) 2014 Derek Buitenhuis
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "internal.h"
#include "get_bits.h"
#include "golomb.h"
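
/*
 * Per-slice decoding state handed to each worker thread: a scratch DCT block,
 * the slice's bitstream pointer and size, its height, its vertical offset in
 * the frame, and a flag set when any block is skipped (which marks the frame
 * as a P-frame).
 */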
typedef struct FICThreadContext {
    DECLARE_ALIGNED(16, int16_t, block)[64];
    uint8_t *src;
    int slice_h;
    int src_size;
    int y_off;
    int p_frame;
} FICThreadContext;
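
/* Main decoder context: reference frames, the per-slice table, the active
 * quantization matrix, aligned frame dimensions, the raw cursor image and
 * the skip_cursor user option. */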
typedef struct FICContext {
    AVClass *class;
    AVCodecContext *avctx;
    AVFrame *frame;
    AVFrame *final_frame;

    FICThreadContext *slice_data;
    int slice_data_size;

    const uint8_t *qmat;

    enum AVPictureType cur_frame_type;

    int aligned_width, aligned_height;
    int num_slices, slice_h;

    uint8_t cursor_buf[4096];
    int skip_cursor;
} FICContext;
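
/* Quantization matrices for the high- and low-quality modes; the frame
 * header selects which one applies to the whole frame. */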
static const uint8_t fic_qmat_hq[64] = {
    1, 2, 2, 2, 3, 3, 3, 4,
    2, 2, 2, 3, 3, 3, 4, 4,
    2, 2, 3, 3, 3, 4, 4, 4,
    2, 2, 3, 3, 3, 4, 4, 5,
    2, 3, 3, 3, 4, 4, 5, 6,
    3, 3, 3, 4, 4, 5, 6, 7,
    3, 3, 3, 4, 4, 5, 7, 7,
    3, 3, 4, 4, 5, 7, 7, 7,
};

static const uint8_t fic_qmat_lq[64] = {
    1, 5, 6, 7, 8, 9, 9, 11,
    5, 5, 7, 8, 9, 9, 11, 12,
    6, 7, 8, 9, 9, 11, 11, 12,
    7, 7, 8, 9, 9, 11, 12, 13,
    7, 8, 9, 9, 10, 11, 13, 16,
    8, 9, 9, 10, 11, 13, 16, 19,
    8, 9, 9, 11, 12, 15, 18, 23,
    9, 9, 11, 12, 15, 18, 23, 27
};
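
/* Signature expected at the start of every FIC packet, the fixed header
 * length, and the byte offset of the 32x32 cursor image within the packet. */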
static const uint8_t fic_header[7] = { 0, 0, 1, 'F', 'I', 'C', 'V' };

#define FIC_HEADER_SIZE 27
#define CURSOR_OFFSET 59
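
/*
 * Fixed-point 1D 8-point inverse DCT over eight elements spaced 'step'
 * apart. 'shift' and 'rnd' select the scaling and rounding used by the
 * column and row passes.
 */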
static av_always_inline void fic_idct(int16_t *blk, int step, int shift, int rnd)
{
    const unsigned t0 = 27246 * blk[3 * step] + 18405 * blk[5 * step];
    const unsigned t1 = 27246 * blk[5 * step] - 18405 * blk[3 * step];
    const unsigned t2 =  6393 * blk[7 * step] + 32139 * blk[1 * step];
    const unsigned t3 =  6393 * blk[1 * step] - 32139 * blk[7 * step];
    const unsigned t4 = 5793U * ((int)(t2 + t0 + 0x800) >> 12);
    const unsigned t5 = 5793U * ((int)(t3 + t1 + 0x800) >> 12);
    const unsigned t6 = t2 - t0;
    const unsigned t7 = t3 - t1;
    const unsigned t8 = 17734 * blk[2 * step] - 42813 * blk[6 * step];
    const unsigned t9 = 17734 * blk[6 * step] + 42814 * blk[2 * step];
    const unsigned tA = (blk[0 * step] - blk[4 * step]) * 32768 + rnd;
    const unsigned tB = (blk[0 * step] + blk[4 * step]) * 32768 + rnd;

    blk[0 * step] = (int)(  t4       + t9 + tB) >> shift;
    blk[1 * step] = (int)(  t6 + t7  + t8 + tA) >> shift;
    blk[2 * step] = (int)(  t6 - t7  - t8 + tA) >> shift;
    blk[3 * step] = (int)(  t5       - t9 + tB) >> shift;
    blk[4 * step] = (int)( -t5       - t9 + tB) >> shift;
    blk[5 * step] = (int)(-(t6 - t7) - t8 + tA) >> shift;
    blk[6 * step] = (int)(-(t6 + t7) + t8 + tA) >> shift;
    blk[7 * step] = (int)( -t4       + t9 + tB) >> shift;
}
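
/* Full 2D inverse transform of one 8x8 block: a column pass (step 8), a row
 * pass (step 1), then the result is clipped to 8-bit and written to dst.
 * The first column pass carries an extra rounding bias. */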
static void fic_idct_put(uint8_t *dst, int stride, int16_t *block)
{
    int i, j;
    int16_t *ptr;

    ptr = block;
    fic_idct(ptr++, 8, 13, (1 << 12) + (1 << 17));
    for (i = 1; i < 8; i++) {
        fic_idct(ptr, 8, 13, 1 << 12);
        ptr++;
    }

    ptr = block;
    for (i = 0; i < 8; i++) {
        fic_idct(ptr, 1, 20, 0);
        ptr += 8;
    }

    ptr = block;
    for (j = 0; j < 8; j++) {
        for (i = 0; i < 8; i++)
            dst[i] = av_clip_uint8(ptr[i]);
        dst += stride;
        ptr += 8;
    }
}
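
/*
 * Decode a single 8x8 block. A set leading bit means "skip block": the block
 * is left untouched and *is_p is set so the frame is flagged as a P-frame.
 * Otherwise a 7-bit coefficient count is read, followed by signed Exp-Golomb
 * coefficients which are dequantized along the zigzag scan and inverse
 * transformed into dst.
 */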
static int fic_decode_block(FICContext *ctx, GetBitContext *gb,
                            uint8_t *dst, int stride, int16_t *block, int *is_p)
{
    int i, num_coeff;

    if (get_bits_left(gb) < 8)
        return AVERROR_INVALIDDATA;

    /* Is it a skip block? */
    if (get_bits1(gb)) {
        *is_p = 1;
        return 0;
    }

    memset(block, 0, sizeof(*block) * 64);

    num_coeff = get_bits(gb, 7);
    if (num_coeff > 64)
        return AVERROR_INVALIDDATA;

    for (i = 0; i < num_coeff; i++) {
        int v = get_se_golomb(gb);
        if (v < -2048 || v > 2048)
            return AVERROR_INVALIDDATA;
        block[ff_zigzag_direct[i]] = v * ctx->qmat[ff_zigzag_direct[i]];
    }

    fic_idct_put(dst, stride, block);

    return 0;
}
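
/* Slice worker run via avctx->execute(): decodes every 8x8 block of the luma
 * plane and the two subsampled chroma planes for one horizontal slice. */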
static int fic_decode_slice(AVCodecContext *avctx, void *tdata)
{
    FICContext *ctx        = avctx->priv_data;
    FICThreadContext *tctx = tdata;
    GetBitContext gb;
    uint8_t *src = tctx->src;
    int slice_h  = tctx->slice_h;
    int src_size = tctx->src_size;
    int y_off    = tctx->y_off;
    int x, y, p;

    init_get_bits(&gb, src, src_size * 8);

    for (p = 0; p < 3; p++) {
        int stride   = ctx->frame->linesize[p];
        uint8_t *dst = ctx->frame->data[p] + (y_off >> !!p) * stride;

        for (y = 0; y < (slice_h >> !!p); y += 8) {
            for (x = 0; x < (ctx->aligned_width >> !!p); x += 8) {
                int ret;

                if ((ret = fic_decode_block(ctx, &gb, dst + x, stride,
                                            tctx->block, &tctx->p_frame)) != 0)
                    return ret;
            }

            dst += 8 * stride;
        }
    }

    return 0;
}
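
/* Blend src over dst using per-pixel alpha: dst += (src - dst) * alpha / 256. */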
static av_always_inline void fic_alpha_blend(uint8_t *dst, uint8_t *src,
                                             int size, uint8_t *alpha)
{
    int i;

    for (i = 0; i < size; i++)
        dst[i] += ((src[i] - dst[i]) * alpha[i]) >> 8;
}
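
/*
 * Overlay the 32x32 cursor at (cur_x, cur_y): convert the cursor image to
 * YUVA 4:4:4, average 2x2 blocks to get subsampled chroma and alpha, then
 * alpha-blend the result onto the final frame, clipping at the frame edges.
 */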
static void fic_draw_cursor(AVCodecContext *avctx, int cur_x, int cur_y)
{
    FICContext *ctx = avctx->priv_data;
    uint8_t *ptr    = ctx->cursor_buf;
    uint8_t *dstptr[3];
    uint8_t planes[4][1024];
    uint8_t chroma[3][256];
    int i, j, p;

    /* Convert to YUVA444. */
    for (i = 0; i < 1024; i++) {
        planes[0][i] = (( 25 * ptr[0] + 129 * ptr[1] +  66 * ptr[2]) / 255) + 16;
        planes[1][i] = ((-38 * ptr[0] + 112 * ptr[1] + -74 * ptr[2]) / 255) + 128;
        planes[2][i] = ((-18 * ptr[0] + 112 * ptr[1] + -94 * ptr[2]) / 255) + 128;
        planes[3][i] = ptr[3];

        ptr += 4;
    }

    /* Subsample chroma. */
    for (i = 0; i < 32; i += 2)
        for (j = 0; j < 32; j += 2)
            for (p = 0; p < 3; p++)
                chroma[p][16 * (i / 2) + j / 2] = (planes[p + 1][32 *  i      + j    ] +
                                                   planes[p + 1][32 *  i      + j + 1] +
                                                   planes[p + 1][32 * (i + 1) + j    ] +
                                                   planes[p + 1][32 * (i + 1) + j + 1]) / 4;

    /* Seek to x/y pos of cursor. */
    for (i = 0; i < 3; i++)
        dstptr[i] = ctx->final_frame->data[i]                        +
                    (ctx->final_frame->linesize[i] * (cur_y >> !!i)) +
                    (cur_x >> !!i) + !!i;

    /* Copy. */
    for (i = 0; i < FFMIN(32, avctx->height - cur_y) - 1; i += 2) {
        int lsize = FFMIN(32, avctx->width - cur_x);
        int csize = lsize / 2;

        fic_alpha_blend(dstptr[0],
                        planes[0] + i * 32, lsize, planes[3] + i * 32);
        fic_alpha_blend(dstptr[0] + ctx->final_frame->linesize[0],
                        planes[0] + (i + 1) * 32, lsize, planes[3] + (i + 1) * 32);
        fic_alpha_blend(dstptr[1],
                        chroma[0] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16);
        fic_alpha_blend(dstptr[2],
                        chroma[1] + (i / 2) * 16, csize, chroma[2] + (i / 2) * 16);

        dstptr[0] += ctx->final_frame->linesize[0] * 2;
        dstptr[1] += ctx->final_frame->linesize[1];
        dstptr[2] += ctx->final_frame->linesize[2];
    }
}
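
/*
 * Decode one packet: validate the header, handle skip frames, pick the
 * quantization matrix, parse the cursor metadata and the per-slice offset
 * table, decode all slices across the slice threads, then clone the frame
 * and draw the cursor on top of the copy.
 */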
static int fic_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_frame, AVPacket *avpkt)
{
    FICContext *ctx = avctx->priv_data;
    uint8_t *src = avpkt->data;
    int ret;
    int slice, nslices;
    int msize;
    int tsize;
    int cur_x, cur_y;
    int skip_cursor = ctx->skip_cursor;
    uint8_t *sdata;

    if ((ret = ff_reget_buffer(avctx, ctx->frame)) < 0)
        return ret;

    /* Header + at least one slice (4) */
    if (avpkt->size < FIC_HEADER_SIZE + 4) {
        av_log(avctx, AV_LOG_ERROR, "Frame data is too small.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Check for header. */
    if (memcmp(src, fic_header, 7))
        av_log(avctx, AV_LOG_WARNING, "Invalid FIC Header.\n");

    /* Is it a skip frame? */
    if (src[17]) {
        if (!ctx->final_frame) {
            av_log(avctx, AV_LOG_WARNING, "Initial frame is skipped\n");
            return AVERROR_INVALIDDATA;
        }
        goto skip;
    }

    nslices = src[13];
    if (!nslices) {
        av_log(avctx, AV_LOG_ERROR, "Zero slices found.\n");
        return AVERROR_INVALIDDATA;
    }

    /* High or Low Quality Matrix? */
    ctx->qmat = src[23] ? fic_qmat_hq : fic_qmat_lq;

    /* Skip cursor data. */
    tsize = AV_RB24(src + 24);
    if (tsize > avpkt->size - FIC_HEADER_SIZE) {
        av_log(avctx, AV_LOG_ERROR,
               "Packet is too small to contain cursor (%d vs %d bytes).\n",
               tsize, avpkt->size - FIC_HEADER_SIZE);
        return AVERROR_INVALIDDATA;
    }

    if (!tsize || !AV_RL16(src + 37) || !AV_RL16(src + 39))
        skip_cursor = 1;

    if (!skip_cursor && tsize < 32) {
        av_log(avctx, AV_LOG_WARNING,
               "Cursor data too small. Skipping cursor.\n");
        skip_cursor = 1;
    }

    /* Cursor position. */
    cur_x = AV_RL16(src + 33);
    cur_y = AV_RL16(src + 35);
    if (!skip_cursor && (cur_x > avctx->width || cur_y > avctx->height)) {
        av_log(avctx, AV_LOG_DEBUG,
               "Invalid cursor position: (%d,%d). Skipping cursor.\n",
               cur_x, cur_y);
        skip_cursor = 1;
    }

    if (!skip_cursor && (AV_RL16(src + 37) != 32 || AV_RL16(src + 39) != 32)) {
        av_log(avctx, AV_LOG_WARNING,
               "Invalid cursor size. Skipping cursor.\n");
        skip_cursor = 1;
    }

    if (!skip_cursor && avpkt->size < CURSOR_OFFSET + sizeof(ctx->cursor_buf)) {
        skip_cursor = 1;
    }

    /* Slice height for all but the last slice. */
    ctx->slice_h = 16 * (ctx->aligned_height >> 4) / nslices;
    if (ctx->slice_h % 16)
        ctx->slice_h = FFALIGN(ctx->slice_h - 16, 16);

    /* First slice offset and remaining data. */
    sdata = src + tsize + FIC_HEADER_SIZE + 4 * nslices;
    msize = avpkt->size - nslices * 4 - tsize - FIC_HEADER_SIZE;

    if (msize <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Not enough frame data to decode.\n");
        return AVERROR_INVALIDDATA;
    }

    /* Allocate slice data. */
    av_fast_malloc(&ctx->slice_data, &ctx->slice_data_size,
                   nslices * sizeof(ctx->slice_data[0]));
    if (!ctx->slice_data_size) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate slice data.\n");
        return AVERROR(ENOMEM);
    }
    memset(ctx->slice_data, 0, nslices * sizeof(ctx->slice_data[0]));

    for (slice = 0; slice < nslices; slice++) {
        unsigned slice_off = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4);
        unsigned slice_size;
        int y_off   = ctx->slice_h * slice;
        int slice_h = ctx->slice_h;

        /*
         * Either read the slice size, or consume all data left.
         * Also, special case the last slice's height.
         */
        if (slice == nslices - 1) {
            slice_size = msize;
            slice_h    = FFALIGN(avctx->height - ctx->slice_h * (nslices - 1), 16);
        } else {
            slice_size = AV_RB32(src + tsize + FIC_HEADER_SIZE + slice * 4 + 4);
            if (slice_size < slice_off)
                return AVERROR_INVALIDDATA;
        }

        if (slice_size < slice_off || slice_size > msize)
            continue;

        slice_size -= slice_off;

        ctx->slice_data[slice].src      = sdata + slice_off;
        ctx->slice_data[slice].src_size = slice_size;
        ctx->slice_data[slice].slice_h  = slice_h;
        ctx->slice_data[slice].y_off    = y_off;
    }

    if ((ret = avctx->execute(avctx, fic_decode_slice, ctx->slice_data,
                              NULL, nslices, sizeof(ctx->slice_data[0]))) < 0)
        return ret;

    ctx->frame->key_frame = 1;
    ctx->frame->pict_type = AV_PICTURE_TYPE_I;
    for (slice = 0; slice < nslices; slice++) {
        if (ctx->slice_data[slice].p_frame) {
            ctx->frame->key_frame = 0;
            ctx->frame->pict_type = AV_PICTURE_TYPE_P;
            break;
        }
    }

    av_frame_free(&ctx->final_frame);
    ctx->final_frame = av_frame_clone(ctx->frame);
    if (!ctx->final_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not clone frame buffer.\n");
        return AVERROR(ENOMEM);
    }

    /* Make sure we use a user-supplied buffer. */
    if ((ret = ff_reget_buffer(avctx, ctx->final_frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not make frame writable.\n");
        return ret;
    }

    /* Draw cursor. */
    if (!skip_cursor) {
        memcpy(ctx->cursor_buf, src + CURSOR_OFFSET, sizeof(ctx->cursor_buf));
        fic_draw_cursor(avctx, cur_x, cur_y);
    }

skip:
    *got_frame = 1;
    if ((ret = av_frame_ref(data, ctx->final_frame)) < 0)
        return ret;

    return avpkt->size;
}

static av_cold int fic_decode_close(AVCodecContext *avctx)
{
    FICContext *ctx = avctx->priv_data;

    av_freep(&ctx->slice_data);
    av_frame_free(&ctx->final_frame);
    av_frame_free(&ctx->frame);

    return 0;
}

static av_cold int fic_decode_init(AVCodecContext *avctx)
{
    FICContext *ctx = avctx->priv_data;

    /* Initialize various context values */
    ctx->avctx          = avctx;
    ctx->aligned_width  = FFALIGN(avctx->width,  16);
    ctx->aligned_height = FFALIGN(avctx->height, 16);

    avctx->pix_fmt             = AV_PIX_FMT_YUV420P;
    avctx->bits_per_raw_sample = 8;

    ctx->frame = av_frame_alloc();
    if (!ctx->frame)
        return AVERROR(ENOMEM);

    return 0;
}

static const AVOption options[] = {
    { "skip_cursor", "skip the cursor", offsetof(FICContext, skip_cursor), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
    { NULL },
};

static const AVClass fic_decoder_class = {
    .class_name = "FIC decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_fic_decoder = {
    .name           = "fic",
    .long_name      = NULL_IF_CONFIG_SMALL("Mirillis FIC"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FIC,
    .priv_data_size = sizeof(FICContext),
    .init           = fic_decode_init,
    .decode         = fic_decode_frame,
    .close          = fic_decode_close,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
    .priv_class     = &fic_decoder_class,
};