/*
 * Lagarith lossless decoder
 * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lagarith lossless decoder
 * @author Nathan Caldwell
 */

#include "avcodec.h"
#include "get_bits.h"
#include "mathops.h"
#include "dsputil.h"
#include "lagarithrac.h"
#include "thread.h"
enum LagarithFrameType {
    FRAME_RAW           = 1,    /**< uncompressed */
    FRAME_U_RGB24       = 2,    /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,    /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,    /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,    /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,    /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,    /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,    /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,    /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10,   /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11,   /**< reduced resolution YV12 frame */
};
typedef struct LagarithContext {
    AVCodecContext *avctx;
    AVFrame picture;
    DSPContext dsp;
    int zeros;                  /**< number of consecutive zero bytes encountered */
    int zeros_rem;              /**< number of zero bytes remaining to output */
    uint8_t *rgb_planes;
    int rgb_stride;
} LagarithContext;
/**
 * Compute the 52-bit mantissa of 1/(double)denom.
 * This crazy format uses floats in an entropy coder and we have to match x86
 * rounding exactly, thus ordinary floats aren't portable enough.
 * @param denom denominator
 * @return 52-bit mantissa
 * @see softfloat_mul
 */
static uint64_t softfloat_reciprocal(uint32_t denom)
{
    int shift = av_log2(denom - 1) + 1;
    uint64_t ret = (1ULL << 52) / denom;
    uint64_t err = (1ULL << 52) - ret * denom;
    ret <<= shift;
    err <<= shift;
    err += denom / 2;
    return ret + err / denom;
}
/**
 * (uint32_t)(x*f), where f has the given mantissa and exponent 0.
 * Used in combination with softfloat_reciprocal to compute x/(double)denom.
 * @param x 32-bit integer factor
 * @param mantissa mantissa of f with exponent 0
 * @return 32-bit integer value (x*f)
 * @see softfloat_reciprocal
 */
static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
{
    uint64_t l = x * (mantissa & 0xffffffff);
    uint64_t h = x * (mantissa >> 32);
    h += l >> 32;
    l &= 0xffffffff;
    l += 1 << av_log2(h >> 21);
    h += l >> 32;
    return h >> 20;
}
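
/**
 * Convert the signed byte following a zero-run escape into the unsigned
 * run length, using the zigzag mapping (x << 1) ^ (x >> 7).
 */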
static uint8_t lag_calc_zero_run(int8_t x)
{
    return (x << 1) ^ (x >> 7);
}
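
/**
 * Read a single probability value from the bitstream.
 * The number of value bits is prefix coded using the series
 * { 1, 2, 3, 5, 8, 13, 21 }; the value itself follows.
 * @param gb    bitstream reader
 * @param value decoded probability, set to 0 on error
 * @return 0 on success, -1 if the coded bit count is out of range
 */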
static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
{
    static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
    int i;
    int bit     = 0;
    int bits    = 0;
    int prevbit = 0;
    unsigned val;

    for (i = 0; i < 7; i++) {
        if (prevbit && bit)
            break;
        prevbit = bit;
        bit = get_bits1(gb);
        if (bit && !prevbit)
            bits += series[i];
    }
    bits--;
    if (bits < 0 || bits > 31) {
        *value = 0;
        return -1;
    } else if (bits == 0) {
        *value = 0;
        return 0;
    }

    val  = get_bits_long(gb, bits);
    val |= 1 << bits;

    *value = val - 1;

    return 0;
}
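
/**
 * Read and prepare the probability table for one range coded plane.
 * Zero probabilities are run-length expanded, the table is rescaled so the
 * cumulative total is a power of two, and it is finally converted to
 * cumulative form for the range coder.
 * @param rac range coder context whose prob[] array is filled
 * @param gb  bitstream reader positioned at the probability header
 * @return 0 on success, -1 on invalid data
 */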
static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
{
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;

    rac->prob[0]   = 0;
    rac->prob[257] = UINT_MAX;
    /* Read probabilities from bitstream */
    for (i = 1; i < 257; i++) {
        if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
            return -1;
        }
        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
            av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
            return -1;
        }
        cumul_prob += rac->prob[i];
        if (!rac->prob[i]) {
            if (lag_decode_prob(gb, &prob)) {
                av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
                return -1;
            }
            if (prob > 257 - i)
                prob = 257 - i;
            for (j = 0; j < prob; j++)
                rac->prob[++i] = 0;
        }
    }

    if (!cumul_prob) {
        av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
        return -1;
    }

    /* Scale probabilities so cumulative probability is an even power of 2. */
    scale_factor = av_log2(cumul_prob);

    if (cumul_prob & (cumul_prob - 1)) {
        uint64_t mul = softfloat_reciprocal(cumul_prob);
        for (i = 1; i < 257; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        scale_factor++;
        cumulative_target = 1 << scale_factor;

        if (scaled_cumul_prob > cumulative_target) {
            av_log(rac->avctx, AV_LOG_ERROR,
                   "Scaled probabilities are larger than target!\n");
            return -1;
        }

        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;

        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
            if (rac->prob[i]) {
                rac->prob[i]++;
                scaled_cumul_prob--;
            }
            /* Comment from reference source:
             * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
             *                      // since the compression change is negligable and fixing it
             *                      // breaks backwards compatibilty
             *     b =- (signed int)b;
             *     b &= 0xFF;
             * } else {
             *     b++;
             *     b &= 0x7f;
             * }
             */
        }
    }

    rac->scale = scale_factor;

    /* Fill probability array with cumulative probability for each symbol. */
    for (i = 1; i < 257; i++)
        rac->prob[i] += rac->prob[i - 1];

    return 0;
}
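
/**
 * @param dst      destination line
 * @param src1     previously decoded line (top predictor)
 * @param diff     residuals to add to the prediction
 * @param w        number of bytes to process
 * @param left     in/out: running left predictor
 * @param left_top in/out: running top-left predictor
 */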
static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
                                      uint8_t *diff, int w, int *left,
                                      int *left_top)
{
    /* This is almost identical to add_hfyu_median_prediction in dsputil.h.
     * However the &0xFF on the gradient predictor yields incorrect output
     * for lagarith.
     */
    int i;
    uint8_t l, lt;

    l  = *left;
    lt = *left_top;

    for (i = 0; i < w; i++) {
        l = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
        lt = src1[i];
        dst[i] = l;
    }

    *left     = l;
    *left_top = lt;
}
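
/**
 * Reverse the spatial prediction for one line of a planar frame in place.
 * The first line is left predicted; later lines use the unmasked median
 * predictor, with the top-left predictor of the second line special-cased.
 * @param l      decoder context
 * @param buf    start of the line to predict
 * @param width  line width in bytes
 * @param stride distance in bytes between consecutive lines
 * @param line   zero-based index of the line within the plane
 */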
static void lag_pred_line(LagarithContext *l, uint8_t *buf,
                          int width, int stride, int line)
{
    int L, TL;

    if (!line) {
        /* Left prediction only for first line */
        L = l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1,
                                            width - 1, buf[0]);
    } else {
        /* Left pixel is actually prev_row[width] */
        L = buf[width - stride - 1];

        if (line == 1) {
            /* Second line, left predict first pixel, the rest of the line is median predicted
             * NOTE: In the case of RGB this pixel is top predicted */
            TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
        } else {
            /* Top left is 2 rows back, last pixel */
            TL = buf[width - (2 * stride) - 1];
        }

        add_lag_median_prediction(buf, buf - stride, buf,
                                  width, &L, &TL);
    }
}
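
/**
 * Reverse the spatial prediction for one line of a YUV422P plane.
 * The first line is left predicted; on the second line the first few pixels
 * are left predicted before median prediction takes over.
 * @param is_luma nonzero when operating on the luma plane
 */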
static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
                               int width, int stride, int line,
                               int is_luma)
{
    int L, TL;

    if (!line) {
        if (is_luma) {
            buf++;
            width--;
        }
        l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1, width - 1, buf[0]);
        return;
    }
    if (line == 1) {
        const int HEAD = is_luma ? 4 : 2;
        int i;

        L  = buf[width - stride - 1];
        TL = buf[HEAD  - stride - 1];
        for (i = 0; i < HEAD; i++) {
            L += buf[i];
            buf[i] = L;
        }
        buf   += HEAD;
        width -= HEAD;
    } else {
        TL = buf[width - (2 * stride) - 1];
        L  = buf[width - stride - 1];
    }
    l->dsp.add_hfyu_median_prediction(buf, buf - stride, buf, width,
                                      &L, &TL);
}
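
/**
 * Range decode one line of a plane.
 * Any zero run left over from the previous line is flushed first; a run of
 * esc_count zero bytes escapes into a new zero-run length.
 * @return number of symbols read from the range coder
 */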
static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                           uint8_t *dst, int width, int stride,
                           int esc_count)
{
    int i = 0;
    int ret = 0;

    if (!esc_count)
        esc_count = -1;

    /* Output any zeros remaining from the previous run */
handle_zeros:
    if (l->zeros_rem) {
        int count = FFMIN(l->zeros_rem, width - i);
        memset(dst + i, 0, count);
        i += count;
        l->zeros_rem -= count;
    }

    while (i < width) {
        dst[i] = lag_get_rac(rac);
        ret++;

        if (dst[i])
            l->zeros = 0;
        else
            l->zeros++;

        i++;
        if (l->zeros == esc_count) {
            int index = lag_get_rac(rac);
            ret++;

            l->zeros = 0;

            l->zeros_rem = lag_calc_zero_run(index);
            goto handle_zeros;
        }
    }
    return ret;
}
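
/**
 * Decode one line of a zero-run coded plane (no range coding).
 * Literal bytes are copied as-is; a run of esc_count zero bytes escapes a
 * run-length byte whose zeros are expanded, possibly spilling into the
 * following lines via the context's zeros_rem counter.
 */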
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);

output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
    }

    while (dst < end) {
        i = 0;
        while (!zero_run && dst + i < end) {
            i++;
            if (src + i >= src_end)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
        if (zero_run) {
            zero_run = 0;
            i += esc_count;
            memcpy(dst, src, i);
            dst += i;
            l->zeros_rem = lag_calc_zero_run(src[i]);

            src += i + 1;
            goto output_zeros;
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return src_start - src;
}
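
/**
 * Decode a single plane from the packet payload.
 * The first byte selects the coding mode: range coded (esc_count < 4),
 * zero-run coded or raw (esc_count 4..7), or a solid fill (0xff).
 * Spatial prediction is reversed afterwards except for solid planes.
 * @return 0 on success, a negative value on error
 */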
static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
                                  int width, int height, int stride,
                                  const uint8_t *src, int src_size)
{
    int i = 0;
    int read = 0;
    uint32_t length;
    uint32_t offset = 1;
    int esc_count = src[0];
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;

    rac.avctx = l->avctx;
    l->zeros = 0;

    if (esc_count < 4) {
        length = width * height;
        if (esc_count && AV_RL32(src + 1) < length) {
            length = AV_RL32(src + 1);
            offset += 4;
        }

        init_get_bits(&gb, src + offset, src_size * 8);

        if (lag_read_prob_header(&rac, &gb) < 0)
            return -1;

        ff_lag_rac_init(&rac, &gb, length - stride);

        for (i = 0; i < height; i++)
            read += lag_decode_line(l, &rac, dst + (i * stride), width,
                                    stride, esc_count);

        if (read > length)
            av_log(l->avctx, AV_LOG_WARNING,
                   "Output more bytes than length (%d of %d)\n", read,
                   length);
    } else if (esc_count < 8) {
        esc_count -= 4;
        if (esc_count > 0) {
            /* Zero run coding only, no range coding. */
            for (i = 0; i < height; i++) {
                int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                   src_end, width, esc_count);
                if (res < 0)
                    return res;
                src += res;
            }
        } else {
            if (src_size < width * height)
                return AVERROR_INVALIDDATA; // buffer not big enough
            /* Plane is stored uncompressed */
            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);
                src += width;
            }
        }
    } else if (esc_count == 0xff) {
        /* Plane is a solid run of given value */
        for (i = 0; i < height; i++)
            memset(dst + i * stride, src[1], width);
        /* Do not apply prediction.
           Note: memset to 0 above, setting first value to src[1]
           and applying prediction gives the same result. */
        return 0;
    } else {
        av_log(l->avctx, AV_LOG_ERROR,
               "Invalid zero run escape code! (%#x)\n", esc_count);
        return -1;
    }

    if (l->avctx->pix_fmt != PIX_FMT_YUV422P) {
        for (i = 0; i < height; i++) {
            lag_pred_line(l, dst, width, stride, i);
            dst += stride;
        }
    } else {
        for (i = 0; i < height; i++) {
            lag_pred_line_yuy2(l, dst, width, stride, i,
                               width == l->avctx->width);
            dst += stride;
        }
    }

    return 0;
}
/**
 * Decode a frame.
 * @param avctx codec context
 * @param data output AVFrame
 * @param data_size size of output data or 0 if no picture is returned
 * @param avpkt input packet
 * @return number of consumed bytes on success or negative if decode fails
 */
static int lag_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LagarithContext *l = avctx->priv_data;
    AVFrame *const p = &l->picture;
    uint8_t frametype = 0;
    uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
    uint32_t offs[4];
    uint8_t *srcs[4], *dst;
    int i, j, planes = 3;

    AVFrame *picture = data;

    if (p->data[0])
        ff_thread_release_buffer(avctx, p);

    p->reference = 0;
    p->key_frame = 1;

    frametype = buf[0];

    offset_gu = AV_RL32(buf + 1);
    offset_bv = AV_RL32(buf + 5);

    switch (frametype) {
    case FRAME_SOLID_RGBA:
        avctx->pix_fmt = PIX_FMT_RGB32;

        if (ff_thread_get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }

        dst = p->data[0];
        for (j = 0; j < avctx->height; j++) {
            for (i = 0; i < avctx->width; i++)
                AV_WN32(dst + i * 4, offset_gu);
            dst += p->linesize[0];
        }
        break;
    case FRAME_ARITH_RGBA:
        avctx->pix_fmt = PIX_FMT_RGB32;
        planes = 4;
        offset_ry += 4;
        offs[3] = AV_RL32(buf + 9);
    case FRAME_ARITH_RGB24:
    case FRAME_U_RGB24:
        if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
            avctx->pix_fmt = PIX_FMT_RGB24;

        if (ff_thread_get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }

        offs[0] = offset_bv;
        offs[1] = offset_gu;
        offs[2] = offset_ry;

        if (!l->rgb_planes) {
            l->rgb_stride = FFALIGN(avctx->width, 16);
            l->rgb_planes = av_malloc(l->rgb_stride * avctx->height * planes + 1);
            if (!l->rgb_planes) {
                av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
                return AVERROR(ENOMEM);
            }
        }
        for (i = 0; i < planes; i++)
            srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size ||
            (planes == 4 && offs[3] >= buf_size)) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < planes; i++)
            lag_decode_arith_plane(l, srcs[i],
                                   avctx->width, avctx->height,
                                   -l->rgb_stride, buf + offs[i],
                                   buf_size - offs[i]);
        dst = p->data[0];
        for (i = 0; i < planes; i++)
            srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
        for (j = 0; j < avctx->height; j++) {
            for (i = 0; i < avctx->width; i++) {
                uint8_t r, g, b, a;
                r = srcs[0][i];
                g = srcs[1][i];
                b = srcs[2][i];
                r += g;
                b += g;
                if (frametype == FRAME_ARITH_RGBA) {
                    a = srcs[3][i];
                    AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
                } else {
                    dst[i * 3 + 0] = r;
                    dst[i * 3 + 1] = g;
                    dst[i * 3 + 2] = b;
                }
            }
            dst += p->linesize[0];
            for (i = 0; i < planes; i++)
                srcs[i] += l->rgb_stride;
        }
        break;
    case FRAME_ARITH_YUY2:
        avctx->pix_fmt = PIX_FMT_YUV422P;

        if (ff_thread_get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height, p->linesize[1],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height, p->linesize[2],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    case FRAME_ARITH_YV12:
        avctx->pix_fmt = PIX_FMT_YUV420P;

        if (ff_thread_get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height / 2, p->linesize[2],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height / 2, p->linesize[1],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported Lagarith frame type: %#x\n", frametype);
        return -1;
    }

    *picture = *p;
    *data_size = sizeof(AVFrame);

    return buf_size;
}
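
/** Store the codec context and set up the DSP helper functions. */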
static av_cold int lag_decode_init(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;
    l->avctx = avctx;

    ff_dsputil_init(&l->dsp, avctx);

    return 0;
}
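
/** Release the reference picture and the temporary RGB plane buffer. */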
static av_cold int lag_decode_end(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;

    if (l->picture.data[0])
        ff_thread_release_buffer(avctx, &l->picture);
    av_freep(&l->rgb_planes);

    return 0;
}
AVCodec ff_lagarith_decoder = {
    .name           = "lagarith",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LAGARITH,
    .priv_data_size = sizeof(LagarithContext),
    .init           = lag_decode_init,
    .close          = lag_decode_end,
    .decode         = lag_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .long_name      = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
};