/*
 * Lagarith lossless decoder
 * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lagarith lossless decoder
 * @author Nathan Caldwell
 */

#include "avcodec.h"
#include "get_bits.h"
#include "mathops.h"
#include "dsputil.h"
#include "lagarithrac.h"

enum LagarithFrameType {
    FRAME_RAW           = 1,  /**< uncompressed */
    FRAME_U_RGB24       = 2,  /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,  /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,  /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,  /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,  /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,  /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,  /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,  /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10, /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11, /**< reduced resolution YV12 frame */
};

typedef struct LagarithContext {
    AVCodecContext *avctx;
    AVFrame picture;
    DSPContext dsp;
    int zeros;      /**< number of consecutive zero bytes encountered */
    int zeros_rem;  /**< number of zero bytes remaining to output */
    uint8_t *rgb_planes;
    int rgb_stride;
} LagarithContext;

/**
 * Compute the 52-bit mantissa of 1/(double)denom.
 * This crazy format uses floats in an entropy coder and we have to match x86
 * rounding exactly, thus ordinary floats aren't portable enough.
 * @param denom denominator
 * @return 52-bit mantissa
 * @see softfloat_mul
 */
static uint64_t softfloat_reciprocal(uint32_t denom)
{
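    /* Equivalent to round((1 << (52 + shift)) / denom) with
     * shift = av_log2(denom - 1) + 1; quotient and remainder are handled
     * separately so the intermediate values stay within 64 bits. */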
    int shift = av_log2(denom - 1) + 1;
    uint64_t ret = (1ULL << 52) / denom;
    uint64_t err = (1ULL << 52) - ret * denom;
    ret <<= shift;
    err <<= shift;
    err += denom / 2;
    return ret + err / denom;
}

/**
 * (uint32_t)(x * f), where f has the given mantissa and exponent 0.
 * Used in combination with softfloat_reciprocal, this computes x/(double)denom.
 * @param x 32-bit integer factor
 * @param mantissa mantissa of f with exponent 0
 * @return 32-bit integer value (x * f)
 * @see softfloat_reciprocal
 */
static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
{
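    /* Take the high bits of the product x * mantissa (effectively the product
     * shifted right by 52); the 1 << av_log2(h >> 21) term is the rounding
     * fudge needed to match the reference implementation's float rounding
     * (see the comment on softfloat_reciprocal). */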
    uint64_t l = x * (mantissa & 0xffffffff);
    uint64_t h = x * (mantissa >> 32);
    h += l >> 32;
    l &= 0xffffffff;
    l += 1 << av_log2(h >> 21);
    h += l >> 32;
    return h >> 20;
}
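
/* Map a signed byte to an unsigned zero-run length: 0, -1, 1, -2, 2, ...
 * becomes 0, 1, 2, 3, 4, ... (zigzag mapping). */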
static uint8_t lag_calc_zero_run(int8_t x)
{
    return (x << 1) ^ (x >> 7);
}

static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
{
    static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
    int i;
    int bit = 0;
    int bits = 0;
    int prevbit = 0;
    unsigned val;
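
    /* The bit length of the value is coded with a prefix code over the series
     * {1, 2, 3, 5, 8, 13, 21}: each 1 bit that follows a 0 adds the next
     * series element, and a second consecutive 1 bit ends the code.  The
     * remaining low bits are then read explicitly, combined with an implicit
     * leading one, and the result is decremented by one. */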
    for (i = 0; i < 7; i++) {
        if (prevbit && bit)
            break;
        prevbit = bit;
        bit = get_bits1(gb);
        if (bit && !prevbit)
            bits += series[i];
    }
    bits--;
    if (bits < 0 || bits > 31) {
        *value = 0;
        return -1;
    } else if (bits == 0) {
        *value = 0;
        return 0;
    }
    val = get_bits_long(gb, bits);
    val |= 1 << bits;
    *value = val - 1;
    return 0;
}

static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
{
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;
    rac->prob[0] = 0;
    rac->prob[257] = UINT_MAX;
    /* Read probabilities from bitstream */
    for (i = 1; i < 257; i++) {
        if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
            return -1;
        }
        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
            av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
            return -1;
        }
        cumul_prob += rac->prob[i];
        if (!rac->prob[i]) {
            if (lag_decode_prob(gb, &prob)) {
                av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
                return -1;
            }
            if (prob > 257 - i)
                prob = 257 - i;
            for (j = 0; j < prob; j++)
                rac->prob[++i] = 0;
        }
    }
    if (!cumul_prob) {
        av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
        return -1;
    }
    /* Scale probabilities so the cumulative probability is a power of two. */
    scale_factor = av_log2(cumul_prob);
    if (cumul_prob & (cumul_prob - 1)) {
        uint64_t mul = softfloat_reciprocal(cumul_prob);
        for (i = 1; i < 257; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }
        scale_factor++;
        cumulative_target = 1 << scale_factor;
        if (scaled_cumul_prob > cumulative_target) {
            av_log(rac->avctx, AV_LOG_ERROR,
                   "Scaled probabilities are larger than target!\n");
            return -1;
        }
        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;
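
        /* Distribute the leftover probability mass one count at a time over
         * the first 128 symbols that have a nonzero probability, matching the
         * reference encoder (see the quoted comment below). */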
        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
            if (rac->prob[i]) {
                rac->prob[i]++;
                scaled_cumul_prob--;
            }
            /* Comment from reference source:
             * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
             *                      // since the compression change is negligable and fixing it
             *                      // breaks backwards compatibilty
             *     b =- (signed int)b;
             *     b &= 0xFF;
             * } else {
             *     b++;
             *     b &= 0x7f;
             * }
             */
        }
    }
    rac->scale = scale_factor;
    /* Fill probability array with cumulative probability for each symbol. */
    for (i = 1; i < 257; i++)
        rac->prob[i] += rac->prob[i - 1];
    return 0;
}

static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
                                      uint8_t *diff, int w, int *left,
                                      int *left_top)
{
    /* This is almost identical to add_hfyu_median_prediction in dsputil.h.
     * However the &0xFF on the gradient predictor yields incorrect output
     * for Lagarith.
     */
    int i;
    uint8_t l, lt;
    l  = *left;
    lt = *left_top;
    for (i = 0; i < w; i++) {
        l = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
        lt = src1[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = lt;
}

static void lag_pred_line(LagarithContext *l, uint8_t *buf,
                          int width, int stride, int line)
{
    int L, TL;
    if (!line) {
        /* Left prediction only for first line */
        L = l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1,
                                            width - 1, buf[0]);
    } else {
        /* Left pixel is actually prev_row[width] */
        L = buf[width - stride - 1];
        if (line == 1) {
            /* Second line, left predict first pixel, the rest of the line is median predicted
             * NOTE: In the case of RGB this pixel is top predicted */
            TL = l->avctx->pix_fmt == PIX_FMT_YUV420P ? buf[-stride] : L;
        } else {
            /* Top left is 2 rows back, last pixel */
            TL = buf[width - (2 * stride) - 1];
        }
        add_lag_median_prediction(buf, buf - stride, buf,
                                  width, &L, &TL);
    }
}

static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
                               int width, int stride, int line,
                               int is_luma)
{
    int L, TL;
    if (!line) {
        if (is_luma) {
            buf++;
            width--;
        }
        l->dsp.add_hfyu_left_prediction(buf + 1, buf + 1, width - 1, buf[0]);
        return;
    }
    if (line == 1) {
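        /* On the second line the first HEAD pixels (4 for luma, 2 for chroma)
         * are left predicted from the end of the previous line before median
         * prediction takes over for the rest of the line. */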
        const int HEAD = is_luma ? 4 : 2;
        int i;
        L  = buf[width - stride - 1];
        TL = buf[HEAD - stride - 1];
        for (i = 0; i < HEAD; i++) {
            L += buf[i];
            buf[i] = L;
        }
        buf   += HEAD;
        width -= HEAD;
    } else {
        TL = buf[width - (2 * stride) - 1];
        L  = buf[width - stride - 1];
    }
    l->dsp.add_hfyu_median_prediction(buf, buf - stride, buf, width,
                                      &L, &TL);
}

static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                           uint8_t *dst, int width, int stride,
                           int esc_count)
{
    int i = 0;
    int ret = 0;
    if (!esc_count)
        esc_count = -1;
    /* Output any zeros remaining from the previous run */
handle_zeros:
    if (l->zeros_rem) {
        int count = FFMIN(l->zeros_rem, width - i);
        memset(dst + i, 0, count);
        i += count;
        l->zeros_rem -= count;
    }
    while (i < width) {
        dst[i] = lag_get_rac(rac);
        ret++;
        if (dst[i])
            l->zeros = 0;
        else
            l->zeros++;
        i++;
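        /* A run of esc_count zero bytes is followed by one more symbol that
         * codes an additional run of zeros (see lag_calc_zero_run). */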
        if (l->zeros == esc_count) {
            int index = lag_get_rac(rac);
            ret++;
            l->zeros = 0;
            l->zeros_rem = lag_calc_zero_run(index);
            goto handle_zeros;
        }
    }
    return ret;
}

static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);
output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
    }
    while (dst < end) {
        i = 0;
        while (!zero_run && dst + i < end) {
            i++;
            if (src + i >= src_end)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
        if (zero_run) {
            zero_run = 0;
            i += esc_count;
            memcpy(dst, src, i);
            dst += i;
            l->zeros_rem = lag_calc_zero_run(src[i]);
            src += i + 1;
            goto output_zeros;
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return src - src_start;
}

static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
                                  int width, int height, int stride,
                                  const uint8_t *src, int src_size)
{
    int i = 0;
    int read = 0;
    uint32_t length;
    uint32_t offset = 1;
    int esc_count = src[0];
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;
    rac.avctx = l->avctx;
    l->zeros = 0;
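
    /* The first byte selects the coding mode: 0-3 selects range coding (with
     * an optional explicit length field), 4 means the plane is stored raw,
     * 5-7 selects zero-run coding with escape length 1-3, and 0xFF marks a
     * plane filled with a single value. */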
    if (esc_count < 4) {
        length = width * height;
        if (esc_count && AV_RL32(src + 1) < length) {
            length = AV_RL32(src + 1);
            offset += 4;
        }
        init_get_bits(&gb, src + offset, src_size * 8);
        if (lag_read_prob_header(&rac, &gb) < 0)
            return -1;
        ff_lag_rac_init(&rac, &gb, length - stride);
        for (i = 0; i < height; i++)
            read += lag_decode_line(l, &rac, dst + (i * stride), width,
                                    stride, esc_count);
        if (read > length)
            av_log(l->avctx, AV_LOG_WARNING,
                   "Output more bytes than length (%d of %d)\n", read,
                   length);
    } else if (esc_count < 8) {
        esc_count -= 4;
        if (esc_count > 0) {
            /* Zero run coding only, no range coding. */
            for (i = 0; i < height; i++) {
                int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                   src_end, width, esc_count);
                if (res < 0)
                    return res;
                src += res;
            }
        } else {
            if (src_size < width * height)
                return AVERROR_INVALIDDATA; // buffer not big enough
            /* Plane is stored uncompressed */
            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);
                src += width;
            }
        }
    } else if (esc_count == 0xff) {
        /* Plane is a solid run of the given value */
        for (i = 0; i < height; i++)
            memset(dst + i * stride, src[1], width);
        /* Do not apply prediction.
         * Note: memsetting the plane to 0 above, setting the first value to
         * src[1] and then applying prediction would give the same result. */
        return 0;
    } else {
        av_log(l->avctx, AV_LOG_ERROR,
               "Invalid zero run escape code! (%#x)\n", esc_count);
        return -1;
    }
    if (l->avctx->pix_fmt != PIX_FMT_YUV422P) {
        for (i = 0; i < height; i++) {
            lag_pred_line(l, dst, width, stride, i);
            dst += stride;
        }
    } else {
        for (i = 0; i < height; i++) {
            lag_pred_line_yuy2(l, dst, width, stride, i,
                               width == l->avctx->width);
            dst += stride;
        }
    }
    return 0;
}

/**
 * Decode a frame.
 * @param avctx codec context
 * @param data output AVFrame
 * @param data_size size of output data or 0 if no picture is returned
 * @param avpkt input packet
 * @return number of consumed bytes on success or negative if decode fails
 */
static int lag_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LagarithContext *l = avctx->priv_data;
    AVFrame *const p = &l->picture;
    uint8_t frametype = 0;
    uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
    uint32_t offs[4];
    uint8_t *srcs[4], *dst;
    int i, j, planes = 3;
    AVFrame *picture = data;
    if (p->data[0])
        avctx->release_buffer(avctx, p);
    p->reference = 0;
    p->key_frame = 1;
    frametype = buf[0];
    offset_gu = AV_RL32(buf + 1);
    offset_bv = AV_RL32(buf + 5);
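
    /* Frame header layout: byte 0 is the frame type, bytes 1-4 and 5-8 are
     * little-endian plane offsets.  For arithmetic-coded frames the first
     * plane starts at offset 9 (offset_ry), or 13 for RGBA, where bytes 9-12
     * hold the alpha plane offset.  For FRAME_SOLID_RGBA the field read into
     * offset_gu is reused as the fill color. */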
    switch (frametype) {
    case FRAME_SOLID_RGBA:
        avctx->pix_fmt = PIX_FMT_RGB32;
        if (avctx->get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }
        dst = p->data[0];
        for (j = 0; j < avctx->height; j++) {
            for (i = 0; i < avctx->width; i++)
                AV_WN32(dst + i * 4, offset_gu);
            dst += p->linesize[0];
        }
        break;
    case FRAME_ARITH_RGBA:
        avctx->pix_fmt = PIX_FMT_RGB32;
        planes = 4;
        offset_ry += 4;
        offs[3] = AV_RL32(buf + 9);
    case FRAME_ARITH_RGB24:
    case FRAME_U_RGB24:
        if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
            avctx->pix_fmt = PIX_FMT_RGB24;
        if (avctx->get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }
        offs[0] = offset_bv;
        offs[1] = offset_gu;
        offs[2] = offset_ry;
        if (!l->rgb_planes) {
            l->rgb_stride = FFALIGN(avctx->width, 16);
            l->rgb_planes = av_malloc(l->rgb_stride * avctx->height * planes);
            if (!l->rgb_planes) {
                av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
                return AVERROR(ENOMEM);
            }
        }
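
        /* Each plane is decoded bottom-up into the temporary buffer by
         * pointing srcs[i] at its last line and using a negative stride; the
         * planes are then read back top-down when the output is packed. */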
        for (i = 0; i < planes; i++)
            srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size ||
            (planes == 4 && offs[3] >= buf_size)) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < planes; i++)
            lag_decode_arith_plane(l, srcs[i],
                                   avctx->width, avctx->height,
                                   -l->rgb_stride, buf + offs[i],
                                   buf_size - offs[i]);
        dst = p->data[0];
        for (i = 0; i < planes; i++)
            srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
        for (j = 0; j < avctx->height; j++) {
            for (i = 0; i < avctx->width; i++) {
                uint8_t r, g, b, a;
                r = srcs[0][i];
                g = srcs[1][i];
                b = srcs[2][i];
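                /* R and B are coded as differences from G; add G back here. */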
                r += g;
                b += g;
                if (frametype == FRAME_ARITH_RGBA) {
                    a = srcs[3][i];
                    AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
                } else {
                    dst[i * 3 + 0] = r;
                    dst[i * 3 + 1] = g;
                    dst[i * 3 + 2] = b;
                }
            }
            dst += p->linesize[0];
            for (i = 0; i < planes; i++)
                srcs[i] += l->rgb_stride;
        }
        break;
    case FRAME_ARITH_YUY2:
        avctx->pix_fmt = PIX_FMT_YUV422P;
        if (avctx->get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }
        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }
        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height, p->linesize[2],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height, p->linesize[1],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    case FRAME_ARITH_YV12:
        avctx->pix_fmt = PIX_FMT_YUV420P;
        if (avctx->get_buffer(avctx, p) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return -1;
        }
        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }
        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height / 2, p->linesize[2],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height / 2, p->linesize[1],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported Lagarith frame type: %#x\n", frametype);
        return -1;
    }
    *picture = *p;
    *data_size = sizeof(AVFrame);
    return buf_size;
}

static av_cold int lag_decode_init(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;
    l->avctx = avctx;
    ff_dsputil_init(&l->dsp, avctx);
    return 0;
}

static av_cold int lag_decode_end(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;
    if (l->picture.data[0])
        avctx->release_buffer(avctx, &l->picture);
    av_freep(&l->rgb_planes);
    return 0;
}

AVCodec ff_lagarith_decoder = {
    .name           = "lagarith",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_LAGARITH,
    .priv_data_size = sizeof(LagarithContext),
    .init           = lag_decode_init,
    .close          = lag_decode_end,
    .decode         = lag_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
};