You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

753 lines
23KB

  1. /*
  2. * Lagarith lossless decoder
  3. * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Lagarith lossless decoder
  24. * @author Nathan Caldwell
  25. */
  26. #include <inttypes.h>
  27. #include "avcodec.h"
  28. #include "get_bits.h"
  29. #include "mathops.h"
  30. #include "lagarithrac.h"
  31. #include "lossless_videodsp.h"
  32. #include "thread.h"
/** Frame type codes stored in the first byte of each Lagarith packet. */
enum LagarithFrameType {
    FRAME_RAW           = 1,    /**< uncompressed */
    FRAME_U_RGB24       = 2,    /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,    /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,    /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,    /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,    /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,    /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,    /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,    /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10,   /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11,   /**< reduced resolution YV12 frame */
};
typedef struct LagarithContext {
    AVCodecContext *avctx;      /**< owning codec context (set in lag_decode_init) */
    LLVidDSPContext llviddsp;   /**< DSP helpers for left/median prediction */
    int zeros;                  /**< number of consecutive zero bytes encountered */
    int zeros_rem;              /**< number of zero bytes remaining to output */
    uint8_t *rgb_planes;        /**< scratch buffer holding planar RGB(A) before interleaving */
    int rgb_planes_allocated;   /**< allocated size of rgb_planes, for av_fast_malloc() */
    int rgb_stride;             /**< stride of each plane inside rgb_planes (width aligned to 16) */
} LagarithContext;
  55. /**
  56. * Compute the 52-bit mantissa of 1/(double)denom.
  57. * This crazy format uses floats in an entropy coder and we have to match x86
  58. * rounding exactly, thus ordinary floats aren't portable enough.
  59. * @param denom denominator
  60. * @return 52-bit mantissa
  61. * @see softfloat_mul
  62. */
  63. static uint64_t softfloat_reciprocal(uint32_t denom)
  64. {
  65. int shift = av_log2(denom - 1) + 1;
  66. uint64_t ret = (1ULL << 52) / denom;
  67. uint64_t err = (1ULL << 52) - ret * denom;
  68. ret <<= shift;
  69. err <<= shift;
  70. err += denom / 2;
  71. return ret + err / denom;
  72. }
  73. /**
  74. * (uint32_t)(x*f), where f has the given mantissa, and exponent 0
  75. * Used in combination with softfloat_reciprocal computes x/(double)denom.
  76. * @param x 32-bit integer factor
  77. * @param mantissa mantissa of f with exponent 0
  78. * @return 32-bit integer value (x*f)
  79. * @see softfloat_reciprocal
  80. */
  81. static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
  82. {
  83. uint64_t l = x * (mantissa & 0xffffffff);
  84. uint64_t h = x * (mantissa >> 32);
  85. h += l >> 32;
  86. l &= 0xffffffff;
  87. l += 1 << av_log2(h >> 21);
  88. h += l >> 32;
  89. return h >> 20;
  90. }
  91. static uint8_t lag_calc_zero_run(int8_t x)
  92. {
  93. return (x << 1) ^ (x >> 7);
  94. }
  95. static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
  96. {
  97. static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
  98. int i;
  99. int bit = 0;
  100. int bits = 0;
  101. int prevbit = 0;
  102. unsigned val;
  103. for (i = 0; i < 7; i++) {
  104. if (prevbit && bit)
  105. break;
  106. prevbit = bit;
  107. bit = get_bits1(gb);
  108. if (bit && !prevbit)
  109. bits += series[i];
  110. }
  111. bits--;
  112. if (bits < 0 || bits > 31) {
  113. *value = 0;
  114. return -1;
  115. } else if (bits == 0) {
  116. *value = 0;
  117. return 0;
  118. }
  119. val = get_bits_long(gb, bits);
  120. val |= 1U << bits;
  121. *value = val - 1;
  122. return 0;
  123. }
/**
 * Read the 256-symbol probability model from the bitstream and rescale it
 * so that the cumulative probability is an exact power of two, as required
 * by the range coder.
 *
 * @param rac range coder context whose prob[] table is filled in
 * @param gb  bitstream reader positioned at the probability header
 * @return 0 on success, negative value on invalid data
 */
static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
{
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;

    rac->prob[0] = 0;
    rac->prob[257] = UINT_MAX;
    /* Read probabilities from bitstream */
    for (i = 1; i < 257; i++) {
        if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
            return -1;
        }
        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
            av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
            return -1;
        }
        cumul_prob += rac->prob[i];
        if (!rac->prob[i]) {
            /* A zero probability is followed by a run length of further
             * zero-probability symbols. */
            if (lag_decode_prob(gb, &prob)) {
                av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
                return -1;
            }
            /* Clamp the run so it cannot write past prob[256]. */
            if (prob > 256 - i)
                prob = 256 - i;
            for (j = 0; j < prob; j++)
                rac->prob[++i] = 0;
        }
    }

    if (!cumul_prob) {
        av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
        return -1;
    }

    /* Scale probabilities so cumulative probability is an even power of 2. */
    scale_factor = av_log2(cumul_prob);

    if (cumul_prob & (cumul_prob - 1)) {
        /* cumul_prob is not a power of two: rescale every probability by
         * 1/cumul_prob using the x86-exact softfloat helpers. */
        uint64_t mul = softfloat_reciprocal(cumul_prob);
        /* First half is summed separately so an all-zero result can be
         * rejected before the redistribution loop below (which would
         * otherwise never terminate). */
        for (i = 1; i <= 128; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        if (scaled_cumul_prob <= 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Scaled probabilities invalid\n");
            return AVERROR_INVALIDDATA;
        }

        for (; i < 257; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        scale_factor++;
        cumulative_target = 1 << scale_factor;

        if (scaled_cumul_prob > cumulative_target) {
            av_log(rac->avctx, AV_LOG_ERROR,
                   "Scaled probabilities are larger than target!\n");
            return -1;
        }

        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;

        /* Distribute the rounding deficit over the nonzero symbols,
         * cycling through indices 1..128 to match the reference coder. */
        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
            if (rac->prob[i]) {
                rac->prob[i]++;
                scaled_cumul_prob--;
            }
            /* Comment from reference source:
             * if (b & 0x80 == 0) {     // order of operations is 'wrong'; it has been left this way
             *                          // since the compression change is negligible and fixing it
             *                          // breaks backwards compatibility
             *      b =- (signed int)b;
             *      b &= 0xFF;
             * } else {
             *      b++;
             *      b &= 0x7f;
             * }
             */
        }
    }

    rac->scale = scale_factor;

    /* Fill probability array with cumulative probability for each symbol. */
    for (i = 1; i < 257; i++)
        rac->prob[i] += rac->prob[i - 1];

    return 0;
}
  206. static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
  207. uint8_t *diff, int w, int *left,
  208. int *left_top)
  209. {
  210. /* This is almost identical to add_hfyu_median_pred in huffyuvdsp.h.
  211. * However the &0xFF on the gradient predictor yields incorrect output
  212. * for lagarith.
  213. */
  214. int i;
  215. uint8_t l, lt;
  216. l = *left;
  217. lt = *left_top;
  218. for (i = 0; i < w; i++) {
  219. l = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
  220. lt = src1[i];
  221. dst[i] = l;
  222. }
  223. *left = l;
  224. *left_top = lt;
  225. }
  226. static void lag_pred_line(LagarithContext *l, uint8_t *buf,
  227. int width, int stride, int line)
  228. {
  229. int L, TL;
  230. if (!line) {
  231. /* Left prediction only for first line */
  232. L = l->llviddsp.add_left_pred(buf, buf, width, 0);
  233. } else {
  234. /* Left pixel is actually prev_row[width] */
  235. L = buf[width - stride - 1];
  236. if (line == 1) {
  237. /* Second line, left predict first pixel, the rest of the line is median predicted
  238. * NOTE: In the case of RGB this pixel is top predicted */
  239. TL = l->avctx->pix_fmt == AV_PIX_FMT_YUV420P ? buf[-stride] : L;
  240. } else {
  241. /* Top left is 2 rows back, last pixel */
  242. TL = buf[width - (2 * stride) - 1];
  243. }
  244. add_lag_median_prediction(buf, buf - stride, buf,
  245. width, &L, &TL);
  246. }
  247. }
/**
 * Undo the spatial prediction for one line of a YUY2 (YUV422P) plane.
 * Unlike lag_pred_line(), the gradient predictor here IS masked to 8 bits,
 * and the first few bytes of the second line are left predicted.
 *
 * @param is_luma nonzero for the luma plane (full width); changes the
 *                left-predicted head length and first-line seed handling
 */
static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
                               int width, int stride, int line,
                               int is_luma)
{
    int L, TL;

    if (!line) {
        /* First line: left prediction only.  For luma the first byte is a
         * raw seed value that must not participate in the prediction, so
         * it is temporarily zeroed and restored afterwards. */
        L = buf[0];
        if (is_luma)
            buf[0] = 0;
        l->llviddsp.add_left_pred(buf, buf, width, 0);
        if (is_luma)
            buf[0] = L;
        return;
    }
    if (line == 1) {
        /* Second line: the first HEAD bytes (4 for luma, 2 for chroma) are
         * left predicted; the remainder is median predicted with the
         * predictors masked to 8 bits. */
        const int HEAD = is_luma ? 4 : 2;
        int i;

        L  = buf[width - stride - 1];   /* last pixel of the previous row */
        TL = buf[HEAD - stride - 1];
        for (i = 0; i < HEAD; i++) {
            L += buf[i];
            buf[i] = L;
        }
        for (; i < width; i++) {
            L = mid_pred(L & 0xFF, buf[i - stride], (L + buf[i - stride] - TL) & 0xFF) + buf[i];
            TL = buf[i - stride];
            buf[i] = L;
        }
    } else {
        /* Remaining lines: standard median prediction against the row above. */
        TL = buf[width - (2 * stride) - 1];
        L  = buf[width - stride - 1];
        l->llviddsp.add_median_pred(buf, buf - stride, buf, width, &L, &TL);
    }
}
  282. static int lag_decode_line(LagarithContext *l, lag_rac *rac,
  283. uint8_t *dst, int width, int stride,
  284. int esc_count)
  285. {
  286. int i = 0;
  287. int ret = 0;
  288. if (!esc_count)
  289. esc_count = -1;
  290. /* Output any zeros remaining from the previous run */
  291. handle_zeros:
  292. if (l->zeros_rem) {
  293. int count = FFMIN(l->zeros_rem, width - i);
  294. memset(dst + i, 0, count);
  295. i += count;
  296. l->zeros_rem -= count;
  297. }
  298. while (i < width) {
  299. dst[i] = lag_get_rac(rac);
  300. ret++;
  301. if (dst[i])
  302. l->zeros = 0;
  303. else
  304. l->zeros++;
  305. i++;
  306. if (l->zeros == esc_count) {
  307. int index = lag_get_rac(rac);
  308. ret++;
  309. l->zeros = 0;
  310. l->zeros_rem = lag_calc_zero_run(index);
  311. goto handle_zeros;
  312. }
  313. }
  314. return ret;
  315. }
/**
 * Decode one line stored with zero-run coding only (no range coder).
 *
 * @param src_end one past the last readable source byte
 * @return number of source bytes consumed, or a negative error code
 */
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    /* Masks enable checking of src[i+1]/src[i+2] only when esc_count
     * requires 2 or 3 consecutive zero bytes for an escape. */
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);

    avpriv_request_sample(l->avctx, "zero_run_line");

    memset(dst, 0, width);

output_zeros:
    /* Flush zeros pending from a run started earlier. */
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        if (end - dst < count) {
            av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
            return AVERROR_INVALIDDATA;
        }
        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
    }

    while (dst < end) {
        i = 0;
        /* Scan forward for an escape (esc_count consecutive zero bytes). */
        while (!zero_run && dst + i < end) {
            i++;
            if (i+2 >= src_end - src)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
        if (zero_run) {
            zero_run = 0;
            i += esc_count;
            /* Copy the literal bytes including the escape marker, then read
             * the zigzag-coded run length that follows it. */
            memcpy(dst, src, i);
            dst += i;
            l->zeros_rem = lag_calc_zero_run(src[i]);

            src += i + 1;
            goto output_zeros;
        } else {
            /* No escape before end of line: plain literal copy. */
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return src - src_start;
}
/**
 * Decode a single plane: dispatch on the escape-count byte to range
 * coding, zero-run coding, raw storage, or a solid fill, then undo the
 * spatial prediction.
 *
 * @param stride may be negative (RGB planes are decoded bottom-up)
 * @return 0 on success, a negative AVERROR code on failure
 */
static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
                                  int width, int height, int stride,
                                  const uint8_t *src, int src_size)
{
    int i = 0;
    int read = 0;
    uint32_t length;
    uint32_t offset = 1;
    int esc_count;
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;
    int ret;

    rac.avctx = l->avctx;
    l->zeros = 0;

    if(src_size < 2)
        return AVERROR_INVALIDDATA;

    esc_count = src[0];
    if (esc_count < 4) {
        /* Range-coded plane; an optional 32-bit coded length follows. */
        length = width * height;
        if(src_size < 5)
            return AVERROR_INVALIDDATA;
        if (esc_count && AV_RL32(src + 1) < length) {
            length = AV_RL32(src + 1);
            offset += 4;
        }

        if ((ret = init_get_bits8(&gb, src + offset, src_size - offset)) < 0)
            return ret;

        if (lag_read_prob_header(&rac, &gb) < 0)
            return -1;

        /* NOTE(review): the return value of ff_lag_rac_init() is ignored
         * here — confirm it cannot fail for the lengths passed. */
        ff_lag_rac_init(&rac, &gb, length - stride);
        for (i = 0; i < height; i++)
            read += lag_decode_line(l, &rac, dst + (i * stride), width,
                                    stride, esc_count);

        if (read > length)
            av_log(l->avctx, AV_LOG_WARNING,
                   "Output more bytes than length (%d of %"PRIu32")\n", read,
                   length);
    } else if (esc_count < 8) {
        esc_count -= 4;
        src ++;
        src_size --;
        if (esc_count > 0) {
            /* Zero run coding only, no range coding. */
            for (i = 0; i < height; i++) {
                int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                   src_end, width, esc_count);
                if (res < 0)
                    return res;
                src += res;
            }
        } else {
            if (src_size < width * height)
                return AVERROR_INVALIDDATA; // buffer not big enough
            /* Plane is stored uncompressed */
            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);
                src += width;
            }
        }
    } else if (esc_count == 0xff) {
        /* Plane is a solid run of given value */
        for (i = 0; i < height; i++)
            memset(dst + i * stride, src[1], width);
        /* Do not apply prediction.
           Note: memset to 0 above, setting first value to src[1]
           and applying prediction gives the same result. */
        return 0;
    } else {
        av_log(l->avctx, AV_LOG_ERROR,
               "Invalid zero run escape code! (%#x)\n", esc_count);
        return -1;
    }

    /* Undo the spatial prediction line by line. */
    if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
        for (i = 0; i < height; i++) {
            lag_pred_line(l, dst, width, stride, i);
            dst += stride;
        }
    } else {
        /* is_luma is inferred from the plane width for YUV422P. */
        for (i = 0; i < height; i++) {
            lag_pred_line_yuy2(l, dst, width, stride, i,
                               width == l->avctx->width);
            dst += stride;
        }
    }

    return 0;
}
  452. /**
  453. * Decode a frame.
  454. * @param avctx codec context
  455. * @param data output AVFrame
  456. * @param data_size size of output data or 0 if no picture is returned
  457. * @param avpkt input packet
  458. * @return number of consumed bytes on success or negative if decode fails
  459. */
  460. static int lag_decode_frame(AVCodecContext *avctx,
  461. void *data, int *got_frame, AVPacket *avpkt)
  462. {
  463. const uint8_t *buf = avpkt->data;
  464. unsigned int buf_size = avpkt->size;
  465. LagarithContext *l = avctx->priv_data;
  466. ThreadFrame frame = { .f = data };
  467. AVFrame *const p = data;
  468. uint8_t frametype = 0;
  469. uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
  470. uint32_t offs[4];
  471. uint8_t *srcs[4], *dst;
  472. int i, j, planes = 3;
  473. int ret;
  474. p->key_frame = 1;
  475. frametype = buf[0];
  476. offset_gu = AV_RL32(buf + 1);
  477. offset_bv = AV_RL32(buf + 5);
  478. switch (frametype) {
  479. case FRAME_SOLID_RGBA:
  480. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  481. case FRAME_SOLID_GRAY:
  482. if (frametype == FRAME_SOLID_GRAY)
  483. if (avctx->bits_per_coded_sample == 24) {
  484. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  485. } else {
  486. avctx->pix_fmt = AV_PIX_FMT_0RGB32;
  487. planes = 4;
  488. }
  489. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  490. return ret;
  491. dst = p->data[0];
  492. if (frametype == FRAME_SOLID_RGBA) {
  493. for (j = 0; j < avctx->height; j++) {
  494. for (i = 0; i < avctx->width; i++)
  495. AV_WN32(dst + i * 4, offset_gu);
  496. dst += p->linesize[0];
  497. }
  498. } else {
  499. for (j = 0; j < avctx->height; j++) {
  500. memset(dst, buf[1], avctx->width * planes);
  501. dst += p->linesize[0];
  502. }
  503. }
  504. break;
  505. case FRAME_SOLID_COLOR:
  506. if (avctx->bits_per_coded_sample == 24) {
  507. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  508. } else {
  509. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  510. offset_gu |= 0xFFU << 24;
  511. }
  512. if ((ret = ff_thread_get_buffer(avctx, &frame,0)) < 0)
  513. return ret;
  514. dst = p->data[0];
  515. for (j = 0; j < avctx->height; j++) {
  516. for (i = 0; i < avctx->width; i++)
  517. if (avctx->bits_per_coded_sample == 24) {
  518. AV_WB24(dst + i * 3, offset_gu);
  519. } else {
  520. AV_WN32(dst + i * 4, offset_gu);
  521. }
  522. dst += p->linesize[0];
  523. }
  524. break;
  525. case FRAME_ARITH_RGBA:
  526. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  527. planes = 4;
  528. offset_ry += 4;
  529. offs[3] = AV_RL32(buf + 9);
  530. case FRAME_ARITH_RGB24:
  531. case FRAME_U_RGB24:
  532. if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
  533. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  534. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  535. return ret;
  536. offs[0] = offset_bv;
  537. offs[1] = offset_gu;
  538. offs[2] = offset_ry;
  539. l->rgb_stride = FFALIGN(avctx->width, 16);
  540. av_fast_malloc(&l->rgb_planes, &l->rgb_planes_allocated,
  541. l->rgb_stride * avctx->height * planes + 1);
  542. if (!l->rgb_planes) {
  543. av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
  544. return AVERROR(ENOMEM);
  545. }
  546. for (i = 0; i < planes; i++)
  547. srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
  548. for (i = 0; i < planes; i++)
  549. if (buf_size <= offs[i]) {
  550. av_log(avctx, AV_LOG_ERROR,
  551. "Invalid frame offsets\n");
  552. return AVERROR_INVALIDDATA;
  553. }
  554. for (i = 0; i < planes; i++)
  555. lag_decode_arith_plane(l, srcs[i],
  556. avctx->width, avctx->height,
  557. -l->rgb_stride, buf + offs[i],
  558. buf_size - offs[i]);
  559. dst = p->data[0];
  560. for (i = 0; i < planes; i++)
  561. srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
  562. for (j = 0; j < avctx->height; j++) {
  563. for (i = 0; i < avctx->width; i++) {
  564. uint8_t r, g, b, a;
  565. r = srcs[0][i];
  566. g = srcs[1][i];
  567. b = srcs[2][i];
  568. r += g;
  569. b += g;
  570. if (frametype == FRAME_ARITH_RGBA) {
  571. a = srcs[3][i];
  572. AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
  573. } else {
  574. dst[i * 3 + 0] = r;
  575. dst[i * 3 + 1] = g;
  576. dst[i * 3 + 2] = b;
  577. }
  578. }
  579. dst += p->linesize[0];
  580. for (i = 0; i < planes; i++)
  581. srcs[i] += l->rgb_stride;
  582. }
  583. break;
  584. case FRAME_ARITH_YUY2:
  585. avctx->pix_fmt = AV_PIX_FMT_YUV422P;
  586. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  587. return ret;
  588. if (offset_ry >= buf_size ||
  589. offset_gu >= buf_size ||
  590. offset_bv >= buf_size) {
  591. av_log(avctx, AV_LOG_ERROR,
  592. "Invalid frame offsets\n");
  593. return AVERROR_INVALIDDATA;
  594. }
  595. lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
  596. p->linesize[0], buf + offset_ry,
  597. buf_size - offset_ry);
  598. lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
  599. avctx->height, p->linesize[1],
  600. buf + offset_gu, buf_size - offset_gu);
  601. lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
  602. avctx->height, p->linesize[2],
  603. buf + offset_bv, buf_size - offset_bv);
  604. break;
  605. case FRAME_ARITH_YV12:
  606. avctx->pix_fmt = AV_PIX_FMT_YUV420P;
  607. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  608. return ret;
  609. if (buf_size <= offset_ry || buf_size <= offset_gu || buf_size <= offset_bv) {
  610. return AVERROR_INVALIDDATA;
  611. }
  612. if (offset_ry >= buf_size ||
  613. offset_gu >= buf_size ||
  614. offset_bv >= buf_size) {
  615. av_log(avctx, AV_LOG_ERROR,
  616. "Invalid frame offsets\n");
  617. return AVERROR_INVALIDDATA;
  618. }
  619. lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
  620. p->linesize[0], buf + offset_ry,
  621. buf_size - offset_ry);
  622. lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
  623. (avctx->height + 1) / 2, p->linesize[2],
  624. buf + offset_gu, buf_size - offset_gu);
  625. lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
  626. (avctx->height + 1) / 2, p->linesize[1],
  627. buf + offset_bv, buf_size - offset_bv);
  628. break;
  629. default:
  630. av_log(avctx, AV_LOG_ERROR,
  631. "Unsupported Lagarith frame type: %#"PRIx8"\n", frametype);
  632. return AVERROR_PATCHWELCOME;
  633. }
  634. *got_frame = 1;
  635. return buf_size;
  636. }
  637. static av_cold int lag_decode_init(AVCodecContext *avctx)
  638. {
  639. LagarithContext *l = avctx->priv_data;
  640. l->avctx = avctx;
  641. ff_llviddsp_init(&l->llviddsp);
  642. return 0;
  643. }
  644. static av_cold int lag_decode_end(AVCodecContext *avctx)
  645. {
  646. LagarithContext *l = avctx->priv_data;
  647. av_freep(&l->rgb_planes);
  648. return 0;
  649. }
/* Decoder registration: supports direct rendering and frame threading. */
AVCodec ff_lagarith_decoder = {
    .name           = "lagarith",
    .long_name      = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LAGARITH,
    .priv_data_size = sizeof(LagarithContext),
    .init           = lag_decode_init,
    .close          = lag_decode_end,
    .decode         = lag_decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};