/*
 * Lagarith lossless decoder
 * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lagarith lossless decoder
 * @author Nathan Caldwell
 */

#include <inttypes.h>

#include "avcodec.h"
#include "get_bits.h"
#include "mathops.h"
#include "dsputil.h"
#include "lagarithrac.h"
#include "thread.h"

enum LagarithFrameType {
    FRAME_RAW           = 1,    /**< uncompressed */
    FRAME_U_RGB24       = 2,    /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,    /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,    /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,    /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,    /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,    /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,    /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,    /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10,   /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11,   /**< reduced resolution YV12 frame */
};

typedef struct LagarithContext {
    AVCodecContext *avctx;
    DSPContext dsp;
    int zeros;                  /**< number of consecutive zero bytes encountered */
    int zeros_rem;              /**< number of zero bytes remaining to output */
    uint8_t *rgb_planes;
    int rgb_planes_allocated;
    int rgb_stride;
} LagarithContext;

/**
 * Compute the 52bit mantissa of 1/(double)denom.
 * This crazy format uses floats in an entropy coder and we have to match x86
 * rounding exactly, thus ordinary floats aren't portable enough.
 * @param denom denominator
 * @return 52bit mantissa
 * @see softfloat_mul
 */
static uint64_t softfloat_reciprocal(uint32_t denom)
{
    int shift = av_log2(denom - 1) + 1;
    uint64_t ret = (1ULL << 52) / denom;
    uint64_t err = (1ULL << 52) - ret * denom;
    ret <<= shift;
    err <<= shift;
    err += denom / 2;
    return ret + err / denom;
}

/**
 * (uint32_t)(x*f), where f has the given mantissa and exponent 0.
 * Used in combination with softfloat_reciprocal to compute x/(double)denom.
 * @param x 32bit integer factor
 * @param mantissa mantissa of f with exponent 0
 * @return 32bit integer value (x*f)
 * @see softfloat_reciprocal
 */
static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
{
    uint64_t l = x * (mantissa & 0xffffffff);
    uint64_t h = x * (mantissa >> 32);
    h += l >> 32;
    l &= 0xffffffff;
    l += 1 << av_log2(h >> 21);
    h += l >> 32;
    return h >> 20;
}
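
/**
 * Expand a zigzag-coded byte into a zero-run length.
 * The mapping is 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...; for example
 * x = -3 gives (-6) ^ (-1) = 5.  Relies on arithmetic right shift of the
 * signed argument.
 */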
static uint8_t lag_calc_zero_run(int8_t x)
{
    return (x << 1) ^ (x >> 7);
}
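
/**
 * Read a single probability value from the bitstream.
 * The number of value bits is transmitted with a Fibonacci-style prefix
 * (sums of 1, 2, 3, 5, 8, 13, 21) terminated by two consecutive set bits;
 * the value itself follows as that many raw bits.
 * @return 0 on success, -1 if the coded bit count is out of range
 */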
static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
{
    static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
    int i;
    int bit     = 0;
    int bits    = 0;
    int prevbit = 0;
    unsigned val;

    for (i = 0; i < 7; i++) {
        if (prevbit && bit)
            break;
        prevbit = bit;
        bit = get_bits1(gb);
        if (bit && !prevbit)
            bits += series[i];
    }
    bits--;
    if (bits < 0 || bits > 31) {
        *value = 0;
        return -1;
    } else if (bits == 0) {
        *value = 0;
        return 0;
    }

    val  = get_bits_long(gb, bits);
    val |= 1 << bits;

    *value = val - 1;

    return 0;
}
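
/**
 * Read the probability table used by the range coder.
 * 256 symbol frequencies are read with lag_decode_prob() (runs of zero
 * frequencies are themselves run-length coded), scaled so that their sum
 * becomes a power of two, and finally converted into cumulative
 * probabilities.
 * @return 0 on success, a negative value on invalid input
 */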
static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
{
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;

    rac->prob[0]   = 0;
    rac->prob[257] = UINT_MAX;
    /* Read probabilities from bitstream */
    for (i = 1; i < 257; i++) {
        if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
            return -1;
        }
        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
            av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
            return -1;
        }
        cumul_prob += rac->prob[i];
        if (!rac->prob[i]) {
            if (lag_decode_prob(gb, &prob)) {
                av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
                return -1;
            }
            if (prob > 256 - i)
                prob = 256 - i;
            for (j = 0; j < prob; j++)
                rac->prob[++i] = 0;
        }
    }

    if (!cumul_prob) {
        av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
        return -1;
    }

    /* Scale probabilities so cumulative probability is an even power of 2. */
    scale_factor = av_log2(cumul_prob);

    if (cumul_prob & (cumul_prob - 1)) {
        uint64_t mul = softfloat_reciprocal(cumul_prob);
        for (i = 1; i <= 128; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        if (scaled_cumul_prob <= 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Scaled probabilities invalid\n");
            return AVERROR_INVALIDDATA;
        }

        for (; i < 257; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        scale_factor++;
        cumulative_target = 1 << scale_factor;

        if (scaled_cumul_prob > cumulative_target) {
            av_log(rac->avctx, AV_LOG_ERROR,
                   "Scaled probabilities are larger than target!\n");
            return -1;
        }

        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;

        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
            if (rac->prob[i]) {
                rac->prob[i]++;
                scaled_cumul_prob--;
            }
            /* Comment from reference source:
             * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
             *                      // since the compression change is negligible and fixing it
             *                      // breaks backwards compatibility
             *      b =- (signed int)b;
             *      b &= 0xFF;
             * } else {
             *      b++;
             *      b &= 0x7f;
             * }
             */
        }
    }

    rac->scale = scale_factor;

    /* Fill probability array with cumulative probability for each symbol. */
    for (i = 1; i < 257; i++)
        rac->prob[i] += rac->prob[i - 1];

    return 0;
}

static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
                                      uint8_t *diff, int w, int *left,
                                      int *left_top)
{
    /* This is almost identical to add_hfyu_median_prediction in dsputil.h.
     * However the &0xFF on the gradient predictor yields incorrect output
     * for lagarith.
     */
    int i;
    uint8_t l, lt;

    l  = *left;
    lt = *left_top;

    for (i = 0; i < w; i++) {
        l      = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
        lt     = src1[i];
        dst[i] = l;
    }

    *left     = l;
    *left_top = lt;
}
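
/**
 * Reverse the spatial prediction for one line of a planar (non-YUY2) plane.
 * The first line is left predicted only; later lines use the unmasked median
 * predictor above, seeded from the last pixels of the previous line(s).
 */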
static void lag_pred_line(LagarithContext *l, uint8_t *buf,
                          int width, int stride, int line)
{
    int L, TL;

    if (!line) {
        /* Left prediction only for first line */
        L = l->dsp.add_hfyu_left_prediction(buf, buf, width, 0);
    } else {
        /* Left pixel is actually prev_row[width] */
        L = buf[width - stride - 1];

        if (line == 1) {
            /* Second line, left predict first pixel, the rest of the line is median predicted
             * NOTE: In the case of RGB this pixel is top predicted */
            TL = l->avctx->pix_fmt == AV_PIX_FMT_YUV420P ? buf[-stride] : L;
        } else {
            /* Top left is 2 rows back, last pixel */
            TL = buf[width - (2 * stride) - 1];
        }

        add_lag_median_prediction(buf, buf - stride, buf,
                                  width, &L, &TL);
    }
}
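
/**
 * Same as lag_pred_line(), but for YUY2 layout: on the second line the first
 * few pixels (4 for luma, 2 for chroma) are left predicted before median
 * prediction takes over, and the gradient predictor is masked to 8 bits.
 */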
static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
                               int width, int stride, int line,
                               int is_luma)
{
    int L, TL;

    if (!line) {
        L = buf[0];
        if (is_luma)
            buf[0] = 0;
        l->dsp.add_hfyu_left_prediction(buf, buf, width, 0);
        if (is_luma)
            buf[0] = L;
        return;
    }
    if (line == 1) {
        const int HEAD = is_luma ? 4 : 2;
        int i;

        L  = buf[width - stride - 1];
        TL = buf[HEAD  - stride - 1];
        for (i = 0; i < HEAD; i++) {
            L += buf[i];
            buf[i] = L;
        }
        for (; i < width; i++) {
            L      = mid_pred(L & 0xFF, buf[i - stride], (L + buf[i - stride] - TL) & 0xFF) + buf[i];
            TL     = buf[i - stride];
            buf[i] = L;
        }
    } else {
        TL = buf[width - (2 * stride) - 1];
        L  = buf[width - stride - 1];
        l->dsp.add_hfyu_median_prediction(buf, buf - stride, buf, width,
                                          &L, &TL);
    }
}
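
/**
 * Decode one line of range-coded bytes into dst.
 * Once esc_count consecutive zero bytes have been decoded, the next coded
 * byte is a zigzag-coded zero-run length that is expanded with memset().
 * @return the number of symbols read from the range coder
 */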
static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                           uint8_t *dst, int width, int stride,
                           int esc_count)
{
    int i = 0;
    int ret = 0;

    if (!esc_count)
        esc_count = -1;

    /* Output any zeros remaining from the previous run */
handle_zeros:
    if (l->zeros_rem) {
        int count = FFMIN(l->zeros_rem, width - i);
        memset(dst + i, 0, count);
        i += count;
        l->zeros_rem -= count;
    }

    while (i < width) {
        dst[i] = lag_get_rac(rac);
        ret++;

        if (dst[i])
            l->zeros = 0;
        else
            l->zeros++;

        i++;
        if (l->zeros == esc_count) {
            int index = lag_get_rac(rac);
            ret++;

            l->zeros = 0;

            l->zeros_rem = lag_calc_zero_run(index);
            goto handle_zeros;
        }
    }
    return ret;
}
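
/**
 * Decode one line of a plane that uses zero-run coding without range coding.
 * Literal bytes are copied from src until a run of esc_count zero bytes is
 * found; the byte following the run gives the zigzag-coded length of the
 * zero run to output.
 * @return the number of bytes consumed from src, or a negative error code
 */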
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);

    avpriv_request_sample(l->avctx, "zero_run_line");

    memset(dst, 0, width);

output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        if (end - dst < count) {
            av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
            return AVERROR_INVALIDDATA;
        }
        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
    }

    while (dst < end) {
        i = 0;
        while (!zero_run && dst + i < end) {
            i++;
            if (i + 2 >= src_end - src)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
        if (zero_run) {
            zero_run = 0;
            i += esc_count;
            memcpy(dst, src, i);
            dst += i;
            l->zeros_rem = lag_calc_zero_run(src[i]);

            src += i + 1;
            goto output_zeros;
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return src - src_start;
}
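
/**
 * Decode a single plane.
 * The first src byte selects the coding mode: values below 4 give the
 * zero-run escape count for range-coded data, 4..7 select zero-run-only
 * coding or a raw plane, 0xff means a solid fill of src[1], and anything
 * else is rejected.  After decoding, the spatial prediction is reversed
 * line by line.
 * @return 0 on success, a negative value on error
 */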
static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
                                  int width, int height, int stride,
                                  const uint8_t *src, int src_size)
{
    int i = 0;
    int read = 0;
    uint32_t length;
    uint32_t offset = 1;
    int esc_count;
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;
    int ret;

    rac.avctx = l->avctx;
    l->zeros = 0;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    esc_count = src[0];
    if (esc_count < 4) {
        length = width * height;
        if (src_size < 5)
            return AVERROR_INVALIDDATA;
        if (esc_count && AV_RL32(src + 1) < length) {
            length = AV_RL32(src + 1);
            offset += 4;
        }

        if ((ret = init_get_bits8(&gb, src + offset, src_size - offset)) < 0)
            return ret;

        if (lag_read_prob_header(&rac, &gb) < 0)
            return -1;

        ff_lag_rac_init(&rac, &gb, length - stride);

        for (i = 0; i < height; i++)
            read += lag_decode_line(l, &rac, dst + (i * stride), width,
                                    stride, esc_count);

        if (read > length)
            av_log(l->avctx, AV_LOG_WARNING,
                   "Output more bytes than length (%d of %"PRIu32")\n", read,
                   length);
    } else if (esc_count < 8) {
        esc_count -= 4;
        src++;
        src_size--;
        if (esc_count > 0) {
            /* Zero run coding only, no range coding. */
            for (i = 0; i < height; i++) {
                int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                   src_end, width, esc_count);
                if (res < 0)
                    return res;
                src += res;
            }
        } else {
            if (src_size < width * height)
                return AVERROR_INVALIDDATA; // buffer not big enough
            /* Plane is stored uncompressed */
            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);
                src += width;
            }
        }
    } else if (esc_count == 0xff) {
        /* Plane is a solid run of given value */
        for (i = 0; i < height; i++)
            memset(dst + i * stride, src[1], width);
        /* Do not apply prediction.
           Note: memset to 0 above, setting first value to src[1]
           and applying prediction gives the same result. */
        return 0;
    } else {
        av_log(l->avctx, AV_LOG_ERROR,
               "Invalid zero run escape code! (%#x)\n", esc_count);
        return -1;
    }

    if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
        for (i = 0; i < height; i++) {
            lag_pred_line(l, dst, width, stride, i);
            dst += stride;
        }
    } else {
        for (i = 0; i < height; i++) {
            lag_pred_line_yuy2(l, dst, width, stride, i,
                               width == l->avctx->width);
            dst += stride;
        }
    }

    return 0;
}

/**
 * Decode a frame.
 * @param avctx codec context
 * @param data output AVFrame
 * @param got_frame set to 1 if a picture is returned, 0 otherwise
 * @param avpkt input packet
 * @return number of consumed bytes on success or negative if decode fails
 */
static int lag_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    unsigned int buf_size = avpkt->size;
    LagarithContext *l = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *const p  = data;
    uint8_t frametype = 0;
    uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
    uint32_t offs[4];
    uint8_t *srcs[4], *dst;
    int i, j, planes = 3;
    int ret;

    p->key_frame = 1;

    frametype = buf[0];

    offset_gu = AV_RL32(buf + 1);
    offset_bv = AV_RL32(buf + 5);

    switch (frametype) {
    case FRAME_SOLID_RGBA:
        avctx->pix_fmt = AV_PIX_FMT_RGB32;
    case FRAME_SOLID_GRAY:
        if (frametype == FRAME_SOLID_GRAY)
            if (avctx->bits_per_coded_sample == 24) {
                avctx->pix_fmt = AV_PIX_FMT_RGB24;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_0RGB32;
                planes = 4;
            }

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        dst = p->data[0];
        if (frametype == FRAME_SOLID_RGBA) {
            for (j = 0; j < avctx->height; j++) {
                for (i = 0; i < avctx->width; i++)
                    AV_WN32(dst + i * 4, offset_gu);
                dst += p->linesize[0];
            }
        } else {
            for (j = 0; j < avctx->height; j++) {
                memset(dst, buf[1], avctx->width * planes);
                dst += p->linesize[0];
            }
        }
        break;
    case FRAME_SOLID_COLOR:
        if (avctx->bits_per_coded_sample == 24) {
            avctx->pix_fmt = AV_PIX_FMT_RGB24;
        } else {
            avctx->pix_fmt = AV_PIX_FMT_RGB32;
            offset_gu |= 0xFFU << 24;
        }

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        dst = p->data[0];
        for (j = 0; j < avctx->height; j++) {
            for (i = 0; i < avctx->width; i++)
                if (avctx->bits_per_coded_sample == 24) {
                    AV_WB24(dst + i * 3, offset_gu);
                } else {
                    AV_WN32(dst + i * 4, offset_gu);
                }
            dst += p->linesize[0];
        }
        break;
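
    /* For the RGB(A) frame types below, each plane is decoded bottom-up into
     * a temporary buffer (hence the negative stride passed to
     * lag_decode_arith_plane()), and the decoded R and B values are stored
     * as differences from G, which is added back while interleaving into
     * the output picture. */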
    case FRAME_ARITH_RGBA:
        avctx->pix_fmt = AV_PIX_FMT_RGB32;
        planes = 4;
        offset_ry += 4;
        offs[3] = AV_RL32(buf + 9);
    case FRAME_ARITH_RGB24:
    case FRAME_U_RGB24:
        if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
            avctx->pix_fmt = AV_PIX_FMT_RGB24;

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        offs[0] = offset_bv;
        offs[1] = offset_gu;
        offs[2] = offset_ry;

        l->rgb_stride = FFALIGN(avctx->width, 16);
        av_fast_malloc(&l->rgb_planes, &l->rgb_planes_allocated,
                       l->rgb_stride * avctx->height * planes + 1);
        if (!l->rgb_planes) {
            av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
            return AVERROR(ENOMEM);
        }
        for (i = 0; i < planes; i++)
            srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
        for (i = 0; i < planes; i++)
            if (buf_size <= offs[i]) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid frame offsets\n");
                return AVERROR_INVALIDDATA;
            }
        for (i = 0; i < planes; i++)
            lag_decode_arith_plane(l, srcs[i],
                                   avctx->width, avctx->height,
                                   -l->rgb_stride, buf + offs[i],
                                   buf_size - offs[i]);
        dst = p->data[0];
        for (i = 0; i < planes; i++)
            srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
        for (j = 0; j < avctx->height; j++) {
            for (i = 0; i < avctx->width; i++) {
                uint8_t r, g, b, a;
                r = srcs[0][i];
                g = srcs[1][i];
                b = srcs[2][i];
                r += g;
                b += g;
                if (frametype == FRAME_ARITH_RGBA) {
                    a = srcs[3][i];
                    AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
                } else {
                    dst[i * 3 + 0] = r;
                    dst[i * 3 + 1] = g;
                    dst[i * 3 + 2] = b;
                }
            }
            dst += p->linesize[0];
            for (i = 0; i < planes; i++)
                srcs[i] += l->rgb_stride;
        }
        break;
    case FRAME_ARITH_YUY2:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height, p->linesize[1],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height, p->linesize[2],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    case FRAME_ARITH_YV12:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
                               avctx->height / 2, p->linesize[2],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
                               avctx->height / 2, p->linesize[1],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported Lagarith frame type: %#"PRIx8"\n", frametype);
        return AVERROR_PATCHWELCOME;
    }

    *got_frame = 1;

    return buf_size;
}

static av_cold int lag_decode_init(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;
    l->avctx = avctx;

    ff_dsputil_init(&l->dsp, avctx);

    return 0;
}

static av_cold int lag_decode_end(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;

    av_freep(&l->rgb_planes);

    return 0;
}

AVCodec ff_lagarith_decoder = {
    .name           = "lagarith",
    .long_name      = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LAGARITH,
    .priv_data_size = sizeof(LagarithContext),
    .init           = lag_decode_init,
    .close          = lag_decode_end,
    .decode         = lag_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
};