/*
 * Lagarith lossless decoder
 * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lagarith lossless decoder
 * @author Nathan Caldwell
 */

#include <inttypes.h>

#include "avcodec.h"
#include "get_bits.h"
#include "mathops.h"
#include "lagarithrac.h"
#include "lossless_videodsp.h"
#include "thread.h"

enum LagarithFrameType {
    FRAME_RAW           = 1,    /**< uncompressed */
    FRAME_U_RGB24       = 2,    /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,    /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,    /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,    /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,    /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,    /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,    /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,    /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10,   /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11,   /**< reduced resolution YV12 frame */
};

typedef struct LagarithContext {
    AVCodecContext *avctx;
    LLVidDSPContext llviddsp;
    int zeros;                  /**< number of consecutive zero bytes encountered */
    int zeros_rem;              /**< number of zero bytes remaining to output */
} LagarithContext;

/**
 * Compute the 52-bit mantissa of 1/(double)denom.
 * This crazy format uses floats in an entropy coder and we have to match x86
 * rounding exactly, thus ordinary floats aren't portable enough.
 * @param denom denominator
 * @return 52-bit mantissa
 * @see softfloat_mul
 */
static uint64_t softfloat_reciprocal(uint32_t denom)
{
    int shift = av_log2(denom - 1) + 1;
    uint64_t ret = (1ULL << 52) / denom;
    uint64_t err = (1ULL << 52) - ret * denom;
    ret <<= shift;
    err <<= shift;
    err += denom / 2;
    return ret + err / denom;
}

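/* softfloat_reciprocal() and softfloat_mul() are used as a pair in
 * lag_read_prob_header(): softfloat_mul(p, softfloat_reciprocal(d))
 * approximates p / (double)d with the x86 rounding the format requires,
 * using only 64-bit integer arithmetic so the result is portable. */
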
/**
 * (uint32_t)(x*f), where f has the given mantissa and exponent 0.
 * Used in combination with softfloat_reciprocal, this computes x/(double)denom.
 * @param x 32-bit integer factor
 * @param mantissa mantissa of f with exponent 0
 * @return 32-bit integer value (x*f)
 * @see softfloat_reciprocal
 */
static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
{
    uint64_t l = x * (mantissa & 0xffffffff);
    uint64_t h = x * (mantissa >> 32);
    h += l >> 32;
    l &= 0xffffffff;
    l += 1LL << av_log2(h >> 21);
    h += l >> 32;
    return h >> 20;
}

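/**
 * Map a signed byte from the bitstream to an unsigned zero-run length.
 * Non-negative x maps to 2*x, negative x to -2*x - 1 (a zigzag mapping).
 */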
static uint8_t lag_calc_zero_run(int8_t x)
{
    return (x * 2) ^ (x >> 7);
}

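/**
 * Read one probability value from the header bitstream.
 * The bit length is signalled by a prefix code over the series
 * 1, 2, 3, 5, 8, 13, 21 and is terminated by two consecutive set bits;
 * the value's remaining bits then follow explicitly.
 * Returns 0 on success, -1 if the signalled length is invalid.
 */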
static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
{
    static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
    int i;
    int bit     = 0;
    int bits    = 0;
    int prevbit = 0;
    unsigned val;

    for (i = 0; i < 7; i++) {
        if (prevbit && bit)
            break;
        prevbit = bit;
        bit = get_bits1(gb);
        if (bit && !prevbit)
            bits += series[i];
    }
    bits--;
    if (bits < 0 || bits > 31) {
        *value = 0;
        return -1;
    } else if (bits == 0) {
        *value = 0;
        return 0;
    }

    val  = get_bits_long(gb, bits);
    val |= 1U << bits;

    *value = val - 1;

    return 0;
}

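/**
 * Read the symbol probability table for one plane and set up the range coder:
 * the 256 probabilities are read (runs of zero entries are themselves
 * run-length coded), scaled so that they sum to a power of two, and finally
 * converted into a cumulative distribution in rac->prob[]; rac->scale is set
 * to the log2 of that sum.
 */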
static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
{
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;
    int nnz = 0;

    rac->prob[0]   = 0;
    rac->prob[257] = UINT_MAX;
    /* Read probabilities from bitstream */
    for (i = 1; i < 257; i++) {
        if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
            return -1;
        }
        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
            av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
            return -1;
        }
        cumul_prob += rac->prob[i];
        if (!rac->prob[i]) {
            if (lag_decode_prob(gb, &prob)) {
                av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
                return -1;
            }
            if (prob > 256 - i)
                prob = 256 - i;
            for (j = 0; j < prob; j++)
                rac->prob[++i] = 0;
        } else {
            nnz++;
        }
    }

    if (!cumul_prob) {
        av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
        return -1;
    }

    if (nnz == 1 && (show_bits_long(gb, 32) & 0xFFFFFF)) {
        return AVERROR_INVALIDDATA;
    }

    /* Scale probabilities so cumulative probability is an even power of 2. */
    scale_factor = av_log2(cumul_prob);

    if (cumul_prob & (cumul_prob - 1)) {
        uint64_t mul = softfloat_reciprocal(cumul_prob);
        for (i = 1; i <= 128; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        if (scaled_cumul_prob <= 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Scaled probabilities invalid\n");
            return AVERROR_INVALIDDATA;
        }

        for (; i < 257; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        scale_factor++;
        if (scale_factor >= 32U)
            return AVERROR_INVALIDDATA;
        cumulative_target = 1U << scale_factor;

        if (scaled_cumul_prob > cumulative_target) {
            av_log(rac->avctx, AV_LOG_ERROR,
                   "Scaled probabilities are larger than target!\n");
            return -1;
        }

        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;

        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
            if (rac->prob[i]) {
                rac->prob[i]++;
                scaled_cumul_prob--;
            }
            /* Comment from reference source:
             * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
             *                      // since the compression change is negligible and fixing it
             *                      // breaks backwards compatibility
             *     b =- (signed int)b;
             *     b &= 0xFF;
             * } else {
             *     b++;
             *     b &= 0x7f;
             * }
             */
        }
    }

    if (scale_factor > 23)
        return AVERROR_INVALIDDATA;

    rac->scale = scale_factor;

    /* Fill probability array with cumulative probability for each symbol. */
    for (i = 1; i < 257; i++)
        rac->prob[i] += rac->prob[i - 1];

    return 0;
}

static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
                                      uint8_t *diff, int w, int *left,
                                      int *left_top)
{
    /* This is almost identical to add_hfyu_median_pred in huffyuvdsp.h.
     * However the &0xFF on the gradient predictor yields incorrect output
     * for lagarith.
     */
    int i;
    uint8_t l, lt;

    l  = *left;
    lt = *left_top;

    for (i = 0; i < w; i++) {
        l      = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
        lt     = src1[i];
        dst[i] = l;
    }

    *left     = l;
    *left_top = lt;
}

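/**
 * Undo the spatial prediction for one line of a plane, in place.
 * Line 0 is left-predicted only; later lines use the median predictor,
 * seeding the left sample from the last pixel of the previous row.
 */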
static void lag_pred_line(LagarithContext *l, uint8_t *buf,
                          int width, int stride, int line)
{
    int L, TL;

    if (!line) {
        /* Left prediction only for first line */
        L = l->llviddsp.add_left_pred(buf, buf, width, 0);
    } else {
        /* Left pixel is actually prev_row[width] */
        L = buf[width - stride - 1];

        if (line == 1) {
            /* Second line, left predict first pixel, the rest of the line is median predicted
             * NOTE: In the case of RGB this pixel is top predicted */
            TL = l->avctx->pix_fmt == AV_PIX_FMT_YUV420P ? buf[-stride] : L;
        } else {
            /* Top left is 2 rows back, last pixel */
            TL = buf[width - (2 * stride) - 1];
        }

        add_lag_median_prediction(buf, buf - stride, buf,
                                  width, &L, &TL);
    }
}

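/**
 * Undo the spatial prediction for one line of a YUY2-coded plane, in place.
 * Line 0 is left-predicted, with the first luma sample kept verbatim; on
 * line 1 the first samples (4 for luma, 2 for chroma) are left-predicted
 * before the median predictor takes over; later lines are median-predicted.
 */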
static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
                               int width, int stride, int line,
                               int is_luma)
{
    int L, TL;

    if (!line) {
        L = buf[0];
        if (is_luma)
            buf[0] = 0;
        l->llviddsp.add_left_pred(buf, buf, width, 0);
        if (is_luma)
            buf[0] = L;
        return;
    }
    if (line == 1) {
        const int HEAD = is_luma ? 4 : 2;
        int i;

        L  = buf[width - stride - 1];
        TL = buf[HEAD  - stride - 1];
        for (i = 0; i < HEAD; i++) {
            L += buf[i];
            buf[i] = L;
        }
        for (; i < width; i++) {
            L      = mid_pred(L & 0xFF, buf[i - stride], (L + buf[i - stride] - TL) & 0xFF) + buf[i];
            TL     = buf[i - stride];
            buf[i] = L;
        }
    } else {
        TL = buf[width - (2 * stride) - 1];
        L  = buf[width - stride - 1];
        l->llviddsp.add_median_pred(buf, buf - stride, buf, width, &L, &TL);
    }
}

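/**
 * Decode one line of range-coded samples into dst.
 * A run of esc_count consecutive zero bytes escapes into an explicit
 * zero-run length (see lag_calc_zero_run()); any remainder of that run is
 * carried over to the next line via l->zeros_rem.
 * @return the number of symbols read from the range coder
 */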
static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                           uint8_t *dst, int width, int stride,
                           int esc_count)
{
    int i = 0;
    int ret = 0;

    if (!esc_count)
        esc_count = -1;

    /* Output any zeros remaining from the previous run */
handle_zeros:
    if (l->zeros_rem) {
        int count = FFMIN(l->zeros_rem, width - i);
        memset(dst + i, 0, count);
        i += count;
        l->zeros_rem -= count;
    }

    while (i < width) {
        dst[i] = lag_get_rac(rac);
        ret++;

        if (dst[i])
            l->zeros = 0;
        else
            l->zeros++;

        i++;
        if (l->zeros == esc_count) {
            int index = lag_get_rac(rac);
            ret++;

            l->zeros = 0;

            l->zeros_rem = lag_calc_zero_run(index);
            goto handle_zeros;
        }
    }
    return ret;
}

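/**
 * Decode one line stored with zero-run coding only (no range coder).
 * Literal bytes are copied from src until a zero-run escape is found,
 * which is followed by a byte giving the next zero-run length.
 * @return the number of input bytes consumed, or a negative error code
 */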
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);

    avpriv_request_sample(l->avctx, "zero_run_line");

    memset(dst, 0, width);

output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        if (end - dst < count) {
            av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
            return AVERROR_INVALIDDATA;
        }
        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
    }

    while (dst < end) {
        i = 0;
        while (!zero_run && dst + i < end) {
            i++;
            if (i + 2 >= src_end - src)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
        if (zero_run) {
            zero_run = 0;
            i += esc_count;
            memcpy(dst, src, i);
            dst += i;
            l->zeros_rem = lag_calc_zero_run(src[i]);

            src += i + 1;
            goto output_zeros;
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return src - src_start;
}

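/**
 * Decode one plane of a frame.
 * src[0] is an escape code selecting the coding mode: values below 4 select
 * range coding with that zero-run escape length, 4-7 select zero-run-only or
 * uncompressed storage, and 0xff marks a solid fill of src[1]. The spatial
 * prediction is undone afterwards except for solid planes.
 */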
static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
                                  int width, int height, int stride,
                                  const uint8_t *src, int src_size)
{
    int i = 0;
    int read = 0;
    uint32_t length;
    uint32_t offset = 1;
    int esc_count;
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;
    int ret;

    rac.avctx = l->avctx;
    l->zeros = 0;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    esc_count = src[0];
    if (esc_count < 4) {
        length = width * height;
        if (src_size < 5)
            return AVERROR_INVALIDDATA;
        if (esc_count && AV_RL32(src + 1) < length) {
            length = AV_RL32(src + 1);
            offset += 4;
        }

        if ((ret = init_get_bits8(&gb, src + offset, src_size - offset)) < 0)
            return ret;

        if (lag_read_prob_header(&rac, &gb) < 0)
            return -1;

        ff_lag_rac_init(&rac, &gb, length - stride);
        for (i = 0; i < height; i++) {
            if (rac.overread > MAX_OVERREAD)
                return AVERROR_INVALIDDATA;
            read += lag_decode_line(l, &rac, dst + (i * stride), width,
                                    stride, esc_count);
        }

        if (read > length)
            av_log(l->avctx, AV_LOG_WARNING,
                   "Output more bytes than length (%d of %"PRIu32")\n", read,
                   length);
    } else if (esc_count < 8) {
        esc_count -= 4;
        src++;
        src_size--;
        if (esc_count > 0) {
            /* Zero run coding only, no range coding. */
            for (i = 0; i < height; i++) {
                int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                   src_end, width, esc_count);
                if (res < 0)
                    return res;
                src += res;
            }
        } else {
            if (src_size < width * height)
                return AVERROR_INVALIDDATA; // buffer not big enough
            /* Plane is stored uncompressed */
            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);
                src += width;
            }
        }
    } else if (esc_count == 0xff) {
        /* Plane is a solid run of given value */
        for (i = 0; i < height; i++)
            memset(dst + i * stride, src[1], width);
        /* Do not apply prediction.
         * Note: memset to 0 above, setting first value to src[1]
         * and applying prediction gives the same result. */
        return 0;
    } else {
        av_log(l->avctx, AV_LOG_ERROR,
               "Invalid zero run escape code! (%#x)\n", esc_count);
        return -1;
    }

    if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
        for (i = 0; i < height; i++) {
            lag_pred_line(l, dst, width, stride, i);
            dst += stride;
        }
    } else {
        for (i = 0; i < height; i++) {
            lag_pred_line_yuy2(l, dst, width, stride, i,
                               width == l->avctx->width);
            dst += stride;
        }
    }

    return 0;
}

/**
 * Decode a frame.
 * @param avctx codec context
 * @param data output AVFrame
 * @param got_frame set to 1 if a picture was output, 0 otherwise
 * @param avpkt input packet
 * @return number of consumed bytes on success or negative if decode fails
 */
static int lag_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    unsigned int buf_size = avpkt->size;
    LagarithContext *l = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *const p  = data;
    uint8_t frametype;
    uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
    uint32_t offs[4];
    uint8_t *srcs[4];
    int i, j, planes = 3;
    int ret;

    p->key_frame = 1;
    p->pict_type = AV_PICTURE_TYPE_I;

    frametype = buf[0];

    offset_gu = AV_RL32(buf + 1);
    offset_bv = AV_RL32(buf + 5);

    switch (frametype) {
    case FRAME_SOLID_RGBA:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
    case FRAME_SOLID_GRAY:
        if (frametype == FRAME_SOLID_GRAY)
            if (avctx->bits_per_coded_sample == 24) {
                avctx->pix_fmt = AV_PIX_FMT_GBRP;
            } else {
                avctx->pix_fmt = AV_PIX_FMT_GBRAP;
                planes = 4;
            }

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        if (frametype == FRAME_SOLID_RGBA) {
            for (i = 0; i < avctx->height; i++) {
                memset(p->data[0] + i * p->linesize[0], buf[2], avctx->width);
                memset(p->data[1] + i * p->linesize[1], buf[1], avctx->width);
                memset(p->data[2] + i * p->linesize[2], buf[3], avctx->width);
                memset(p->data[3] + i * p->linesize[3], buf[4], avctx->width);
            }
        } else {
            for (i = 0; i < avctx->height; i++) {
                for (j = 0; j < planes; j++)
                    memset(p->data[j] + i * p->linesize[j], buf[1], avctx->width);
            }
        }
        break;
    case FRAME_SOLID_COLOR:
        if (avctx->bits_per_coded_sample == 24) {
            avctx->pix_fmt = AV_PIX_FMT_GBRP;
        } else {
            avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        }

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        for (i = 0; i < avctx->height; i++) {
            memset(p->data[0] + i * p->linesize[0], buf[2], avctx->width);
            memset(p->data[1] + i * p->linesize[1], buf[1], avctx->width);
            memset(p->data[2] + i * p->linesize[2], buf[3], avctx->width);
            if (avctx->pix_fmt == AV_PIX_FMT_GBRAP)
                memset(p->data[3] + i * p->linesize[3], 0xFFu, avctx->width);
        }
        break;
    case FRAME_ARITH_RGBA:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        planes = 4;
        offset_ry += 4;
        offs[3] = AV_RL32(buf + 9);
    case FRAME_ARITH_RGB24:
    case FRAME_U_RGB24:
        if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
            avctx->pix_fmt = AV_PIX_FMT_GBRP;

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        offs[0] = offset_bv;
        offs[1] = offset_gu;
        offs[2] = offset_ry;

        for (i = 0; i < planes; i++)
            srcs[i] = p->data[i] + (avctx->height - 1) * p->linesize[i];
        for (i = 0; i < planes; i++)
            if (buf_size <= offs[i]) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid frame offsets\n");
                return AVERROR_INVALIDDATA;
            }

        for (i = 0; i < planes; i++)
            lag_decode_arith_plane(l, srcs[i],
                                   avctx->width, avctx->height,
                                   -p->linesize[i], buf + offs[i],
                                   buf_size - offs[i]);
        for (i = 0; i < avctx->height; i++) {
            l->llviddsp.add_bytes(p->data[0] + i * p->linesize[0], p->data[1] + i * p->linesize[1], avctx->width);
            l->llviddsp.add_bytes(p->data[2] + i * p->linesize[2], p->data[1] + i * p->linesize[1], avctx->width);
        }
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
        FFSWAP(uint8_t*, p->data[2], p->data[1]);
        FFSWAP(int, p->linesize[2], p->linesize[1]);
        break;
    case FRAME_ARITH_YUY2:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
                               avctx->height, p->linesize[1],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
                               avctx->height, p->linesize[2],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    case FRAME_ARITH_YV12:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;

        if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
            return ret;

        if (offset_ry >= buf_size ||
            offset_gu >= buf_size ||
            offset_bv >= buf_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame offsets\n");
            return AVERROR_INVALIDDATA;
        }

        lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
                               p->linesize[0], buf + offset_ry,
                               buf_size - offset_ry);
        lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
                               (avctx->height + 1) / 2, p->linesize[2],
                               buf + offset_gu, buf_size - offset_gu);
        lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
                               (avctx->height + 1) / 2, p->linesize[1],
                               buf + offset_bv, buf_size - offset_bv);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "Unsupported Lagarith frame type: %#"PRIx8"\n", frametype);
        return AVERROR_PATCHWELCOME;
    }

    *got_frame = 1;

    return buf_size;
}

static av_cold int lag_decode_init(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;
    l->avctx = avctx;

    ff_llviddsp_init(&l->llviddsp);

    return 0;
}

#if HAVE_THREADS
static av_cold int lag_decode_init_thread_copy(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;

    l->avctx = avctx;

    return 0;
}
#endif

AVCodec ff_lagarith_decoder = {
    .name             = "lagarith",
    .long_name        = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_LAGARITH,
    .priv_data_size   = sizeof(LagarithContext),
    .init             = lag_decode_init,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(lag_decode_init_thread_copy),
    .decode           = lag_decode_frame,
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};