You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

775 lines
23KB

  1. /*
  2. * Lagarith lossless decoder
  3. * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Lagarith lossless decoder
  24. * @author Nathan Caldwell
  25. */
  26. #include <inttypes.h>
  27. #include "avcodec.h"
  28. #include "get_bits.h"
  29. #include "mathops.h"
  30. #include "lagarithrac.h"
  31. #include "lossless_videodsp.h"
  32. #include "thread.h"
/** Frame type codes, stored in the first byte of every Lagarith packet. */
enum LagarithFrameType {
    FRAME_RAW           = 1,    /**< uncompressed */
    FRAME_U_RGB24       = 2,    /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,    /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,    /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,    /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,    /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,    /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,    /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,    /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10,   /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11,   /**< reduced resolution YV12 frame */
};
/** Decoder state persisting across lines and frames. */
typedef struct LagarithContext {
    AVCodecContext *avctx;      /**< back-pointer to the owning codec context */
    LLVidDSPContext llviddsp;   /**< lossless-video DSP (left/median prediction) */
    int zeros;                  /**< number of consecutive zero bytes encountered */
    int zeros_rem;              /**< number of zero bytes remaining to output */
    uint8_t *rgb_planes;        /**< scratch buffer holding the separate RGB(A) planes */
    int rgb_planes_allocated;   /**< allocated size of rgb_planes, for av_fast_malloc() */
    int rgb_stride;             /**< scratch plane stride: frame width aligned to 16 */
} LagarithContext;
  55. /**
  56. * Compute the 52-bit mantissa of 1/(double)denom.
  57. * This crazy format uses floats in an entropy coder and we have to match x86
  58. * rounding exactly, thus ordinary floats aren't portable enough.
  59. * @param denom denominator
  60. * @return 52-bit mantissa
  61. * @see softfloat_mul
  62. */
  63. static uint64_t softfloat_reciprocal(uint32_t denom)
  64. {
  65. int shift = av_log2(denom - 1) + 1;
  66. uint64_t ret = (1ULL << 52) / denom;
  67. uint64_t err = (1ULL << 52) - ret * denom;
  68. ret <<= shift;
  69. err <<= shift;
  70. err += denom / 2;
  71. return ret + err / denom;
  72. }
  73. /**
  74. * (uint32_t)(x*f), where f has the given mantissa, and exponent 0
  75. * Used in combination with softfloat_reciprocal computes x/(double)denom.
  76. * @param x 32-bit integer factor
  77. * @param mantissa mantissa of f with exponent 0
  78. * @return 32-bit integer value (x*f)
  79. * @see softfloat_reciprocal
  80. */
  81. static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
  82. {
  83. uint64_t l = x * (mantissa & 0xffffffff);
  84. uint64_t h = x * (mantissa >> 32);
  85. h += l >> 32;
  86. l &= 0xffffffff;
  87. l += 1LL << av_log2(h >> 21);
  88. h += l >> 32;
  89. return h >> 20;
  90. }
  91. static uint8_t lag_calc_zero_run(int8_t x)
  92. {
  93. return (x * 2) ^ (x >> 7);
  94. }
/**
 * Read one probability value from the bitstream.
 *
 * The bit count is coded as a prefix over the Fibonacci-like series
 * {1,2,3,5,8,13,21}; the value itself follows as that many raw bits
 * with an implicit leading one.
 *
 * @param gb    bitstream reader
 * @param value receives the decoded value (set to 0 on failure)
 * @return 0 on success, -1 if the coded bit count is out of range
 */
static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
{
    static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
    int i;
    int bit = 0;
    int bits = 0;
    int prevbit = 0;
    unsigned val;

    /* Sum series[i] for each isolated 1 bit; two consecutive 1 bits
     * terminate the prefix. */
    for (i = 0; i < 7; i++) {
        if (prevbit && bit)
            break;
        prevbit = bit;
        bit = get_bits1(gb);
        if (bit && !prevbit)
            bits += series[i];
    }
    bits--;
    if (bits < 0 || bits > 31) {
        *value = 0;
        return -1;
    } else if (bits == 0) {
        *value = 0;
        return 0;
    }

    val = get_bits_long(gb, bits);
    val |= 1U << bits;      /* restore the implicit leading 1 */

    *value = val - 1;
    return 0;
}
/**
 * Read the 256-symbol probability model from the bitstream and scale it
 * so the cumulative probability is an exact power of two, then convert
 * the table to cumulative form for the range coder.
 *
 * @param rac range coder context whose prob[] table and scale are filled
 * @param gb  bitstream positioned at the probability header
 * @return 0 on success, a negative value on invalid data
 */
static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
{
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;
    int nnz = 0;

    rac->prob[0] = 0;
    rac->prob[257] = UINT_MAX;
    /* Read probabilities from bitstream */
    for (i = 1; i < 257; i++) {
        if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
            return -1;
        }
        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
            av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
            return -1;
        }
        cumul_prob += rac->prob[i];
        if (!rac->prob[i]) {
            /* A zero probability is followed by a run length of further zeros. */
            if (lag_decode_prob(gb, &prob)) {
                av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
                return -1;
            }
            if (prob > 256 - i)
                prob = 256 - i;
            for (j = 0; j < prob; j++)
                rac->prob[++i] = 0;
        } else {
            nnz++;
        }
    }

    if (!cumul_prob) {
        av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
        return -1;
    }

    /* A model with a single nonzero symbol must not be followed by
     * nonzero payload bits. */
    if (nnz == 1 && (show_bits_long(gb, 32) & 0xFFFFFF)) {
        return AVERROR_INVALIDDATA;
    }

    /* Scale probabilities so cumulative probability is an even power of 2. */
    scale_factor = av_log2(cumul_prob);

    if (cumul_prob & (cumul_prob - 1)) {
        uint64_t mul = softfloat_reciprocal(cumul_prob);
        /* First half, then a sanity check that scaling produced a nonzero
         * total before finishing the table. */
        for (i = 1; i <= 128; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        if (scaled_cumul_prob <= 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Scaled probabilities invalid\n");
            return AVERROR_INVALIDDATA;
        }

        for (; i < 257; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        scale_factor++;
        if (scale_factor >= 32U)
            return AVERROR_INVALIDDATA;
        cumulative_target = 1U << scale_factor;

        if (scaled_cumul_prob > cumulative_target) {
            av_log(rac->avctx, AV_LOG_ERROR,
                   "Scaled probabilities are larger than target!\n");
            return -1;
        }

        /* Distribute the rounding deficit over nonzero symbols, cycling
         * through indices 1..128 as the reference encoder does. */
        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;

        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
            if (rac->prob[i]) {
                rac->prob[i]++;
                scaled_cumul_prob--;
            }
            /* Comment from reference source:
             * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
             *                      // since the compression change is negligible and fixing it
             *                      // breaks backwards compatibility
             *     b =- (signed int)b;
             *     b &= 0xFF;
             * } else {
             *     b++;
             *     b &= 0x7f;
             * }
             */
        }
    }

    rac->scale = scale_factor;

    /* Fill probability array with cumulative probability for each symbol. */
    for (i = 1; i < 257; i++)
        rac->prob[i] += rac->prob[i - 1];

    return 0;
}
  214. static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
  215. uint8_t *diff, int w, int *left,
  216. int *left_top)
  217. {
  218. /* This is almost identical to add_hfyu_median_pred in huffyuvdsp.h.
  219. * However the &0xFF on the gradient predictor yields incorrect output
  220. * for lagarith.
  221. */
  222. int i;
  223. uint8_t l, lt;
  224. l = *left;
  225. lt = *left_top;
  226. for (i = 0; i < w; i++) {
  227. l = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
  228. lt = src1[i];
  229. dst[i] = l;
  230. }
  231. *left = l;
  232. *left_top = lt;
  233. }
  234. static void lag_pred_line(LagarithContext *l, uint8_t *buf,
  235. int width, int stride, int line)
  236. {
  237. int L, TL;
  238. if (!line) {
  239. /* Left prediction only for first line */
  240. L = l->llviddsp.add_left_pred(buf, buf, width, 0);
  241. } else {
  242. /* Left pixel is actually prev_row[width] */
  243. L = buf[width - stride - 1];
  244. if (line == 1) {
  245. /* Second line, left predict first pixel, the rest of the line is median predicted
  246. * NOTE: In the case of RGB this pixel is top predicted */
  247. TL = l->avctx->pix_fmt == AV_PIX_FMT_YUV420P ? buf[-stride] : L;
  248. } else {
  249. /* Top left is 2 rows back, last pixel */
  250. TL = buf[width - (2 * stride) - 1];
  251. }
  252. add_lag_median_prediction(buf, buf - stride, buf,
  253. width, &L, &TL);
  254. }
  255. }
/**
 * Reverse the spatial prediction for one line of a YUV422P plane.
 *
 * Line 0 is left-predicted (for luma the first sample is stored raw).
 * Line 1 left-predicts a short head (4 samples for luma, 2 for chroma)
 * and median-predicts the remainder with the exact masking the reference
 * decoder uses. Later lines use the normal median predictor.
 */
static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
                               int width, int stride, int line,
                               int is_luma)
{
    int L, TL;

    if (!line) {
        L = buf[0];
        if (is_luma)
            buf[0] = 0;     /* first luma sample is raw; exclude it from prediction */
        l->llviddsp.add_left_pred(buf, buf, width, 0);
        if (is_luma)
            buf[0] = L;
        return;
    }
    if (line == 1) {
        const int HEAD = is_luma ? 4 : 2;
        int i;

        L = buf[width - stride - 1];
        TL = buf[HEAD - stride - 1];
        /* Left-predict the head samples... */
        for (i = 0; i < HEAD; i++) {
            L += buf[i];
            buf[i] = L;
        }
        /* ...then median-predict the rest. Only the predictor inputs are
         * masked to 8 bits; L itself may exceed 8 bits between steps. */
        for (; i < width; i++) {
            L = mid_pred(L & 0xFF, buf[i - stride], (L + buf[i - stride] - TL) & 0xFF) + buf[i];
            TL = buf[i - stride];
            buf[i] = L;
        }
    } else {
        /* Seeds: last pixel two rows back (top-left) and of previous row (left). */
        TL = buf[width - (2 * stride) - 1];
        L = buf[width - stride - 1];
        l->llviddsp.add_median_pred(buf, buf - stride, buf, width, &L, &TL);
    }
}
/**
 * Range-decode one line of a plane, expanding zero runs.
 *
 * When esc_count consecutive zero bytes have been decoded, the next
 * decoded byte encodes (via lag_calc_zero_run()) how many further zeros
 * to emit. Zeros left over at the end of a line carry across lines via
 * l->zeros_rem.
 *
 * @param l         decoder context (zero-run state)
 * @param rac       range coder
 * @param dst       destination line
 * @param width     line width in bytes
 * @param stride    unused here; lines are emitted independently
 * @param esc_count zero-run escape threshold (0 disables run escapes)
 * @return number of bytes read from the range coder
 */
static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                           uint8_t *dst, int width, int stride,
                           int esc_count)
{
    int i = 0;
    int ret = 0;

    if (!esc_count)
        esc_count = -1;     /* the zero counter can never match -1 */

    /* Output any zeros remaining from the previous run */
handle_zeros:
    if (l->zeros_rem) {
        int count = FFMIN(l->zeros_rem, width - i);
        memset(dst + i, 0, count);
        i += count;
        l->zeros_rem -= count;
    }

    while (i < width) {
        dst[i] = lag_get_rac(rac);
        ret++;

        if (dst[i])
            l->zeros = 0;
        else
            l->zeros++;

        i++;
        if (l->zeros == esc_count) {
            /* Escape reached: next byte encodes the extra zero-run length. */
            int index = lag_get_rac(rac);
            ret++;

            l->zeros = 0;

            l->zeros_rem = lag_calc_zero_run(index);
            goto handle_zeros;
        }
    }
    return ret;
}
/**
 * Decode one line coded with zero-run compression only (no range coder).
 *
 * Literal bytes are copied until a run of esc_count zero bytes is found
 * in the input (mask1/mask2 disable the 2nd/3rd byte of the test for
 * small escape counts); the byte after the run encodes additional zeros
 * via lag_calc_zero_run(), carried across lines in l->zeros_rem.
 *
 * @return number of source bytes consumed, or a negative error code
 */
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    uint8_t mask1 = -(esc_count < 2);   /* 0xFF when the 2nd byte takes part in the test */
    uint8_t mask2 = -(esc_count < 3);   /* 0xFF when the 3rd byte takes part in the test */
    uint8_t *end = dst + (width - 2);

    avpriv_request_sample(l->avctx, "zero_run_line");

    memset(dst, 0, width);

output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        if (end - dst < count) {
            av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
            return AVERROR_INVALIDDATA;
        }
        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
    }

    while (dst < end) {
        i = 0;
        /* Scan for esc_count consecutive zero input bytes. */
        while (!zero_run && dst + i < end) {
            i++;
            if (i + 2 >= src_end - src)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
        if (zero_run) {
            /* Copy up to and including the escape, then read the run length. */
            zero_run = 0;
            i += esc_count;
            memcpy(dst, src, i);
            dst += i;
            l->zeros_rem = lag_calc_zero_run(src[i]);

            src += i + 1;
            goto output_zeros;
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return src - src_start;
}
/**
 * Decode a single plane.
 *
 * The first source byte selects the coding mode:
 *  - < 4:    range coded; the byte is the zero-run escape count, and an
 *            optional 32-bit length may follow
 *  - 4..7:   escape count minus 4; 1..3 selects zero-run-only coding,
 *            0 means the plane is stored uncompressed
 *  - 0xff:   solid plane filled with src[1]
 * After decompression the per-line prediction is reversed in place
 * (except for solid planes, where it would be a no-op).
 *
 * Note: stride may be negative for bottom-up (RGB) planes.
 *
 * @return 0 on success, a negative error code on failure
 */
static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
                                  int width, int height, int stride,
                                  const uint8_t *src, int src_size)
{
    int i = 0;
    int read = 0;
    uint32_t length;
    uint32_t offset = 1;
    int esc_count;
    GetBitContext gb;
    lag_rac rac;
    const uint8_t *src_end = src + src_size;
    int ret;

    rac.avctx = l->avctx;
    l->zeros = 0;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    esc_count = src[0];
    if (esc_count < 4) {
        length = width * height;
        if (src_size < 5)
            return AVERROR_INVALIDDATA;
        /* An explicit coded length follows the escape byte when present
         * and smaller than the full plane size. */
        if (esc_count && AV_RL32(src + 1) < length) {
            length = AV_RL32(src + 1);
            offset += 4;
        }

        if ((ret = init_get_bits8(&gb, src + offset, src_size - offset)) < 0)
            return ret;

        if (lag_read_prob_header(&rac, &gb) < 0)
            return -1;

        ff_lag_rac_init(&rac, &gb, length - stride);
        for (i = 0; i < height; i++) {
            if (rac.overread > MAX_OVERREAD)
                return AVERROR_INVALIDDATA;
            read += lag_decode_line(l, &rac, dst + (i * stride), width,
                                    stride, esc_count);
        }

        if (read > length)
            av_log(l->avctx, AV_LOG_WARNING,
                   "Output more bytes than length (%d of %"PRIu32")\n", read,
                   length);
    } else if (esc_count < 8) {
        esc_count -= 4;
        src++;
        src_size--;
        if (esc_count > 0) {
            /* Zero run coding only, no range coding. */
            for (i = 0; i < height; i++) {
                int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
                                                   src_end, width, esc_count);
                if (res < 0)
                    return res;
                src += res;
            }
        } else {
            if (src_size < width * height)
                return AVERROR_INVALIDDATA; // buffer not big enough
            /* Plane is stored uncompressed */
            for (i = 0; i < height; i++) {
                memcpy(dst + (i * stride), src, width);
                src += width;
            }
        }
    } else if (esc_count == 0xff) {
        /* Plane is a solid run of given value */
        for (i = 0; i < height; i++)
            memset(dst + i * stride, src[1], width);
        /* Do not apply prediction.
           Note: memset to 0 above, setting first value to src[1]
           and applying prediction gives the same result. */
        return 0;
    } else {
        av_log(l->avctx, AV_LOG_ERROR,
               "Invalid zero run escape code! (%#x)\n", esc_count);
        return -1;
    }

    /* Undo per-line prediction; YUV422P uses its own variant. */
    if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
        for (i = 0; i < height; i++) {
            lag_pred_line(l, dst, width, stride, i);
            dst += stride;
        }
    } else {
        for (i = 0; i < height; i++) {
            lag_pred_line_yuy2(l, dst, width, stride, i,
                               width == l->avctx->width);
            dst += stride;
        }
    }

    return 0;
}
  463. /**
  464. * Decode a frame.
  465. * @param avctx codec context
  466. * @param data output AVFrame
  467. * @param data_size size of output data or 0 if no picture is returned
  468. * @param avpkt input packet
  469. * @return number of consumed bytes on success or negative if decode fails
  470. */
  471. static int lag_decode_frame(AVCodecContext *avctx,
  472. void *data, int *got_frame, AVPacket *avpkt)
  473. {
  474. const uint8_t *buf = avpkt->data;
  475. unsigned int buf_size = avpkt->size;
  476. LagarithContext *l = avctx->priv_data;
  477. ThreadFrame frame = { .f = data };
  478. AVFrame *const p = data;
  479. uint8_t frametype;
  480. uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
  481. uint32_t offs[4];
  482. uint8_t *srcs[4], *dst;
  483. int i, j, planes = 3;
  484. int ret;
  485. p->key_frame = 1;
  486. frametype = buf[0];
  487. offset_gu = AV_RL32(buf + 1);
  488. offset_bv = AV_RL32(buf + 5);
  489. switch (frametype) {
  490. case FRAME_SOLID_RGBA:
  491. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  492. case FRAME_SOLID_GRAY:
  493. if (frametype == FRAME_SOLID_GRAY)
  494. if (avctx->bits_per_coded_sample == 24) {
  495. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  496. } else {
  497. avctx->pix_fmt = AV_PIX_FMT_0RGB32;
  498. planes = 4;
  499. }
  500. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  501. return ret;
  502. dst = p->data[0];
  503. if (frametype == FRAME_SOLID_RGBA) {
  504. for (j = 0; j < avctx->height; j++) {
  505. for (i = 0; i < avctx->width; i++)
  506. AV_WN32(dst + i * 4, offset_gu);
  507. dst += p->linesize[0];
  508. }
  509. } else {
  510. for (j = 0; j < avctx->height; j++) {
  511. memset(dst, buf[1], avctx->width * planes);
  512. dst += p->linesize[0];
  513. }
  514. }
  515. break;
  516. case FRAME_SOLID_COLOR:
  517. if (avctx->bits_per_coded_sample == 24) {
  518. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  519. } else {
  520. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  521. offset_gu |= 0xFFU << 24;
  522. }
  523. if ((ret = ff_thread_get_buffer(avctx, &frame,0)) < 0)
  524. return ret;
  525. dst = p->data[0];
  526. for (j = 0; j < avctx->height; j++) {
  527. for (i = 0; i < avctx->width; i++)
  528. if (avctx->bits_per_coded_sample == 24) {
  529. AV_WB24(dst + i * 3, offset_gu);
  530. } else {
  531. AV_WN32(dst + i * 4, offset_gu);
  532. }
  533. dst += p->linesize[0];
  534. }
  535. break;
  536. case FRAME_ARITH_RGBA:
  537. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  538. planes = 4;
  539. offset_ry += 4;
  540. offs[3] = AV_RL32(buf + 9);
  541. case FRAME_ARITH_RGB24:
  542. case FRAME_U_RGB24:
  543. if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
  544. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  545. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  546. return ret;
  547. offs[0] = offset_bv;
  548. offs[1] = offset_gu;
  549. offs[2] = offset_ry;
  550. l->rgb_stride = FFALIGN(avctx->width, 16);
  551. av_fast_malloc(&l->rgb_planes, &l->rgb_planes_allocated,
  552. l->rgb_stride * avctx->height * planes + 1);
  553. if (!l->rgb_planes) {
  554. av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
  555. return AVERROR(ENOMEM);
  556. }
  557. for (i = 0; i < planes; i++)
  558. srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
  559. for (i = 0; i < planes; i++)
  560. if (buf_size <= offs[i]) {
  561. av_log(avctx, AV_LOG_ERROR,
  562. "Invalid frame offsets\n");
  563. return AVERROR_INVALIDDATA;
  564. }
  565. for (i = 0; i < planes; i++)
  566. lag_decode_arith_plane(l, srcs[i],
  567. avctx->width, avctx->height,
  568. -l->rgb_stride, buf + offs[i],
  569. buf_size - offs[i]);
  570. dst = p->data[0];
  571. for (i = 0; i < planes; i++)
  572. srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
  573. for (j = 0; j < avctx->height; j++) {
  574. for (i = 0; i < avctx->width; i++) {
  575. uint8_t r, g, b, a;
  576. r = srcs[0][i];
  577. g = srcs[1][i];
  578. b = srcs[2][i];
  579. r += g;
  580. b += g;
  581. if (frametype == FRAME_ARITH_RGBA) {
  582. a = srcs[3][i];
  583. AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
  584. } else {
  585. dst[i * 3 + 0] = r;
  586. dst[i * 3 + 1] = g;
  587. dst[i * 3 + 2] = b;
  588. }
  589. }
  590. dst += p->linesize[0];
  591. for (i = 0; i < planes; i++)
  592. srcs[i] += l->rgb_stride;
  593. }
  594. break;
  595. case FRAME_ARITH_YUY2:
  596. avctx->pix_fmt = AV_PIX_FMT_YUV422P;
  597. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  598. return ret;
  599. if (offset_ry >= buf_size ||
  600. offset_gu >= buf_size ||
  601. offset_bv >= buf_size) {
  602. av_log(avctx, AV_LOG_ERROR,
  603. "Invalid frame offsets\n");
  604. return AVERROR_INVALIDDATA;
  605. }
  606. lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
  607. p->linesize[0], buf + offset_ry,
  608. buf_size - offset_ry);
  609. lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
  610. avctx->height, p->linesize[1],
  611. buf + offset_gu, buf_size - offset_gu);
  612. lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
  613. avctx->height, p->linesize[2],
  614. buf + offset_bv, buf_size - offset_bv);
  615. break;
  616. case FRAME_ARITH_YV12:
  617. avctx->pix_fmt = AV_PIX_FMT_YUV420P;
  618. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  619. return ret;
  620. if (buf_size <= offset_ry || buf_size <= offset_gu || buf_size <= offset_bv) {
  621. return AVERROR_INVALIDDATA;
  622. }
  623. if (offset_ry >= buf_size ||
  624. offset_gu >= buf_size ||
  625. offset_bv >= buf_size) {
  626. av_log(avctx, AV_LOG_ERROR,
  627. "Invalid frame offsets\n");
  628. return AVERROR_INVALIDDATA;
  629. }
  630. lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
  631. p->linesize[0], buf + offset_ry,
  632. buf_size - offset_ry);
  633. lag_decode_arith_plane(l, p->data[2], (avctx->width + 1) / 2,
  634. (avctx->height + 1) / 2, p->linesize[2],
  635. buf + offset_gu, buf_size - offset_gu);
  636. lag_decode_arith_plane(l, p->data[1], (avctx->width + 1) / 2,
  637. (avctx->height + 1) / 2, p->linesize[1],
  638. buf + offset_bv, buf_size - offset_bv);
  639. break;
  640. default:
  641. av_log(avctx, AV_LOG_ERROR,
  642. "Unsupported Lagarith frame type: %#"PRIx8"\n", frametype);
  643. return AVERROR_PATCHWELCOME;
  644. }
  645. *got_frame = 1;
  646. return buf_size;
  647. }
  648. static av_cold int lag_decode_init(AVCodecContext *avctx)
  649. {
  650. LagarithContext *l = avctx->priv_data;
  651. l->avctx = avctx;
  652. ff_llviddsp_init(&l->llviddsp);
  653. return 0;
  654. }
#if HAVE_THREADS
/**
 * Frame-threading copy constructor: only the context back-pointer needs
 * initializing in each worker thread's private copy.
 */
static av_cold int lag_decode_init_thread_copy(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;

    l->avctx = avctx;

    return 0;
}
#endif
/** Free the RGB plane scratch buffer on decoder close. */
static av_cold int lag_decode_end(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;

    av_freep(&l->rgb_planes);

    return 0;
}
/** Decoder registration for the Lagarith lossless video codec. */
AVCodec ff_lagarith_decoder = {
    .name             = "lagarith",
    .long_name        = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_LAGARITH,
    .priv_data_size   = sizeof(LagarithContext),
    .init             = lag_decode_init,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(lag_decode_init_thread_copy),
    .close            = lag_decode_end,
    .decode           = lag_decode_frame,
    .capabilities     = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};