You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

745 lines
23KB

  1. /*
  2. * Lagarith lossless decoder
  3. * Copyright (c) 2009 Nathan Caldwell <saintdev (at) gmail.com>
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Lagarith lossless decoder
  24. * @author Nathan Caldwell
  25. */
  26. #include "avcodec.h"
  27. #include "get_bits.h"
  28. #include "mathops.h"
  29. #include "dsputil.h"
  30. #include "lagarithrac.h"
  31. #include "thread.h"
/** Frame coding modes signalled in the first byte of each packet. */
enum LagarithFrameType {
    FRAME_RAW           = 1,  /**< uncompressed */
    FRAME_U_RGB24       = 2,  /**< unaligned RGB24 */
    FRAME_ARITH_YUY2    = 3,  /**< arithmetic coded YUY2 */
    FRAME_ARITH_RGB24   = 4,  /**< arithmetic coded RGB24 */
    FRAME_SOLID_GRAY    = 5,  /**< solid grayscale color frame */
    FRAME_SOLID_COLOR   = 6,  /**< solid non-grayscale color frame */
    FRAME_OLD_ARITH_RGB = 7,  /**< obsolete arithmetic coded RGB (no longer encoded by upstream since version 1.1.0) */
    FRAME_ARITH_RGBA    = 8,  /**< arithmetic coded RGBA */
    FRAME_SOLID_RGBA    = 9,  /**< solid RGBA color frame */
    FRAME_ARITH_YV12    = 10, /**< arithmetic coded YV12 */
    FRAME_REDUCED_RES   = 11, /**< reduced resolution YV12 frame */
};
/** Persistent decoder state, allocated as AVCodecContext.priv_data. */
typedef struct LagarithContext {
    AVCodecContext *avctx;
    DSPContext dsp;
    int zeros;           /**< number of consecutive zero bytes encountered */
    int zeros_rem;       /**< number of zero bytes remaining to output */
    uint8_t *rgb_planes; /**< scratch buffer for the separate RGB(A) planes, lazily allocated */
    int rgb_stride;      /**< stride of one plane inside rgb_planes (width aligned to 16) */
} LagarithContext;
  53. /**
  54. * Compute the 52bit mantissa of 1/(double)denom.
  55. * This crazy format uses floats in an entropy coder and we have to match x86
  56. * rounding exactly, thus ordinary floats aren't portable enough.
  57. * @param denom denominator
  58. * @return 52bit mantissa
  59. * @see softfloat_mul
  60. */
static uint64_t softfloat_reciprocal(uint32_t denom)
{
    /* Normalisation shift: number of bits needed so denom <= 1 << shift. */
    int shift = av_log2(denom - 1) + 1;
    uint64_t ret = (1ULL << 52) / denom;
    uint64_t err = (1ULL << 52) - ret * denom;
    ret <<= shift;
    err <<= shift;
    /* Round-to-nearest on the residual error to match x86 FPU rounding. */
    err += denom / 2;
    return ret + err / denom;
}
  71. /**
  72. * (uint32_t)(x*f), where f has the given mantissa, and exponent 0
  73. * Used in combination with softfloat_reciprocal computes x/(double)denom.
  74. * @param x 32bit integer factor
  75. * @param mantissa mantissa of f with exponent 0
  76. * @return 32bit integer value (x*f)
  77. * @see softfloat_reciprocal
  78. */
  79. static uint32_t softfloat_mul(uint32_t x, uint64_t mantissa)
  80. {
  81. uint64_t l = x * (mantissa & 0xffffffff);
  82. uint64_t h = x * (mantissa >> 32);
  83. h += l >> 32;
  84. l &= 0xffffffff;
  85. l += 1 << av_log2(h >> 21);
  86. h += l >> 32;
  87. return h >> 20;
  88. }
  89. static uint8_t lag_calc_zero_run(int8_t x)
  90. {
  91. return (x << 1) ^ (x >> 7);
  92. }
/**
 * Read one variable-length probability value from the bitstream.
 *
 * The number of payload bits is signalled by a prefix: each set bit that
 * follows a clear bit adds the next entry of the Fibonacci-like series,
 * and two consecutive set bits terminate the prefix.
 *
 * @param gb    bitstream reader
 * @param value receives the decoded value (0 on failure)
 * @return 0 on success, -1 if the coded bit count is outside 0..31
 */
static int lag_decode_prob(GetBitContext *gb, uint32_t *value)
{
    static const uint8_t series[] = { 1, 2, 3, 5, 8, 13, 21 };
    int i;
    int bit = 0;
    int bits = 0;
    int prevbit = 0;
    unsigned val;

    for (i = 0; i < 7; i++) {
        if (prevbit && bit)
            break;
        prevbit = bit;
        bit = get_bits1(gb);
        if (bit && !prevbit)
            bits += series[i];
    }
    bits--;
    if (bits < 0 || bits > 31) {
        *value = 0;
        return -1;
    } else if (bits == 0) {
        *value = 0;
        return 0;
    }

    val = get_bits_long(gb, bits);
    val |= 1 << bits; /* restore the implicit leading one */

    *value = val - 1;
    return 0;
}
/**
 * Read the per-frame probability model and normalise it for the range coder.
 *
 * 256 symbol probabilities (sentinels at indices 0 and 257) are read with
 * lag_decode_prob(); a zero probability is followed by a run length of
 * further zero entries.  The values are then scaled so their sum is an
 * exact power of two and finally converted in place into a cumulative
 * distribution.
 *
 * @return 0 on success, negative on invalid data
 */
static int lag_read_prob_header(lag_rac *rac, GetBitContext *gb)
{
    int i, j, scale_factor;
    unsigned prob, cumulative_target;
    unsigned cumul_prob = 0;
    unsigned scaled_cumul_prob = 0;

    rac->prob[0]   = 0;
    rac->prob[257] = UINT_MAX;
    /* Read probabilities from bitstream */
    for (i = 1; i < 257; i++) {
        if (lag_decode_prob(gb, &rac->prob[i]) < 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability encountered.\n");
            return -1;
        }
        if ((uint64_t)cumul_prob + rac->prob[i] > UINT_MAX) {
            av_log(rac->avctx, AV_LOG_ERROR, "Integer overflow encountered in cumulative probability calculation.\n");
            return -1;
        }
        cumul_prob += rac->prob[i];
        if (!rac->prob[i]) {
            /* A zero probability is followed by a run length of
             * additional zero entries. */
            if (lag_decode_prob(gb, &prob)) {
                av_log(rac->avctx, AV_LOG_ERROR, "Invalid probability run encountered.\n");
                return -1;
            }
            if (prob > 256 - i)
                prob = 256 - i;
            for (j = 0; j < prob; j++)
                rac->prob[++i] = 0;
        }
    }

    if (!cumul_prob) {
        av_log(rac->avctx, AV_LOG_ERROR, "All probabilities are 0!\n");
        return -1;
    }

    /* Scale probabilities so cumulative probability is an even power of 2. */
    scale_factor = av_log2(cumul_prob);

    if (cumul_prob & (cumul_prob - 1)) {
        uint64_t mul = softfloat_reciprocal(cumul_prob);
        /* Scale the first half, then check for corruption: with an
         * unsigned accumulator, `<= 0` means every scaled entry so far
         * is zero, which a valid stream cannot produce. */
        for (i = 1; i <= 128; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }
        if (scaled_cumul_prob <= 0) {
            av_log(rac->avctx, AV_LOG_ERROR, "Scaled probabilities invalid\n");
            return AVERROR_INVALIDDATA;
        }
        for (; i < 257; i++) {
            rac->prob[i] = softfloat_mul(rac->prob[i], mul);
            scaled_cumul_prob += rac->prob[i];
        }

        scale_factor++;
        cumulative_target = 1 << scale_factor;

        if (scaled_cumul_prob > cumulative_target) {
            av_log(rac->avctx, AV_LOG_ERROR,
                   "Scaled probabilities are larger than target!\n");
            return -1;
        }

        /* Distribute the rounding deficit one unit at a time over the
         * non-zero entries, cycling through indices 1..128 to match the
         * reference encoder. */
        scaled_cumul_prob = cumulative_target - scaled_cumul_prob;

        for (i = 1; scaled_cumul_prob; i = (i & 0x7f) + 1) {
            if (rac->prob[i]) {
                rac->prob[i]++;
                scaled_cumul_prob--;
            }
            /* Comment from reference source:
             * if (b & 0x80 == 0) { // order of operations is 'wrong'; it has been left this way
             *                      // since the compression change is negligible and fixing it
             *                      // breaks backwards compatibility
             *     b =- (signed int)b;
             *     b &= 0xFF;
             * } else {
             *     b++;
             *     b &= 0x7f;
             * }
             */
        }
    }

    rac->scale = scale_factor;

    /* Fill probability array with cumulative probability for each symbol. */
    for (i = 1; i < 257; i++)
        rac->prob[i] += rac->prob[i - 1];

    return 0;
}
/**
 * Median prediction with carry-free left accumulator.
 *
 * This is almost identical to add_hfyu_median_prediction in dsputil.h.
 * However the &0xFF on the gradient predictor yields incorrect output
 * for lagarith, so the prediction here is computed without masking.
 *
 * @param dst      output line
 * @param src1     previous (top) line
 * @param diff     residuals to add to the prediction
 * @param w        line width in bytes
 * @param left     in/out: left neighbour value
 * @param left_top in/out: top-left neighbour value
 */
static void add_lag_median_prediction(uint8_t *dst, uint8_t *src1,
                                      uint8_t *diff, int w, int *left,
                                      int *left_top)
{
    int i;
    uint8_t l, lt;

    l  = *left;
    lt = *left_top;

    for (i = 0; i < w; i++) {
        l      = mid_pred(l, src1[i], l + src1[i] - lt) + diff[i];
        lt     = src1[i];
        dst[i] = l;
    }

    *left     = l;
    *left_top = lt;
}
/**
 * Reverse the spatial prediction of one line of a planar (non-YUY2) plane.
 *
 * Line 0 uses left prediction only; later lines use median prediction,
 * seeding the left/top-left context from the end of the previous row(s).
 * Note: dst may be written bottom-up (negative stride) by the RGB path.
 */
static void lag_pred_line(LagarithContext *l, uint8_t *buf,
                          int width, int stride, int line)
{
    int L, TL;

    if (!line) {
        /* Left prediction only for first line */
        L = l->dsp.add_hfyu_left_prediction(buf, buf,
                                            width, 0);
    } else {
        /* Left pixel is actually prev_row[width] */
        L = buf[width - stride - 1];

        if (line == 1) {
            /* Second line, left predict first pixel, the rest of the line is median predicted
             * NOTE: In the case of RGB this pixel is top predicted */
            TL = l->avctx->pix_fmt == AV_PIX_FMT_YUV420P ? buf[-stride] : L;
        } else {
            /* Top left is 2 rows back, last pixel */
            TL = buf[width - (2 * stride) - 1];
        }

        add_lag_median_prediction(buf, buf - stride, buf,
                                  width, &L, &TL);
    }
}
/**
 * Reverse the spatial prediction of one line of a YUY2-coded plane.
 *
 * Differs from lag_pred_line: the first line keeps its very first luma
 * sample unpredicted, line 1 left-predicts a short head (4 luma / 2
 * chroma samples) before switching to masked median prediction, and
 * later lines use the dsputil median predictor (with &0xFF semantics).
 *
 * @param is_luma nonzero for the luma plane (full width), 0 for chroma
 */
static void lag_pred_line_yuy2(LagarithContext *l, uint8_t *buf,
                               int width, int stride, int line,
                               int is_luma)
{
    int L, TL;

    if (!line) {
        L = buf[0];
        if (is_luma)
            buf[0] = 0;
        l->dsp.add_hfyu_left_prediction(buf, buf, width, 0);
        if (is_luma)
            buf[0] = L;
        return;
    }
    if (line == 1) {
        const int HEAD = is_luma ? 4 : 2;
        int i;

        L  = buf[width - stride - 1];
        TL = buf[HEAD - stride - 1];
        /* Left-predict the head of the line... */
        for (i = 0; i < HEAD; i++) {
            L += buf[i];
            buf[i] = L;
        }
        /* ...then median-predict the remainder (masked to 8 bits here,
         * unlike the planar path). */
        for (; i < width; i++) {
            L      = mid_pred(L & 0xFF, buf[i - stride], (L + buf[i - stride] - TL) & 0xFF) + buf[i];
            TL     = buf[i - stride];
            buf[i] = L;
        }
    } else {
        TL = buf[width - (2 * stride) - 1];
        L  = buf[width - stride - 1];
        l->dsp.add_hfyu_median_prediction(buf, buf - stride, buf, width,
                                          &L, &TL);
    }
}
/**
 * Range-decode one line of a plane, expanding zero runs.
 *
 * A run of esc_count consecutive zero bytes escapes into a coded run
 * length (zigzag encoded); runs may carry over between lines through
 * l->zeros_rem.
 *
 * @param esc_count zero-run escape length (0 disables escaping)
 * @return number of symbols read from the range coder
 */
static int lag_decode_line(LagarithContext *l, lag_rac *rac,
                           uint8_t *dst, int width, int stride,
                           int esc_count)
{
    int i = 0;
    int ret = 0;

    if (!esc_count)
        esc_count = -1; /* never matches l->zeros, disables escapes */

    /* Output any zeros remaining from the previous run */
handle_zeros:
    if (l->zeros_rem) {
        int count = FFMIN(l->zeros_rem, width - i);
        memset(dst + i, 0, count);
        i += count;
        l->zeros_rem -= count;
    }

    while (i < width) {
        dst[i] = lag_get_rac(rac);
        ret++;

        if (dst[i])
            l->zeros = 0;
        else
            l->zeros++;

        i++;
        if (l->zeros == esc_count) {
            /* Escape: next symbol is the zigzag-coded run length. */
            int index = lag_get_rac(rac);
            ret++;

            l->zeros = 0;

            l->zeros_rem = lag_calc_zero_run(index);
            goto handle_zeros;
        }
    }
    return ret;
}
/**
 * Decode one line of a zero-run-only coded plane (no range coding).
 *
 * Literal bytes are copied from src until a run of esc_count zero bytes
 * is found; the byte after the run is the zigzag-coded count of zeros to
 * output.  Runs may carry over between lines through l->zeros_rem.
 *
 * @param esc_count zero-run escape length, 1..3
 * @return number of bytes consumed from src, or a negative AVERROR
 */
static int lag_decode_zero_run_line(LagarithContext *l, uint8_t *dst,
                                    const uint8_t *src, const uint8_t *src_end,
                                    int width, int esc_count)
{
    int i = 0;
    int count;
    uint8_t zero_run = 0;
    const uint8_t *src_start = src;
    /* All-ones when the corresponding extra byte takes part in the
     * zero-run test: mask1 for esc_count < 2, mask2 for esc_count < 3. */
    uint8_t mask1 = -(esc_count < 2);
    uint8_t mask2 = -(esc_count < 3);
    uint8_t *end = dst + (width - 2);

output_zeros:
    if (l->zeros_rem) {
        count = FFMIN(l->zeros_rem, width - i);
        if (end - dst < count) {
            av_log(l->avctx, AV_LOG_ERROR, "Too many zeros remaining.\n");
            return AVERROR_INVALIDDATA;
        }
        memset(dst, 0, count);
        l->zeros_rem -= count;
        dst += count;
    }

    while (dst < end) {
        i = 0;
        /* Scan forward for an escape run of esc_count zero bytes. */
        while (!zero_run && dst + i < end) {
            i++;
            if (i + 2 >= src_end - src)
                return AVERROR_INVALIDDATA;
            zero_run =
                !(src[i] | (src[i + 1] & mask1) | (src[i + 2] & mask2));
        }
        if (zero_run) {
            zero_run = 0;
            i += esc_count;
            /* Copy literals including the escape zeros, then read the
             * zigzag-coded zero-run length that follows them. */
            memcpy(dst, src, i);
            dst += i;
            l->zeros_rem = lag_calc_zero_run(src[i]);

            src += i + 1;
            goto output_zeros;
        } else {
            memcpy(dst, src, i);
            src += i;
            dst += i;
        }
    }
    return src - src_start;
}
  363. static int lag_decode_arith_plane(LagarithContext *l, uint8_t *dst,
  364. int width, int height, int stride,
  365. const uint8_t *src, int src_size)
  366. {
  367. int i = 0;
  368. int read = 0;
  369. uint32_t length;
  370. uint32_t offset = 1;
  371. int esc_count;
  372. GetBitContext gb;
  373. lag_rac rac;
  374. const uint8_t *src_end = src + src_size;
  375. rac.avctx = l->avctx;
  376. l->zeros = 0;
  377. if(src_size < 2)
  378. return AVERROR_INVALIDDATA;
  379. esc_count = src[0];
  380. if (esc_count < 4) {
  381. length = width * height;
  382. if(src_size < 5)
  383. return AVERROR_INVALIDDATA;
  384. if (esc_count && AV_RL32(src + 1) < length) {
  385. length = AV_RL32(src + 1);
  386. offset += 4;
  387. }
  388. init_get_bits(&gb, src + offset, src_size * 8);
  389. if (lag_read_prob_header(&rac, &gb) < 0)
  390. return -1;
  391. ff_lag_rac_init(&rac, &gb, length - stride);
  392. for (i = 0; i < height; i++)
  393. read += lag_decode_line(l, &rac, dst + (i * stride), width,
  394. stride, esc_count);
  395. if (read > length)
  396. av_log(l->avctx, AV_LOG_WARNING,
  397. "Output more bytes than length (%d of %d)\n", read,
  398. length);
  399. } else if (esc_count < 8) {
  400. esc_count -= 4;
  401. if (esc_count > 0) {
  402. /* Zero run coding only, no range coding. */
  403. for (i = 0; i < height; i++) {
  404. int res = lag_decode_zero_run_line(l, dst + (i * stride), src,
  405. src_end, width, esc_count);
  406. if (res < 0)
  407. return res;
  408. src += res;
  409. }
  410. } else {
  411. if (src_size < width * height)
  412. return AVERROR_INVALIDDATA; // buffer not big enough
  413. /* Plane is stored uncompressed */
  414. for (i = 0; i < height; i++) {
  415. memcpy(dst + (i * stride), src, width);
  416. src += width;
  417. }
  418. }
  419. } else if (esc_count == 0xff) {
  420. /* Plane is a solid run of given value */
  421. for (i = 0; i < height; i++)
  422. memset(dst + i * stride, src[1], width);
  423. /* Do not apply prediction.
  424. Note: memset to 0 above, setting first value to src[1]
  425. and applying prediction gives the same result. */
  426. return 0;
  427. } else {
  428. av_log(l->avctx, AV_LOG_ERROR,
  429. "Invalid zero run escape code! (%#x)\n", esc_count);
  430. return -1;
  431. }
  432. if (l->avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
  433. for (i = 0; i < height; i++) {
  434. lag_pred_line(l, dst, width, stride, i);
  435. dst += stride;
  436. }
  437. } else {
  438. for (i = 0; i < height; i++) {
  439. lag_pred_line_yuy2(l, dst, width, stride, i,
  440. width == l->avctx->width);
  441. dst += stride;
  442. }
  443. }
  444. return 0;
  445. }
  446. /**
  447. * Decode a frame.
  448. * @param avctx codec context
  449. * @param data output AVFrame
  450. * @param data_size size of output data or 0 if no picture is returned
  451. * @param avpkt input packet
  452. * @return number of consumed bytes on success or negative if decode fails
  453. */
  454. static int lag_decode_frame(AVCodecContext *avctx,
  455. void *data, int *got_frame, AVPacket *avpkt)
  456. {
  457. const uint8_t *buf = avpkt->data;
  458. unsigned int buf_size = avpkt->size;
  459. LagarithContext *l = avctx->priv_data;
  460. ThreadFrame frame = { .f = data };
  461. AVFrame *const p = data;
  462. uint8_t frametype = 0;
  463. uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
  464. uint32_t offs[4];
  465. uint8_t *srcs[4], *dst;
  466. int i, j, planes = 3;
  467. int ret;
  468. p->key_frame = 1;
  469. frametype = buf[0];
  470. offset_gu = AV_RL32(buf + 1);
  471. offset_bv = AV_RL32(buf + 5);
  472. switch (frametype) {
  473. case FRAME_SOLID_RGBA:
  474. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  475. case FRAME_SOLID_GRAY:
  476. if (frametype == FRAME_SOLID_GRAY)
  477. if (avctx->bits_per_coded_sample == 24) {
  478. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  479. } else {
  480. avctx->pix_fmt = AV_PIX_FMT_0RGB32;
  481. planes = 4;
  482. }
  483. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  484. return ret;
  485. dst = p->data[0];
  486. if (frametype == FRAME_SOLID_RGBA) {
  487. for (j = 0; j < avctx->height; j++) {
  488. for (i = 0; i < avctx->width; i++)
  489. AV_WN32(dst + i * 4, offset_gu);
  490. dst += p->linesize[0];
  491. }
  492. } else {
  493. for (j = 0; j < avctx->height; j++) {
  494. memset(dst, buf[1], avctx->width * planes);
  495. dst += p->linesize[0];
  496. }
  497. }
  498. break;
  499. case FRAME_SOLID_COLOR:
  500. if (avctx->bits_per_coded_sample == 24) {
  501. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  502. } else {
  503. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  504. offset_gu |= 0xFFU << 24;
  505. }
  506. if ((ret = ff_thread_get_buffer(avctx, &frame,0)) < 0)
  507. return ret;
  508. dst = p->data[0];
  509. for (j = 0; j < avctx->height; j++) {
  510. for (i = 0; i < avctx->width; i++)
  511. if (avctx->bits_per_coded_sample == 24) {
  512. AV_WB24(dst + i * 3, offset_gu);
  513. } else {
  514. AV_WN32(dst + i * 4, offset_gu);
  515. }
  516. dst += p->linesize[0];
  517. }
  518. break;
  519. case FRAME_ARITH_RGBA:
  520. avctx->pix_fmt = AV_PIX_FMT_RGB32;
  521. planes = 4;
  522. offset_ry += 4;
  523. offs[3] = AV_RL32(buf + 9);
  524. case FRAME_ARITH_RGB24:
  525. case FRAME_U_RGB24:
  526. if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
  527. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  528. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  529. return ret;
  530. offs[0] = offset_bv;
  531. offs[1] = offset_gu;
  532. offs[2] = offset_ry;
  533. if (!l->rgb_planes) {
  534. l->rgb_stride = FFALIGN(avctx->width, 16);
  535. l->rgb_planes = av_malloc(l->rgb_stride * avctx->height * 4 + 16);
  536. if (!l->rgb_planes) {
  537. av_log(avctx, AV_LOG_ERROR, "cannot allocate temporary buffer\n");
  538. return AVERROR(ENOMEM);
  539. }
  540. }
  541. for (i = 0; i < planes; i++)
  542. srcs[i] = l->rgb_planes + (i + 1) * l->rgb_stride * avctx->height - l->rgb_stride;
  543. for (i = 0; i < planes; i++)
  544. if (buf_size <= offs[i]) {
  545. av_log(avctx, AV_LOG_ERROR,
  546. "Invalid frame offsets\n");
  547. return AVERROR_INVALIDDATA;
  548. }
  549. for (i = 0; i < planes; i++)
  550. lag_decode_arith_plane(l, srcs[i],
  551. avctx->width, avctx->height,
  552. -l->rgb_stride, buf + offs[i],
  553. buf_size - offs[i]);
  554. dst = p->data[0];
  555. for (i = 0; i < planes; i++)
  556. srcs[i] = l->rgb_planes + i * l->rgb_stride * avctx->height;
  557. for (j = 0; j < avctx->height; j++) {
  558. for (i = 0; i < avctx->width; i++) {
  559. uint8_t r, g, b, a;
  560. r = srcs[0][i];
  561. g = srcs[1][i];
  562. b = srcs[2][i];
  563. r += g;
  564. b += g;
  565. if (frametype == FRAME_ARITH_RGBA) {
  566. a = srcs[3][i];
  567. AV_WN32(dst + i * 4, MKBETAG(a, r, g, b));
  568. } else {
  569. dst[i * 3 + 0] = r;
  570. dst[i * 3 + 1] = g;
  571. dst[i * 3 + 2] = b;
  572. }
  573. }
  574. dst += p->linesize[0];
  575. for (i = 0; i < planes; i++)
  576. srcs[i] += l->rgb_stride;
  577. }
  578. break;
  579. case FRAME_ARITH_YUY2:
  580. avctx->pix_fmt = AV_PIX_FMT_YUV422P;
  581. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  582. return ret;
  583. if (offset_ry >= buf_size ||
  584. offset_gu >= buf_size ||
  585. offset_bv >= buf_size) {
  586. av_log(avctx, AV_LOG_ERROR,
  587. "Invalid frame offsets\n");
  588. return AVERROR_INVALIDDATA;
  589. }
  590. lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
  591. p->linesize[0], buf + offset_ry,
  592. buf_size - offset_ry);
  593. lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
  594. avctx->height, p->linesize[1],
  595. buf + offset_gu, buf_size - offset_gu);
  596. lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
  597. avctx->height, p->linesize[2],
  598. buf + offset_bv, buf_size - offset_bv);
  599. break;
  600. case FRAME_ARITH_YV12:
  601. avctx->pix_fmt = AV_PIX_FMT_YUV420P;
  602. if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
  603. return ret;
  604. if (buf_size <= offset_ry || buf_size <= offset_gu || buf_size <= offset_bv) {
  605. return AVERROR_INVALIDDATA;
  606. }
  607. if (offset_ry >= buf_size ||
  608. offset_gu >= buf_size ||
  609. offset_bv >= buf_size) {
  610. av_log(avctx, AV_LOG_ERROR,
  611. "Invalid frame offsets\n");
  612. return AVERROR_INVALIDDATA;
  613. }
  614. lag_decode_arith_plane(l, p->data[0], avctx->width, avctx->height,
  615. p->linesize[0], buf + offset_ry,
  616. buf_size - offset_ry);
  617. lag_decode_arith_plane(l, p->data[2], avctx->width / 2,
  618. avctx->height / 2, p->linesize[2],
  619. buf + offset_gu, buf_size - offset_gu);
  620. lag_decode_arith_plane(l, p->data[1], avctx->width / 2,
  621. avctx->height / 2, p->linesize[1],
  622. buf + offset_bv, buf_size - offset_bv);
  623. break;
  624. default:
  625. av_log(avctx, AV_LOG_ERROR,
  626. "Unsupported Lagarith frame type: %#x\n", frametype);
  627. return AVERROR_PATCHWELCOME;
  628. }
  629. *got_frame = 1;
  630. return buf_size;
  631. }
/** Decoder init: stash the codec context and set up DSP helpers. */
static av_cold int lag_decode_init(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;
    l->avctx = avctx;

    ff_dsputil_init(&l->dsp, avctx);

    return 0;
}
/** Decoder teardown: release the lazily-allocated RGB scratch planes. */
static av_cold int lag_decode_end(AVCodecContext *avctx)
{
    LagarithContext *l = avctx->priv_data;

    av_freep(&l->rgb_planes);

    return 0;
}
/* Codec registration: supports direct rendering and frame threading. */
AVCodec ff_lagarith_decoder = {
    .name           = "lagarith",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LAGARITH,
    .priv_data_size = sizeof(LagarithContext),
    .init           = lag_decode_init,
    .close          = lag_decode_end,
    .decode         = lag_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .long_name      = NULL_IF_CONFIG_SMALL("Lagarith lossless"),
};