You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

691 lines
20KB

  1. /*
  2. * Apple Pixlet decoder
  3. * Copyright (c) 2016 Paul B Mahol
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include <stdint.h>
  22. #include "libavutil/imgutils.h"
  23. #include "libavutil/intmath.h"
  24. #include "libavutil/opt.h"
  25. #include "avcodec.h"
  26. #include "bytestream.h"
  27. #include "get_bits.h"
  28. #include "unary.h"
  29. #include "internal.h"
  30. #include "thread.h"
  31. #define NB_LEVELS 4
  32. #define H 0
  33. #define V 1
/* Geometry of one wavelet subband within a plane's coefficient buffer. */
typedef struct SubBand {
    unsigned width, height; /* subband dimensions in coefficients */
    unsigned size;          /* width * height, precomputed in init_decoder() */
    unsigned x, y;          /* top-left position of the subband inside the plane */
} SubBand;
/* Private decoder context. */
typedef struct PixletContext {
    AVClass *class;
    GetByteContext gb;   /* byte reader over the current packet */
    GetBitContext gbit;  /* bit reader for coefficient data */
    int levels;          /* wavelet levels from the bitstream; must equal NB_LEVELS */
    int depth;           /* sample bit depth, validated to 8..15 */
    int h, w;            /* coded size, aligned to 1 << (NB_LEVELS + 1) */
    int16_t *filter[2];  /* scratch: [0] column buffer, [1] filterfn() temporary */
    int16_t *prediction; /* per-column accumulators for lowpass_prediction() */
    /* per-plane, per-direction (H/V), per-level scale factors in 32.32 fixed point */
    int64_t scaling[4][2][NB_LEVELS];
    /* band[plane][0] is the lowpass band; then 3 highpass bands per level */
    SubBand band[4][NB_LEVELS * 3 + 1];
} PixletContext;
/*
 * Allocate scratch buffers and compute the subband layout for every plane.
 * Requires ctx->w / ctx->h to be set (aligned by the caller).
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure; the caller
 * frees partially allocated buffers via free_buffers().
 */
static int init_decoder(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;
    int i, plane;

    /* filter[0] holds one column in reconstruction(); filter[1] is the
     * filterfn() temporary, which needs a 16-sample margin for the edge
     * mirroring performed there. */
    ctx->filter[0]  = av_malloc_array(ctx->h, sizeof(int16_t));
    ctx->filter[1]  = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
    ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
    if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
        return AVERROR(ENOMEM);

    for (plane = 0; plane < 3; plane++) {
        unsigned shift = plane > 0;   /* chroma planes are subsampled by 2 */
        unsigned w     = ctx->w >> shift;
        unsigned h     = ctx->h >> shift;

        /* band 0: the lowpass band at the deepest level */
        ctx->band[plane][0].width  =  w >> NB_LEVELS;
        ctx->band[plane][0].height =  h >> NB_LEVELS;
        ctx->band[plane][0].size   = (w >> NB_LEVELS) * (h >> NB_LEVELS);

        /* three highpass bands per level, coarse to fine */
        for (i = 0; i < NB_LEVELS * 3; i++) {
            unsigned scale = ctx->levels - (i / 3);

            ctx->band[plane][i + 1].width  =  w >> scale;
            ctx->band[plane][i + 1].height =  h >> scale;
            ctx->band[plane][i + 1].size   = (w >> scale) * (h >> scale);

            /* position of the band inside the plane buffer */
            ctx->band[plane][i + 1].x      = (w >> scale) * (((i + 1) % 3) != 2);
            ctx->band[plane][i + 1].y      = (h >> scale) * (((i + 1) % 3) != 1);
        }
    }
    return 0;
}
  78. static void free_buffers(AVCodecContext *avctx)
  79. {
  80. PixletContext *ctx = avctx->priv_data;
  81. av_freep(&ctx->filter[0]);
  82. av_freep(&ctx->filter[1]);
  83. av_freep(&ctx->prediction);
  84. }
  85. static av_cold int pixlet_close(AVCodecContext *avctx)
  86. {
  87. PixletContext *ctx = avctx->priv_data;
  88. free_buffers(avctx);
  89. ctx->w = 0;
  90. ctx->h = 0;
  91. return 0;
  92. }
  93. static av_cold int pixlet_init(AVCodecContext *avctx)
  94. {
  95. avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
  96. avctx->color_range = AVCOL_RANGE_JPEG;
  97. return 0;
  98. }
/*
 * Decode "size" lowpass coefficients into dst, writing "width" values per
 * row then stepping the destination by "stride". Uses an adaptive
 * Golomb/Rice-like code: "state" tracks recent magnitudes and selects the
 * raw-bit count, and zero runs are coded explicitly once the state gets
 * small. Returns the number of whole bytes consumed from ctx->gbit, or a
 * negative AVERROR code.
 */
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size, int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *b = &ctx->gbit;
    unsigned cnt1, nbits, k, j = 0, i = 0;
    int64_t value, state = 3;
    int rlen, escape, flag = 0;

    while (i < size) {
        /* raw suffix length derived from the state, capped at 14 bits */
        nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);

        cnt1 = get_unary(b, 0, 8);
        if (cnt1 < 8) {
            value = show_bits(b, nbits);
            if (value <= 1) {
                /* short suffix: only nbits-1 bits are actually consumed */
                skip_bits(b, nbits - 1);
                escape = ((1 << nbits) - 1) * cnt1;
            } else {
                skip_bits(b, nbits);
                escape = value + ((1 << nbits) - 1) * cnt1 - 1;
            }
        } else {
            /* saturated unary prefix: magnitude stored as raw 16 bits */
            escape = get_bits(b, 16);
        }

        /* unfold the unsigned magnitude (with run-end bias "flag") into a
         * signed coefficient: odd -> negative, even -> positive */
        value = -((escape + flag) & 1) | 1;
        dst[j++] = value * ((escape + flag + 1) >> 1);
        i++;
        if (j == width) {
            j = 0;
            dst += stride;
        }

        /* exponential-decay adaptation with factor 120/256 */
        state = 120 * (escape + flag) + state - (120 * state >> 8);
        flag = 0;

        /* only small states switch to run-length coding of zeros */
        if (state * 4ULL > 0xFF || i >= size)
            continue;

        nbits = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, nbits);
        cnt1 = get_unary(b, 0, 8);
        if (cnt1 > 7) {
            rlen = get_bits(b, 16);
        } else {
            value = show_bits(b, nbits);
            if (value > 1) {
                skip_bits(b, nbits);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(b, nbits - 1);
                rlen = escape * cnt1;
            }
        }

        /* the run must fit within the remaining coefficients */
        if (rlen > size - i)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j = 0;
                dst += stride;
            }
        }

        state = 0;
        /* a non-maximal run biases the next coefficient by +1 via "flag" */
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(b);
    return get_bits_count(b) >> 3;
}
/*
 * Decode "size" highpass coefficients into dst ("width" per row, then step
 * by "stride"). Parameters a/c/d come from the band header (see
 * read_highpass): a bounds the raw-bit escape length, c scales decoded
 * magnitudes, d is the state-adaptation rate. Re-initializes ctx->gbit on
 * src. Returns bytes consumed or a negative AVERROR code.
 */
static int read_high_coeffs(AVCodecContext *avctx, uint8_t *src, int16_t *dst, int size,
                            int c, int a, int d,
                            int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *b = &ctx->gbit;
    unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
    int ret, escape, pfx, value, yflag, xflag, flag = 0;
    int64_t state = 3, tmp;

    if ((ret = init_get_bits8(b, src, bytestream2_get_bytes_left(&ctx->gb))) < 0)
        return ret;

    /* nbits = bit length of |a| (a ^ (a >> 31) folds negatives), capped at 16 */
    if (a ^ (a >> 31)) {
        nbits = 33 - ff_clz(a ^ (a >> 31));
        if (nbits > 16)
            return AVERROR_INVALIDDATA;
    } else {
        nbits = 1;
    }
    length = 25 - nbits;   /* unary prefix limit */

    while (i < size) {
        /* log2-style measure of the adapted state; -1 when it underflows */
        if (((state >> 8) + 3) & 0xFFFFFFF) {
            value = ff_clz((state >> 8) + 3) ^ 0x1F;
        } else {
            value = -1;
        }

        cnt1 = get_unary(b, 0, length);
        if (cnt1 >= length) {
            /* escape: magnitude stored raw in nbits bits */
            cnt1 = get_bits(b, nbits);
        } else {
            /* pfx = min(value, 14) via branchless sign-mask arithmetic */
            pfx = 14 + ((((uint64_t)(value - 14)) >> 32) & (value - 14));
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;
            cnt1 *= (1 << pfx) - 1;
            shbits = show_bits(b, pfx);
            if (shbits <= 1) {
                skip_bits(b, pfx - 1);
            } else {
                skip_bits(b, pfx);
                cnt1 += shbits - 1;
            }
        }

        /* dequantize: magnitude -> value = sign-folded c*(mag+1)/2 + c/2 */
        xflag = flag + cnt1;
        yflag = xflag;
        if (flag + cnt1 == 0) {
            value = 0;
        } else {
            xflag &= 1u;
            tmp = (int64_t)c * ((yflag + 1) >> 1) + (c >> 1);
            value = xflag + (tmp ^ -xflag);   /* negate when magnitude is odd */
        }
        i++;
        dst[j++] = value;
        if (j == width) {
            j = 0;
            dst += stride;
        }
        state += (int64_t)d * (uint64_t)yflag - ((int64_t)(d * (uint64_t)state) >> 8);
        flag = 0;

        /* only small states switch to run-length coding of zeros */
        if ((uint64_t)state > 0xFF / 4 || i >= size)
            continue;

        pfx = ((state + 8) >> 5) + (state ? ff_clz(state): 32) - 24;
        escape = av_mod_uintp2(16383, pfx);
        cnt1 = get_unary(b, 0, 8);
        if (cnt1 < 8) {
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;
            value = show_bits(b, pfx);
            if (value > 1) {
                skip_bits(b, pfx);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(b, pfx - 1);
                rlen = escape * cnt1;
            }
        } else {
            /* long run: 8- or 16-bit raw length selected by one flag bit */
            if (get_bits1(b))
                value = get_bits(b, 16);
            else
                value = get_bits(b, 8);
            rlen = value + 8 * escape;
        }

        if (rlen > 0xFFFF || i + rlen > size)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j = 0;
                dst += stride;
            }
        }

        state = 0;
        /* a non-maximal run biases the next symbol via "flag" */
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(b);
    return get_bits_count(b) >> 3;
}
/*
 * Decode all highpass subbands of one plane. Each band starts with four
 * big-endian 32-bit parameters (a, b, c, d) followed by a 0xDEADBEEF
 * marker; "ptr" is the start of the packet so coefficient data can be
 * addressed via bytestream2_tell(). Returns 0 or a negative AVERROR code.
 */
static int read_highpass(AVCodecContext *avctx, uint8_t *ptr, int plane, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;   /* in int16_t units */
    int i, ret;

    for (i = 0; i < ctx->levels * 3; i++) {
        int32_t a = bytestream2_get_be32(&ctx->gb);
        int32_t b = bytestream2_get_be32(&ctx->gb);
        int32_t c = bytestream2_get_be32(&ctx->gb);
        int32_t d = bytestream2_get_be32(&ctx->gb);
        int16_t *dest = (int16_t *)frame->data[plane] + ctx->band[plane][i + 1].x +
                        stride * ctx->band[plane][i + 1].y;
        unsigned size = ctx->band[plane][i + 1].size;
        uint32_t magic;

        magic = bytestream2_get_be32(&ctx->gb);
        if (magic != 0xDEADBEEF) {
            av_log(avctx, AV_LOG_ERROR, "wrong magic number: 0x%08"PRIX32
                   " for plane %d, band %d\n", magic, plane, i);
            return AVERROR_INVALIDDATA;
        }

        /* FFABS(INT32_MIN) would overflow in the expression below */
        if (a == INT32_MIN)
            return AVERROR_INVALIDDATA;

        ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest, size,
                               c, (b >= FFABS(a)) ? b : a, d,
                               ctx->band[plane][i + 1].width, stride);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "error in highpass coefficients for plane %d, band %d\n", plane, i);
            return ret;
        }

        bytestream2_skip(&ctx->gb, ret);
    }
    return 0;
}
  293. static void lowpass_prediction(int16_t *dst, int16_t *pred, int width, int height, ptrdiff_t stride)
  294. {
  295. int16_t val;
  296. int i, j;
  297. memset(pred, 0, width * sizeof(*pred));
  298. for (i = 0; i < height; i++) {
  299. val = pred[0] + dst[0];
  300. dst[0] = pred[0] = val;
  301. for (j = 1; j < width; j++) {
  302. val = pred[j] + dst[j];
  303. dst[j] = pred[j] = val;
  304. dst[j] += dst[j-1];
  305. }
  306. dst += stride;
  307. }
  308. }
/*
 * One inverse-wavelet (synthesis) pass over a line of "size" samples in
 * dest: the first half holds lowpass and the second half highpass
 * coefficients; the interleaved output overwrites dest. tmp is scratch
 * large enough for both halves plus the mirrored margins built below
 * (init_decoder allocates FFMAX(w, h) + 16 samples). scale is a 32.32
 * fixed-point factor applied to every output sample.
 */
static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale)
{
    int16_t *low, *high, *ll, *lh, *hl, *hh;
    int hsize, i, j;
    int64_t value;

    hsize = size >> 1;
    low  = tmp + 4;           /* 4 samples of left margin */
    high = &low[hsize + 8];   /* 8 samples between the two halves */

    /* NB: memcpy's third argument is in bytes, so "size" bytes copy
     * exactly hsize int16_t samples per half */
    memcpy(low, dest, size);
    memcpy(high, dest + hsize, size);

    ll = &low[hsize];
    lh = &low[hsize];
    hl = &high[hsize];
    hh = hl;

    /* mirror the edge samples into the margins on both sides of each half */
    for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
        low[i - 5]  = low[j - 1];
        lh[0]       = ll[-1];
        high[i - 5] = high[j - 2];
        hh[0]       = hl[-2];
    }

    /* even output samples; filter taps are fixed-point constants, keeping
     * the top 32 bits of the accumulation, then applying the 32.32 scale */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 1] * -INT64_C(325392907)  +
                (int64_t) low [i + 0] *  INT64_C(3687786320) +
                (int64_t) low [i - 1] * -INT64_C(325392907)  +
                (int64_t) high[i + 0] *  INT64_C(1518500249) +
                (int64_t) high[i - 1] *  INT64_C(1518500249);
        dest[i * 2] = av_clip_int16(((value >> 32) * scale) >> 32);
    }

    /* odd output samples */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 2] * -INT64_C(65078576)   +
                (int64_t) low [i + 1] *  INT64_C(1583578880) +
                (int64_t) low [i + 0] *  INT64_C(1583578880) +
                (int64_t) low [i - 1] * -INT64_C(65078576)   +
                (int64_t) high[i + 1] *  INT64_C(303700064)  +
                (int64_t) high[i + 0] * -INT64_C(3644400640) +
                (int64_t) high[i - 1] *  INT64_C(303700064);
        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * scale) >> 32);
    }
}
/*
 * Multi-level inverse wavelet reconstruction of one plane, in place.
 * For each level from coarse to fine, every row is filtered (filterfn)
 * and then every column, doubling the reconstructed area each level.
 * scaling_H / scaling_V hold the per-level 32.32 fixed-point scales;
 * note the horizontal (row) pass uses scale_V and the vertical (column)
 * pass uses scale_H, mirroring how decode_plane stores them.
 */
static void reconstruction(AVCodecContext *avctx,
                           int16_t *dest, unsigned width, unsigned height, ptrdiff_t stride, int nb_levels,
                           int64_t *scaling_H, int64_t *scaling_V)
{
    PixletContext *ctx = avctx->priv_data;
    unsigned scaled_width, scaled_height;
    int64_t scale_H, scale_V;
    int16_t *ptr, *tmp;
    int i, j, k;

    scaled_height = height >> nb_levels;
    scaled_width  = width  >> nb_levels;
    tmp = ctx->filter[0];   /* column gather/scatter buffer (ctx->h samples) */

    for (i = 0; i < nb_levels; i++) {
        scaled_width  <<= 1;
        scaled_height <<= 1;
        scale_H = scaling_H[i];
        scale_V = scaling_V[i];

        /* horizontal pass: filter each row in place */
        ptr = dest;
        for (j = 0; j < scaled_height; j++) {
            filterfn(ptr, ctx->filter[1], scaled_width, scale_V);
            ptr += stride;
        }

        /* vertical pass: gather each column into tmp, filter, scatter back */
        for (j = 0; j < scaled_width; j++) {
            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                tmp[k] = *ptr;
                ptr += stride;
            }

            filterfn(tmp, ctx->filter[1], scaled_height, scale_H);

            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                *ptr = tmp[k];
                ptr += stride;
            }
        }
    }
}
  385. static void postprocess_luma(AVFrame *frame, int w, int h, int depth)
  386. {
  387. uint16_t *dsty = (uint16_t *)frame->data[0];
  388. int16_t *srcy = (int16_t *)frame->data[0];
  389. ptrdiff_t stridey = frame->linesize[0] / 2;
  390. int i, j;
  391. for (j = 0; j < h; j++) {
  392. for (i = 0; i < w; i++) {
  393. if (srcy[i] <= 0)
  394. dsty[i] = 0;
  395. else if (srcy[i] > ((1 << depth) - 1))
  396. dsty[i] = 65535;
  397. else
  398. dsty[i] = ((int64_t) srcy[i] * srcy[i] * 65535) /
  399. ((1 << depth) - 1) / ((1 << depth) - 1);
  400. }
  401. dsty += stridey;
  402. srcy += stridey;
  403. }
  404. }
  405. static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
  406. {
  407. uint16_t *dstu = (uint16_t *)frame->data[1];
  408. uint16_t *dstv = (uint16_t *)frame->data[2];
  409. int16_t *srcu = (int16_t *)frame->data[1];
  410. int16_t *srcv = (int16_t *)frame->data[2];
  411. ptrdiff_t strideu = frame->linesize[1] / 2;
  412. ptrdiff_t stridev = frame->linesize[2] / 2;
  413. const unsigned add = 1 << (depth - 1);
  414. const unsigned shift = 16 - depth;
  415. int i, j;
  416. for (j = 0; j < h; j++) {
  417. for (i = 0; i < w; i++) {
  418. dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
  419. dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
  420. }
  421. dstu += strideu;
  422. dstv += stridev;
  423. srcu += strideu;
  424. srcv += stridev;
  425. }
  426. }
/*
 * Decode one plane: per-level scaling factors, the lowpass band (DC, top
 * row, left column, interior), then all highpass bands; finally undo the
 * lowpass prediction and run the inverse wavelet reconstruction.
 * Returns 0 on success or a negative AVERROR code.
 */
static int decode_plane(AVCodecContext *avctx, int plane, AVPacket *avpkt, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;   /* in int16_t units */
    unsigned shift = plane > 0;                      /* chroma is half-size */
    int16_t *dst;
    int i, ret;

    /* per-level H/V quantizers, stored as (1000000 << 32) / q (32.32) */
    for (i = ctx->levels - 1; i >= 0; i--) {
        int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
        int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);

        /* division by zero guard */
        if (!h || !v)
            return AVERROR_INVALIDDATA;

        ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
        ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
    }

    bytestream2_skip(&ctx->gb, 4);

    dst    = (int16_t *)frame->data[plane];
    dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);   /* DC sample */

    if ((ret = init_get_bits8(&ctx->gbit, avpkt->data + bytestream2_tell(&ctx->gb),
                              bytestream2_get_bytes_left(&ctx->gb))) < 0)
        return ret;

    /* top row of the lowpass band (stride 0: size == width, so the row
     * pointer never needs to advance) */
    ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1, ctx->band[plane][0].width - 1, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "error in lowpass coefficients for plane %d, top row\n", plane);
        return ret;
    }

    /* left column (width 1, stepping by the frame stride) */
    ret = read_low_coeffs(avctx, dst + stride, ctx->band[plane][0].height - 1, 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "error in lowpass coefficients for plane %d, left column\n", plane);
        return ret;
    }

    /* interior of the lowpass band */
    ret = read_low_coeffs(avctx, dst + stride + 1,
                          (ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
                          ctx->band[plane][0].width - 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "error in lowpass coefficients for plane %d, rest\n", plane);
        return ret;
    }

    bytestream2_skip(&ctx->gb, ret);
    if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
        av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
        return AVERROR_INVALIDDATA;
    }

    ret = read_highpass(avctx, avpkt->data, plane, frame);
    if (ret < 0)
        return ret;

    lowpass_prediction(dst, ctx->prediction,
                       ctx->band[plane][0].width, ctx->band[plane][0].height, stride);

    reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift, ctx->h >> shift,
                   stride, NB_LEVELS, ctx->scaling[plane][H], ctx->scaling[plane][V]);

    return 0;
}
/*
 * Decode one Pixlet packet into a frame. Packet layout: 32-bit packet
 * size, version, header fields (width, height, levels, depth), then the
 * per-plane coefficient data. Returns the number of bytes consumed
 * (pktsize) on success or a negative AVERROR code.
 */
static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    PixletContext *ctx = avctx->priv_data;
    int i, w, h, width, height, ret, version;
    AVFrame *p = data;
    ThreadFrame frame = { .f = data };
    uint32_t pktsize;

    bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);

    pktsize = bytestream2_get_be32(&ctx->gb);
    if (pktsize <= 44 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32"\n", pktsize);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_le32(&ctx->gb);
    if (version != 1)
        avpriv_request_sample(avctx, "Version %d", version);  /* best effort: keep decoding */

    bytestream2_skip(&ctx->gb, 4);
    if (bytestream2_get_be32(&ctx->gb) != 1)
        return AVERROR_INVALIDDATA;
    bytestream2_skip(&ctx->gb, 4);

    width  = bytestream2_get_be32(&ctx->gb);
    height = bytestream2_get_be32(&ctx->gb);

    /* guard against integer overflow in FFALIGN below */
    if (    width > INT_MAX - (1U << (NB_LEVELS + 1))
        || height > INT_MAX - (1U << (NB_LEVELS + 1)))
        return AVERROR_INVALIDDATA;

    /* internal buffers are padded to a multiple of 1 << (NB_LEVELS + 1) */
    w = FFALIGN(width,  1 << (NB_LEVELS + 1));
    h = FFALIGN(height, 1 << (NB_LEVELS + 1));

    ctx->levels = bytestream2_get_be32(&ctx->gb);
    if (ctx->levels != NB_LEVELS)
        return AVERROR_INVALIDDATA;
    ctx->depth = bytestream2_get_be32(&ctx->gb);
    if (ctx->depth < 8 || ctx->depth > 15) {
        avpriv_request_sample(avctx, "Depth %d", ctx->depth);
        return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width  = width;
    avctx->height = height;

    /* (re)allocate scratch buffers whenever the coded size changes */
    if (ctx->w != w || ctx->h != h) {
        free_buffers(avctx);
        ctx->w = w;
        ctx->h = h;

        ret = init_decoder(avctx);
        if (ret < 0) {
            free_buffers(avctx);
            ctx->w = 0;
            ctx->h = 0;
            return ret;
        }
    }

    bytestream2_skip(&ctx->gb, 8);

    p->pict_type   = AV_PICTURE_TYPE_I;   /* all frames are intra */
    p->key_frame   = 1;
    p->color_range = AVCOL_RANGE_JPEG;

    ret = ff_thread_get_buffer(avctx, &frame, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < 3; i++) {
        ret = decode_plane(avctx, i, avpkt, frame.f);
        if (ret < 0)
            return ret;
        if (avctx->flags & AV_CODEC_FLAG_GRAY)
            break;
    }

    /* NOTE(review): with AV_CODEC_FLAG_GRAY the chroma planes are never
     * decoded above, yet postprocess_chroma still runs over them — confirm
     * this relies on the buffer allocator's contents being acceptable */
    postprocess_luma(frame.f, ctx->w, ctx->h, ctx->depth);
    postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);

    *got_frame = 1;

    return pktsize;
}
  551. #if HAVE_THREADS
  552. static int pixlet_init_thread_copy(AVCodecContext *avctx)
  553. {
  554. PixletContext *ctx = avctx->priv_data;
  555. ctx->filter[0] = NULL;
  556. ctx->filter[1] = NULL;
  557. ctx->prediction = NULL;
  558. ctx->w = ctx->h = 0;
  559. return 0;
  560. }
  561. #endif
/* Codec registration: intra-only Apple Pixlet decoder with frame threading
 * and direct rendering; init is thread-safe and cleanup runs on init
 * failure. */
AVCodec ff_pixlet_decoder = {
    .name             = "pixlet",
    .long_name        = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_PIXLET,
    .init             = pixlet_init,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(pixlet_init_thread_copy),
    .close            = pixlet_close,
    .decode           = pixlet_decode_frame,
    .priv_data_size   = sizeof(PixletContext),
    .capabilities     = AV_CODEC_CAP_DR1 |
                        AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
};