/*
 * Apple Pixlet decoder
 * Copyright (c) 2016 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/imgutils.h"
#include "libavutil/intmath.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "bytestream.h"
#include "get_bits.h"
#include "internal.h"
#include "thread.h"
#include "unary.h"

#define NB_LEVELS 4

#define PIXLET_MAGIC 0xDEADBEEF

#define H 0
#define V 1

typedef struct SubBand {
    unsigned width, height;
    unsigned size;
    unsigned x, y;
} SubBand;

typedef struct PixletContext {
    AVClass *class;

    GetByteContext gb;
    GetBitContext bc;

    int levels;
    int depth;
    int w, h;

    int16_t *filter[2];
    int16_t *prediction;
    int64_t scaling[4][2][NB_LEVELS];
    SubBand band[4][NB_LEVELS * 3 + 1];
} PixletContext;

static av_cold int pixlet_init(AVCodecContext *avctx)
{
    avctx->pix_fmt     = AV_PIX_FMT_YUV420P16;
    avctx->color_range = AVCOL_RANGE_JPEG;
    return 0;
}

static void free_buffers(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;

    av_freep(&ctx->filter[0]);
    av_freep(&ctx->filter[1]);
    av_freep(&ctx->prediction);
}

static av_cold int pixlet_close(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;

    free_buffers(avctx);
    ctx->w = 0;
    ctx->h = 0;
    return 0;
}

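/*
 * Allocate the per-frame scratch buffers and precompute the position and
 * size of every sub-band: one low-pass band plus 3 * NB_LEVELS high-pass
 * bands per plane, with the chroma planes subsampled by one in each
 * dimension.
 */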
static int init_decoder(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;
    int i, plane;

    ctx->filter[0]  = av_malloc_array(ctx->h, sizeof(int16_t));
    ctx->filter[1]  = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
    ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
    if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
        return AVERROR(ENOMEM);

    for (plane = 0; plane < 3; plane++) {
        unsigned shift = plane > 0;
        unsigned w     = ctx->w >> shift;
        unsigned h     = ctx->h >> shift;

        ctx->band[plane][0].width  =  w >> NB_LEVELS;
        ctx->band[plane][0].height =  h >> NB_LEVELS;
        ctx->band[plane][0].size   = (w >> NB_LEVELS) * (h >> NB_LEVELS);

        for (i = 0; i < NB_LEVELS * 3; i++) {
            unsigned scale = ctx->levels - (i / 3);

            ctx->band[plane][i + 1].width  =  w >> scale;
            ctx->band[plane][i + 1].height =  h >> scale;
            ctx->band[plane][i + 1].size   = (w >> scale) * (h >> scale);

            ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
            ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
        }
    }

    return 0;
}

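/*
 * Decode the low-pass sub-band. Coefficient magnitudes are coded with an
 * adaptive unary/fixed-length scheme whose symbol size is driven by the
 * running "state" value; runs of zero coefficients are coded separately as
 * run lengths. Returns the number of bytes consumed from the bitstream,
 * or a negative error code.
 */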
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size,
                           int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *bc = &ctx->bc;
    unsigned cnt1, nbits, k, j = 0, i = 0;
    int64_t value, state = 3;
    int rlen, escape, flag = 0;

    while (i < size) {
        nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);

        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            value = show_bits(bc, nbits);
            if (value <= 1) {
                skip_bits(bc, nbits - 1);
                escape = ((1 << nbits) - 1) * cnt1;
            } else {
                skip_bits(bc, nbits);
                escape = value + ((1 << nbits) - 1) * cnt1 - 1;
            }
        } else {
            escape = get_bits(bc, 16);
        }

        value    = -((escape + flag) & 1) | 1;
        dst[j++] = value * ((escape + flag + 1) >> 1);
        i++;
        if (j == width) {
            j    = 0;
            dst += stride;
        }

        state = 120 * (escape + flag) + state - (120 * state >> 8);
        flag  = 0;

        if (state * 4ULL > 0xFF || i >= size)
            continue;

        nbits  = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, nbits);
        cnt1   = get_unary(bc, 0, 8);
        if (cnt1 > 7) {
            rlen = get_bits(bc, 16);
        } else {
            value = show_bits(bc, nbits);
            if (value > 1) {
                skip_bits(bc, nbits);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(bc, nbits - 1);
                rlen = escape * cnt1;
            }
        }

        if (rlen > size - i)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j    = 0;
                dst += stride;
            }
        }

        state = 0;
        flag  = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(bc);
    return get_bits_count(bc) >> 3;
}

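/*
 * Decode one high-pass sub-band. The symbol size adapts to the band
 * parameters (a, c, d) read by read_highpass() and to a running state,
 * with explicit run-length coding of zero coefficients, along the same
 * lines as read_low_coeffs(). Returns bytes consumed or a negative error.
 */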
static int read_high_coeffs(AVCodecContext *avctx, uint8_t *src, int16_t *dst,
                            int size, int c, int a, int d,
                            int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *bc = &ctx->bc;
    unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
    int ret, escape, pfx, value, yflag, xflag, flag = 0;
    int64_t state = 3, tmp;

    ret = init_get_bits8(bc, src, bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    if (a ^ (a >> 31)) {
        nbits = 33 - ff_clz(a ^ (a >> 31));
        if (nbits > 16)
            return AVERROR_INVALIDDATA;
    } else {
        nbits = 1;
    }

    length = 25 - nbits;

    while (i < size) {
        if (((state >> 8) + 3) & 0xFFFFFFF)
            value = ff_clz((state >> 8) + 3) ^ 0x1F;
        else
            value = -1;

        cnt1 = get_unary(bc, 0, length);

        if (cnt1 >= length) {
            cnt1 = get_bits(bc, nbits);
        } else {
            pfx = 14 + ((((uint64_t)(value - 14)) >> 32) & (value - 14));
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;
            cnt1 *= (1 << pfx) - 1;
            shbits = show_bits(bc, pfx);
            if (shbits <= 1) {
                skip_bits(bc, pfx - 1);
            } else {
                skip_bits(bc, pfx);
                cnt1 += shbits - 1;
            }
        }

        xflag = flag + cnt1;
        yflag = xflag;

        if (flag + cnt1 == 0) {
            value = 0;
        } else {
            xflag &= 1u;
            tmp    = (int64_t)c * ((yflag + 1) >> 1) + (c >> 1);
            value  = xflag + (tmp ^ -xflag);
        }

        i++;
        dst[j++] = value;
        if (j == width) {
            j    = 0;
            dst += stride;
        }
        state += (int64_t)d * (uint64_t)yflag - ((int64_t)(d * (uint64_t)state) >> 8);

        flag = 0;

        if ((uint64_t)state > 0xFF / 4 || i >= size)
            continue;

        pfx    = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, pfx);
        cnt1   = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;

            value = show_bits(bc, pfx);
            if (value > 1) {
                skip_bits(bc, pfx);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(bc, pfx - 1);
                rlen = escape * cnt1;
            }
        } else {
            if (get_bits1(bc))
                value = get_bits(bc, 16);
            else
                value = get_bits(bc, 8);

            rlen = value + 8 * escape;
        }

        if (rlen > 0xFFFF || i + rlen > size)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j    = 0;
                dst += stride;
            }
        }

        state = 0;
        flag  = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(bc);
    return get_bits_count(bc) >> 3;
}

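/*
 * Parse the header of each high-pass sub-band (four 32-bit parameters plus
 * a PIXLET_MAGIC marker) and decode its coefficients into the band's
 * position within the plane via read_high_coeffs().
 */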
static int read_highpass(AVCodecContext *avctx, uint8_t *ptr,
                         int plane, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride   = frame->linesize[plane] / 2;
    int i, ret;

    for (i = 0; i < ctx->levels * 3; i++) {
        int32_t a = bytestream2_get_be32(&ctx->gb);
        int32_t b = bytestream2_get_be32(&ctx->gb);
        int32_t c = bytestream2_get_be32(&ctx->gb);
        int32_t d = bytestream2_get_be32(&ctx->gb);
        int16_t *dest = (int16_t *)frame->data[plane] +
                        ctx->band[plane][i + 1].x +
                        ctx->band[plane][i + 1].y * stride;
        unsigned size = ctx->band[plane][i + 1].size;
        uint32_t magic = bytestream2_get_be32(&ctx->gb);

        if (magic != PIXLET_MAGIC) {
            av_log(avctx, AV_LOG_ERROR,
                   "wrong magic number: 0x%08"PRIX32" for plane %d, band %d\n",
                   magic, plane, i);
            return AVERROR_INVALIDDATA;
        }

        if (a == INT32_MIN)
            return AVERROR_INVALIDDATA;

        ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest, size,
                               c, (b >= FFABS(a)) ? b : a, d,
                               ctx->band[plane][i + 1].width, stride);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "error in highpass coefficients for plane %d, band %d\n",
                   plane, i);
            return ret;
        }
        bytestream2_skip(&ctx->gb, ret);
    }

    return 0;
}

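/*
 * Reverse the prediction applied to the low-pass band: each sample is
 * accumulated into the running column sums kept in pred[] and, from the
 * second column on, the left neighbour is added as well.
 */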
static void lowpass_prediction(int16_t *dst, int16_t *pred,
                               int width, int height, ptrdiff_t stride)
{
    int16_t val;
    int i, j;

    memset(pred, 0, width * sizeof(*pred));

    for (i = 0; i < height; i++) {
        val    = pred[0] + dst[0];
        dst[0] = pred[0] = val;
        for (j = 1; j < width; j++) {
            val     = pred[j] + dst[j];
            dst[j]  = pred[j] = val;
            dst[j] += dst[j - 1];
        }
        dst += stride;
    }
}

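/*
 * One 1-D synthesis filtering pass: the first half of dest holds low-pass
 * samples and the second half high-pass samples. Both halves are copied
 * into tmp with a small mirrored border, run through fixed-point filter
 * taps, scaled by the per-band scale factor, and interleaved back into
 * dest as even/odd output samples.
 */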
static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale)
{
    int16_t *low, *high, *ll, *lh, *hl, *hh;
    int hsize, i, j;
    int64_t value;

    hsize = size >> 1;
    low   = tmp + 4;
    high  = &low[hsize + 8];

    memcpy(low, dest, size);
    memcpy(high, dest + hsize, size);

    ll = &low[hsize];
    lh = &low[hsize];
    hl = &high[hsize];
    hh = hl;
    for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
        low[i - 5]  = low[j - 1];
        lh[0]       = ll[-1];
        high[i - 5] = high[j - 2];
        hh[0]       = hl[-2];
    }

    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 1] * -INT64_C(325392907)  +
                (int64_t) low [i + 0] *  INT64_C(3687786320) +
                (int64_t) low [i - 1] * -INT64_C(325392907)  +
                (int64_t) high[i + 0] *  INT64_C(1518500249) +
                (int64_t) high[i - 1] *  INT64_C(1518500249);
        dest[i * 2] = av_clip_int16(((value >> 32) * scale) >> 32);
    }

    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 2] * -INT64_C(65078576)   +
                (int64_t) low [i + 1] *  INT64_C(1583578880) +
                (int64_t) low [i + 0] *  INT64_C(1583578880) +
                (int64_t) low [i - 1] * -INT64_C(65078576)   +
                (int64_t) high[i + 1] *  INT64_C(303700064)  +
                (int64_t) high[i + 0] * -INT64_C(3644400640) +
                (int64_t) high[i - 1] *  INT64_C(303700064);
        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * scale) >> 32);
    }
}

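/*
 * Per-plane reconstruction: starting from the low-pass band, each of the
 * NB_LEVELS passes doubles the reconstructed area by running filterfn()
 * over every row (with the vertical scale factor) and then over every
 * column (with the horizontal one).
 */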
static void reconstruction(AVCodecContext *avctx, int16_t *dest,
                           unsigned width, unsigned height, ptrdiff_t stride,
                           int64_t *scaling_h, int64_t *scaling_v)
{
    PixletContext *ctx = avctx->priv_data;
    unsigned scaled_width, scaled_height;
    int16_t *ptr, *tmp;
    int i, j, k;

    scaled_width  = width  >> NB_LEVELS;
    scaled_height = height >> NB_LEVELS;
    tmp           = ctx->filter[0];

    for (i = 0; i < NB_LEVELS; i++) {
        int64_t scale_v = scaling_v[i];
        int64_t scale_h = scaling_h[i];
        scaled_width  <<= 1;
        scaled_height <<= 1;

        ptr = dest;
        for (j = 0; j < scaled_height; j++) {
            filterfn(ptr, ctx->filter[1], scaled_width, scale_v);
            ptr += stride;
        }

        for (j = 0; j < scaled_width; j++) {
            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                tmp[k] = *ptr;
                ptr   += stride;
            }

            filterfn(tmp, ctx->filter[1], scaled_height, scale_h);

            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                *ptr = tmp[k];
                ptr += stride;
            }
        }
    }
}

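/*
 * Convert the reconstructed luma plane in place from the signed working
 * range to full-range 16-bit output; the sample is squared before being
 * rescaled, so the mapping is nonlinear.
 */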
static void postprocess_luma(AVFrame *frame, int w, int h, int depth)
{
    uint16_t *dsty = (uint16_t *)frame->data[0];
    int16_t *srcy  = (int16_t *)frame->data[0];
    ptrdiff_t stridey = frame->linesize[0] / 2;
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            if (srcy[i] <= 0)
                dsty[i] = 0;
            else if (srcy[i] > ((1 << depth) - 1))
                dsty[i] = 65535;
            else
                dsty[i] = ((int64_t) srcy[i] * srcy[i] * 65535) /
                          ((1 << depth) - 1) / ((1 << depth) - 1);
        }
        dsty += stridey;
        srcy += stridey;
    }
}

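/*
 * Convert both chroma planes in place: re-centre the signed values around
 * half range, clip to the coded bit depth and shift up to 16 bits.
 */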
static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
{
    uint16_t *dstu = (uint16_t *)frame->data[1];
    uint16_t *dstv = (uint16_t *)frame->data[2];
    int16_t *srcu  = (int16_t *)frame->data[1];
    int16_t *srcv  = (int16_t *)frame->data[2];
    ptrdiff_t strideu = frame->linesize[1] / 2;
    ptrdiff_t stridev = frame->linesize[2] / 2;
    const unsigned add = 1 << (depth - 1);
    const unsigned shift = 16 - depth;
    int i, j;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
            dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
        }
        dstu += strideu;
        dstv += stridev;
        srcu += strideu;
        srcv += stridev;
    }
}

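/*
 * Decode a single plane: read the per-level scaling factors, the low-pass
 * band (top row, left column, then the rest), then the high-pass bands,
 * and finally run the lowpass prediction and the reconstruction filter.
 */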
static int decode_plane(AVCodecContext *avctx, int plane,
                        AVPacket *avpkt, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride   = frame->linesize[plane] / 2;
    unsigned shift     = plane > 0;
    int16_t *dst;
    int i, ret;

    for (i = ctx->levels - 1; i >= 0; i--) {
        int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
        int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);

        if (!h || !v)
            return AVERROR_INVALIDDATA;

        ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
        ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
    }

    bytestream2_skip(&ctx->gb, 4);

    dst    = (int16_t *)frame->data[plane];
    dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);

    ret = init_get_bits8(&ctx->bc, avpkt->data + bytestream2_tell(&ctx->gb),
                         bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1,
                          ctx->band[plane][0].width - 1, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, top row\n", plane);
        return ret;
    }

    ret = read_low_coeffs(avctx, dst + stride,
                          ctx->band[plane][0].height - 1, 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, left column\n",
               plane);
        return ret;
    }

    ret = read_low_coeffs(avctx, dst + stride + 1,
                          (ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
                          ctx->band[plane][0].width - 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, rest\n", plane);
        return ret;
    }

    bytestream2_skip(&ctx->gb, ret);
    if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
        av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
        return AVERROR_INVALIDDATA;
    }

    ret = read_highpass(avctx, avpkt->data, plane, frame);
    if (ret < 0)
        return ret;

    lowpass_prediction(dst, ctx->prediction, ctx->band[plane][0].width,
                       ctx->band[plane][0].height, stride);

    reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift,
                   ctx->h >> shift, stride, ctx->scaling[plane][H],
                   ctx->scaling[plane][V]);

    return 0;
}

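/*
 * Top-level frame decoding: validate the packet header (size, version,
 * dimensions, level count, bit depth), reinitialize the decoder if the
 * coded size changed, decode the three planes and post-process them into
 * full-range YUV420P16 output.
 */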
static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    PixletContext *ctx = avctx->priv_data;
    int i, w, h, width, height, ret, version;
    AVFrame *p = data;
    ThreadFrame frame = { .f = data };
    uint32_t pktsize;

    bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);

    pktsize = bytestream2_get_be32(&ctx->gb);
    if (pktsize <= 44 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32"\n", pktsize);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_le32(&ctx->gb);
    if (version != 1)
        avpriv_request_sample(avctx, "Version %d", version);

    bytestream2_skip(&ctx->gb, 4);
    if (bytestream2_get_be32(&ctx->gb) != 1)
        return AVERROR_INVALIDDATA;
    bytestream2_skip(&ctx->gb, 4);

    width  = bytestream2_get_be32(&ctx->gb);
    height = bytestream2_get_be32(&ctx->gb);

    if (width  > INT_MAX - (1U << (NB_LEVELS + 1)) ||
        height > INT_MAX - (1U << (NB_LEVELS + 1)))
        return AVERROR_INVALIDDATA;

    w = FFALIGN(width,  1 << (NB_LEVELS + 1));
    h = FFALIGN(height, 1 << (NB_LEVELS + 1));

    ctx->levels = bytestream2_get_be32(&ctx->gb);
    if (ctx->levels != NB_LEVELS)
        return AVERROR_INVALIDDATA;
    ctx->depth = bytestream2_get_be32(&ctx->gb);
    if (ctx->depth < 8 || ctx->depth > 15) {
        avpriv_request_sample(avctx, "Depth %d", ctx->depth);
        return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width  = width;
    avctx->height = height;

    if (ctx->w != w || ctx->h != h) {
        free_buffers(avctx);
        ctx->w = w;
        ctx->h = h;

        ret = init_decoder(avctx);
        if (ret < 0) {
            free_buffers(avctx);
            ctx->w = 0;
            ctx->h = 0;
            return ret;
        }
    }

    bytestream2_skip(&ctx->gb, 8);

    p->pict_type   = AV_PICTURE_TYPE_I;
    p->key_frame   = 1;
    p->color_range = AVCOL_RANGE_JPEG;

    ret = ff_thread_get_buffer(avctx, &frame, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < 3; i++) {
        ret = decode_plane(avctx, i, avpkt, frame.f);
        if (ret < 0)
            return ret;
        if (avctx->flags & AV_CODEC_FLAG_GRAY)
            break;
    }

    postprocess_luma(frame.f, ctx->w, ctx->h, ctx->depth);
    postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);

    *got_frame = 1;

    return pktsize;
}

AVCodec ff_pixlet_decoder = {
    .name           = "pixlet",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PIXLET,
    .init           = pixlet_init,
    .close          = pixlet_close,
    .decode         = pixlet_decode_frame,
    .priv_data_size = sizeof(PixletContext),
    .capabilities   = AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};