You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

710 lines
21KB

  1. /*
  2. * Apple Pixlet decoder
  3. * Copyright (c) 2016 Paul B Mahol
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include <stdint.h>
  22. #include "libavutil/imgutils.h"
  23. #include "libavutil/intmath.h"
  24. #include "libavutil/opt.h"
  25. #include "avcodec.h"
  26. #include "bytestream.h"
  27. #include "get_bits.h"
  28. #include "internal.h"
  29. #include "thread.h"
  30. #include "unary.h"
  31. #define NB_LEVELS 4
  32. #define PIXLET_MAGIC 0xDEADBEEF
  33. #define H 0
  34. #define V 1
/* Geometry of one wavelet subband within a plane's coefficient buffer. */
typedef struct SubBand {
    unsigned width, height; /* subband dimensions in coefficients */
    unsigned size;          /* width * height, precomputed in init_decoder() */
    unsigned x, y;          /* top-left offset of the band inside the plane */
} SubBand;
/* Decoder private context. */
typedef struct PixletContext {
    AVClass *class;
    GetByteContext gb;       /* byte reader over the input packet */
    GetBitContext bc;        /* bit reader for coefficient bitstreams */
    int levels;              /* wavelet decomposition levels (must equal NB_LEVELS) */
    int depth;               /* luma bit depth (8..15); caches lut[] validity */
    int w, h;                /* coded size, aligned to 1 << (NB_LEVELS + 1) */
    int16_t *filter[2];      /* scratch buffers for the inverse wavelet filter */
    int16_t *prediction;     /* per-column running sums for lowpass prediction */
    int64_t scaling[4][2][NB_LEVELS]; /* per-plane H/V dequant scale factors */
    uint16_t lut[65536];     /* squared-curve LUT mapping luma to 16-bit output */
    SubBand band[4][NB_LEVELS * 3 + 1]; /* 1 lowpass + 3 highpass bands per level */
} PixletContext;
  53. static av_cold int pixlet_init(AVCodecContext *avctx)
  54. {
  55. avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
  56. avctx->color_range = AVCOL_RANGE_JPEG;
  57. return 0;
  58. }
  59. static void free_buffers(AVCodecContext *avctx)
  60. {
  61. PixletContext *ctx = avctx->priv_data;
  62. av_freep(&ctx->filter[0]);
  63. av_freep(&ctx->filter[1]);
  64. av_freep(&ctx->prediction);
  65. }
  66. static av_cold int pixlet_close(AVCodecContext *avctx)
  67. {
  68. PixletContext *ctx = avctx->priv_data;
  69. free_buffers(avctx);
  70. ctx->w = 0;
  71. ctx->h = 0;
  72. return 0;
  73. }
/*
 * Allocate scratch buffers and precompute subband geometry for the
 * current ctx->w/ctx->h. Returns 0 on success or AVERROR(ENOMEM).
 */
static int init_decoder(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;
    int i, plane;

    /* filter[0] holds one column of samples; filter[1] is the padded
     * row/column workspace passed to filterfn() (8 extra samples) */
    ctx->filter[0] = av_malloc_array(ctx->h, sizeof(int16_t));
    ctx->filter[1] = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
    ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
    if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
        return AVERROR(ENOMEM);

    for (plane = 0; plane < 3; plane++) {
        unsigned shift = plane > 0;        /* chroma planes are half size */
        unsigned w = ctx->w >> shift;
        unsigned h = ctx->h >> shift;

        /* band 0 is the lowpass band at the deepest decomposition level */
        ctx->band[plane][0].width = w >> NB_LEVELS;
        ctx->band[plane][0].height = h >> NB_LEVELS;
        ctx->band[plane][0].size = (w >> NB_LEVELS) * (h >> NB_LEVELS);

        /* three highpass bands per level, shallower levels get larger bands */
        for (i = 0; i < NB_LEVELS * 3; i++) {
            unsigned scale = ctx->levels - (i / 3);
            ctx->band[plane][i + 1].width = w >> scale;
            ctx->band[plane][i + 1].height = h >> scale;
            ctx->band[plane][i + 1].size = (w >> scale) * (h >> scale);

            /* band position inside the plane: the x offset is zero only
             * for every third band ((i+1)%3 == 2), the y offset is zero
             * only for ((i+1)%3 == 1) */
            ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
            ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
        }
    }
    return 0;
}
/*
 * Decode 'size' lowpass coefficients with an adaptive Rice-like entropy
 * coder plus run-length coding of zeros. Output is written row by row,
 * 'width' coefficients per row, advancing by 'stride'. Returns the
 * number of bytes consumed from the bitstream, or a negative error.
 */
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, int size,
                           int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *bc = &ctx->bc;
    unsigned cnt1, nbits, k, j = 0, i = 0;
    int64_t value, state = 3;   /* adaptive context driving the code lengths */
    int rlen, escape, flag = 0;

    while (i < size) {
        /* suffix length derived from the current state, capped at 14 bits */
        nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);

        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            value = show_bits(bc, nbits);
            if (value <= 1) {
                skip_bits(bc, nbits - 1);
                escape = ((1 << nbits) - 1) * cnt1;
            } else {
                skip_bits(bc, nbits);
                escape = value + ((1 << nbits) - 1) * cnt1 - 1;
            }
        } else {
            /* unary prefix saturated: explicit 16-bit escape value */
            escape = get_bits(bc, 16);
        }

        /* map the unsigned code to a signed coefficient: odd -> negative */
        value = -((escape + flag) & 1) | 1;
        dst[j++] = value * ((escape + flag + 1) >> 1);
        i++;
        if (j == width) {
            j = 0;
            dst += stride;
        }

        /* exponential-decay state update (120/256 adaptation factor) */
        state = 120 * (escape + flag) + state - (120 * state >> 8);
        flag = 0;

        /* only small states switch into zero-run decoding */
        if (state * 4ULL > 0xFF || i >= size)
            continue;

        nbits = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, nbits);
        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 > 7) {
            rlen = get_bits(bc, 16);
        } else {
            value = show_bits(bc, nbits);
            if (value > 1) {
                skip_bits(bc, nbits);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(bc, nbits - 1);
                rlen = escape * cnt1;
            }
        }

        /* a run must not exceed the remaining coefficients */
        if (rlen > size - i)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j = 0;
                dst += stride;
            }
        }

        state = 0;
        /* a non-maximal run biases the next decoded symbol via 'flag' */
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(bc);
    return get_bits_count(bc) >> 3;
}
/*
 * Decode 'size' highpass coefficients of one subband. The band
 * parameters come from read_highpass(): 'c' scales the decoded
 * magnitudes, 'a' bounds the escape-code length, 'd' drives the
 * adaptive state. Output layout is the same row-by-row scheme as
 * read_low_coeffs(). Returns bytes consumed or a negative error.
 */
static int read_high_coeffs(AVCodecContext *avctx, uint8_t *src, int16_t *dst,
                            int size, int c, int a, int d,
                            int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    GetBitContext *bc = &ctx->bc;
    unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
    int ret, escape, pfx, value, yflag, xflag, flag = 0;
    int64_t state = 3, tmp;

    ret = init_get_bits8(bc, src, bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    /* escape-code width derived from |a| (a ^ (a >> 31) == abs for a != INT32_MIN) */
    if (a ^ (a >> 31)) {
        nbits = 33 - ff_clz(a ^ (a >> 31));
        if (nbits > 16)
            return AVERROR_INVALIDDATA;
    } else {
        nbits = 1;
    }
    length = 25 - nbits;

    while (i < size) {
        /* candidate prefix length from the adaptive state */
        if (((state >> 8) + 3) & 0xFFFFFFF)
            value = ff_clz((state >> 8) + 3) ^ 0x1F;
        else
            value = -1;

        cnt1 = get_unary(bc, 0, length);
        if (cnt1 >= length) {
            /* saturated unary prefix: explicit nbits-wide value follows */
            cnt1 = get_bits(bc, nbits);
        } else {
            /* pfx = min(value, 14), computed branchlessly via the sign bits */
            pfx = 14 + ((((uint64_t)(value - 14)) >> 32) & (value - 14));
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;
            cnt1 *= (1 << pfx) - 1;
            shbits = show_bits(bc, pfx);
            if (shbits <= 1) {
                skip_bits(bc, pfx - 1);
            } else {
                skip_bits(bc, pfx);
                cnt1 += shbits - 1;
            }
        }

        xflag = flag + cnt1;
        yflag = xflag;

        if (flag + cnt1 == 0) {
            value = 0;
        } else {
            /* dequantize: magnitude scaled by c, sign from the low bit */
            xflag &= 1u;
            tmp = (int64_t)c * ((yflag + 1) >> 1) + (c >> 1);
            value = xflag + (tmp ^ -xflag);
        }

        i++;
        dst[j++] = value;
        if (j == width) {
            j = 0;
            dst += stride;
        }

        /* adaptive state update driven by band parameter d */
        state += (int64_t)d * (uint64_t)yflag - ((int64_t)(d * (uint64_t)state) >> 8);
        flag = 0;

        /* only small states switch into zero-run decoding */
        if ((uint64_t)state > 0xFF / 4 || i >= size)
            continue;

        pfx = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, pfx);
        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;

            value = show_bits(bc, pfx);
            if (value > 1) {
                skip_bits(bc, pfx);
                rlen = value + escape * cnt1 - 1;
            } else {
                skip_bits(bc, pfx - 1);
                rlen = escape * cnt1;
            }
        } else {
            /* long run: 8- or 16-bit explicit length selected by one bit */
            if (get_bits1(bc))
                value = get_bits(bc, 16);
            else
                value = get_bits(bc, 8);

            rlen = value + 8 * escape;
        }

        if (rlen > 0xFFFF || i + rlen > size)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j = 0;
                dst += stride;
            }
        }

        state = 0;
        /* a non-maximal run biases the next decoded symbol via 'flag' */
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    align_get_bits(bc);
    return get_bits_count(bc) >> 3;
}
/*
 * Decode all levels * 3 highpass subbands of one plane into the frame
 * buffer. Each band is preceded by four big-endian 32-bit parameters
 * (a, b, c, d) and a PIXLET_MAGIC marker.
 */
static int read_highpass(AVCodecContext *avctx, uint8_t *ptr,
                         int plane, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;  /* in int16_t units */
    int i, ret;

    for (i = 0; i < ctx->levels * 3; i++) {
        int32_t a = bytestream2_get_be32(&ctx->gb);
        int32_t b = bytestream2_get_be32(&ctx->gb);
        int32_t c = bytestream2_get_be32(&ctx->gb);
        int32_t d = bytestream2_get_be32(&ctx->gb);
        /* destination: the band's rectangle inside the plane */
        int16_t *dest = (int16_t *)frame->data[plane] +
                        ctx->band[plane][i + 1].x +
                        ctx->band[plane][i + 1].y * stride;
        unsigned size = ctx->band[plane][i + 1].size;
        uint32_t magic = bytestream2_get_be32(&ctx->gb);

        if (magic != PIXLET_MAGIC) {
            av_log(avctx, AV_LOG_ERROR,
                   "wrong magic number: 0x%08"PRIX32" for plane %d, band %d\n",
                   magic, plane, i);
            return AVERROR_INVALIDDATA;
        }

        /* INT32_MIN would overflow in FFABS() below */
        if (a == INT32_MIN)
            return AVERROR_INVALIDDATA;

        ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest, size,
                               c, (b >= FFABS(a)) ? b : a, d,
                               ctx->band[plane][i + 1].width, stride);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "error in highpass coefficients for plane %d, band %d\n",
                   plane, i);
            return ret;
        }
        /* read_high_coeffs() reports how many bytes it consumed */
        bytestream2_skip(&ctx->gb, ret);
    }

    return 0;
}
  300. static void lowpass_prediction(int16_t *dst, int16_t *pred,
  301. int width, int height, ptrdiff_t stride)
  302. {
  303. int16_t val;
  304. int i, j;
  305. memset(pred, 0, width * sizeof(*pred));
  306. for (i = 0; i < height; i++) {
  307. val = pred[0] + dst[0];
  308. dst[0] = pred[0] = val;
  309. for (j = 1; j < width; j++) {
  310. val = pred[j] + dst[j];
  311. dst[j] = pred[j] = val;
  312. dst[j] += dst[j-1];
  313. }
  314. dst += stride;
  315. }
  316. }
/*
 * One inverse wavelet synthesis pass over a single row or column.
 * 'dest' holds hsize lowpass samples followed by hsize highpass
 * samples; it is overwritten with 'size' interleaved output samples.
 * 'tmp' is a scratch buffer with room for both halves plus padding.
 * 'scale' is a fixed-point dequantization factor applied after the
 * filter (two >>32 stages).
 */
static void filterfn(int16_t *dest, int16_t *tmp, unsigned size, int64_t scale)
{
    int16_t *low, *high, *ll, *lh, *hl, *hh;
    int hsize, i, j;
    int64_t value;

    hsize = size >> 1;
    low = tmp + 4;              /* leave 4 samples of left padding */
    high = &low[hsize + 8];
    /* NOTE: 'size' here is a byte count for memcpy, i.e. hsize int16_t
     * samples per half — this matches the upstream code */
    memcpy(low, dest, size);
    memcpy(high, dest + hsize, size);

    ll = &low[hsize];
    lh = &low[hsize];
    hl = &high[hsize];
    hh = hl;

    /* extend both halves by 4 samples on each side using interior samples */
    for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
        low[i - 5] = low[j - 1];
        lh[0] = ll[-1];
        high[i - 5] = high[j - 2];
        hh[0] = hl[-2];
    }

    /* even output samples: 5-tap synthesis filter, fixed-point coefficients */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 1] * -INT64_C(325392907) +
                (int64_t) low [i + 0] * INT64_C(3687786320) +
                (int64_t) low [i - 1] * -INT64_C(325392907) +
                (int64_t) high[i + 0] * INT64_C(1518500249) +
                (int64_t) high[i - 1] * INT64_C(1518500249);
        dest[i * 2] = av_clip_int16(((value >> 32) * scale) >> 32);
    }

    /* odd output samples: 7-tap synthesis filter */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 2] * -INT64_C(65078576) +
                (int64_t) low [i + 1] * INT64_C(1583578880) +
                (int64_t) low [i + 0] * INT64_C(1583578880) +
                (int64_t) low [i - 1] * -INT64_C(65078576) +
                (int64_t) high[i + 1] * INT64_C(303700064) +
                (int64_t) high[i + 0] * -INT64_C(3644400640) +
                (int64_t) high[i - 1] * INT64_C(303700064);
        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * scale) >> 32);
    }
}
/*
 * Run NB_LEVELS inverse wavelet transforms over one plane, doubling the
 * reconstructed area each level: a horizontal pass over every row, then
 * a vertical pass over every column (gather / filter / scatter via the
 * filter[0] scratch buffer). Note that rows use the V scale factors and
 * columns the H factors, matching how decode_plane() stores them.
 */
static void reconstruction(AVCodecContext *avctx, int16_t *dest,
                           unsigned width, unsigned height, ptrdiff_t stride,
                           int64_t *scaling_h, int64_t *scaling_v)
{
    PixletContext *ctx = avctx->priv_data;
    unsigned scaled_width, scaled_height;
    int16_t *ptr, *tmp;
    int i, j, k;

    scaled_width = width >> NB_LEVELS;
    scaled_height = height >> NB_LEVELS;
    tmp = ctx->filter[0];   /* column gather buffer */

    for (i = 0; i < NB_LEVELS; i++) {
        int64_t scale_v = scaling_v[i];
        int64_t scale_h = scaling_h[i];
        scaled_width <<= 1;
        scaled_height <<= 1;

        /* horizontal pass: filter each row in place */
        ptr = dest;
        for (j = 0; j < scaled_height; j++) {
            filterfn(ptr, ctx->filter[1], scaled_width, scale_v);
            ptr += stride;
        }

        /* vertical pass: copy each column into tmp, filter, copy back */
        for (j = 0; j < scaled_width; j++) {
            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                tmp[k] = *ptr;
                ptr += stride;
            }

            filterfn(tmp, ctx->filter[1], scaled_height, scale_h);

            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                *ptr = tmp[k];
                ptr += stride;
            }
        }
    }
}
  392. static void build_luma_lut(AVCodecContext *avctx, int depth)
  393. {
  394. PixletContext *ctx = avctx->priv_data;
  395. int max = (1 << depth) - 1;
  396. if (ctx->depth == depth)
  397. return;
  398. ctx->depth = depth;
  399. for (int i = 0; i < FF_ARRAY_ELEMS(ctx->lut); i++)
  400. ctx->lut[i] = ((int64_t)i * i * 65535LL) / max / max;
  401. }
  402. static void postprocess_luma(AVCodecContext *avctx, AVFrame *frame,
  403. int w, int h, int depth)
  404. {
  405. PixletContext *ctx = avctx->priv_data;
  406. uint16_t *dsty = (uint16_t *)frame->data[0];
  407. int16_t *srcy = (int16_t *)frame->data[0];
  408. ptrdiff_t stridey = frame->linesize[0] / 2;
  409. uint16_t *lut = ctx->lut;
  410. int i, j;
  411. for (j = 0; j < h; j++) {
  412. for (i = 0; i < w; i++) {
  413. if (srcy[i] <= 0)
  414. dsty[i] = 0;
  415. else if (srcy[i] > ((1 << depth) - 1))
  416. dsty[i] = 65535;
  417. else
  418. dsty[i] = lut[srcy[i]];
  419. }
  420. dsty += stridey;
  421. srcy += stridey;
  422. }
  423. }
  424. static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
  425. {
  426. uint16_t *dstu = (uint16_t *)frame->data[1];
  427. uint16_t *dstv = (uint16_t *)frame->data[2];
  428. int16_t *srcu = (int16_t *)frame->data[1];
  429. int16_t *srcv = (int16_t *)frame->data[2];
  430. ptrdiff_t strideu = frame->linesize[1] / 2;
  431. ptrdiff_t stridev = frame->linesize[2] / 2;
  432. const unsigned add = 1 << (depth - 1);
  433. const unsigned shift = 16 - depth;
  434. int i, j;
  435. for (j = 0; j < h; j++) {
  436. for (i = 0; i < w; i++) {
  437. dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
  438. dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
  439. }
  440. dstu += strideu;
  441. dstv += stridev;
  442. srcu += strideu;
  443. srcv += stridev;
  444. }
  445. }
/*
 * Decode one plane: read the per-level scaling factors, the lowpass
 * band (DC sample, top row, left column, then the interior), then all
 * highpass bands, and finally run lowpass prediction and the inverse
 * wavelet reconstruction.
 */
static int decode_plane(AVCodecContext *avctx, int plane,
                        AVPacket *avpkt, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;  /* in int16_t units */
    unsigned shift = plane > 0;                     /* chroma is half size */
    int16_t *dst;
    int i, ret;

    /* per-level H/V quantizers, stored as fixed-point reciprocals */
    for (i = ctx->levels - 1; i >= 0; i--) {
        int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
        int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
        if (!h || !v)
            return AVERROR_INVALIDDATA;
        ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
        ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
    }
    bytestream2_skip(&ctx->gb, 4);

    dst = (int16_t *)frame->data[plane];
    /* first lowpass sample is stored verbatim */
    dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);

    ret = init_get_bits8(&ctx->bc, avpkt->data + bytestream2_tell(&ctx->gb),
                         bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    /* top row of the lowpass band, minus the already-read first sample */
    ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1,
                          ctx->band[plane][0].width - 1, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, top row\n", plane);
        return ret;
    }

    /* left column (width 1, one sample per stride) */
    ret = read_low_coeffs(avctx, dst + stride,
                          ctx->band[plane][0].height - 1, 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, left column\n",
               plane);
        return ret;
    }

    /* the remaining (width-1) x (height-1) interior */
    ret = read_low_coeffs(avctx, dst + stride + 1,
                          (ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
                          ctx->band[plane][0].width - 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, rest\n", plane);
        return ret;
    }

    bytestream2_skip(&ctx->gb, ret);
    if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
        av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
        return AVERROR_INVALIDDATA;
    }

    ret = read_highpass(avctx, avpkt->data, plane, frame);
    if (ret < 0)
        return ret;

    lowpass_prediction(dst, ctx->prediction, ctx->band[plane][0].width,
                       ctx->band[plane][0].height, stride);

    reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift,
                   ctx->h >> shift, stride, ctx->scaling[plane][H],
                   ctx->scaling[plane][V]);

    return 0;
}
/*
 * Decode one Pixlet frame: parse the container header, (re)initialize
 * buffers when the coded size changes, decode the three planes, then
 * convert luma and chroma into the output pixel format.
 */
static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    PixletContext *ctx = avctx->priv_data;
    int i, w, h, width, height, ret, version;
    AVFrame *p = data;
    ThreadFrame frame = { .f = data };
    uint32_t pktsize, depth;

    bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);

    /* total packet size, including this field */
    pktsize = bytestream2_get_be32(&ctx->gb);
    if (pktsize <= 44 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32"\n", pktsize);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_le32(&ctx->gb);
    if (version != 1)
        avpriv_request_sample(avctx, "Version %d", version);

    bytestream2_skip(&ctx->gb, 4);
    if (bytestream2_get_be32(&ctx->gb) != 1)
        return AVERROR_INVALIDDATA;
    bytestream2_skip(&ctx->gb, 4);

    width = bytestream2_get_be32(&ctx->gb);
    height = bytestream2_get_be32(&ctx->gb);

    /* guard the FFALIGN below against integer overflow */
    if ( width > INT_MAX - (1U << (NB_LEVELS + 1))
        || height > INT_MAX - (1U << (NB_LEVELS + 1)))
        return AVERROR_INVALIDDATA;

    /* internal buffers are padded to a multiple of 2^(NB_LEVELS+1) */
    w = FFALIGN(width, 1 << (NB_LEVELS + 1));
    h = FFALIGN(height, 1 << (NB_LEVELS + 1));

    ctx->levels = bytestream2_get_be32(&ctx->gb);
    if (ctx->levels != NB_LEVELS)
        return AVERROR_INVALIDDATA;
    depth = bytestream2_get_be32(&ctx->gb);
    if (depth < 8 || depth > 15) {
        avpriv_request_sample(avctx, "Depth %d", depth);
        return AVERROR_INVALIDDATA;
    }

    build_luma_lut(avctx, depth);

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    /* report the original, unpadded size to the caller */
    avctx->width = width;
    avctx->height = height;

    /* reallocate scratch buffers when the coded size changed */
    if (ctx->w != w || ctx->h != h) {
        free_buffers(avctx);
        ctx->w = w;
        ctx->h = h;

        ret = init_decoder(avctx);
        if (ret < 0) {
            free_buffers(avctx);
            ctx->w = 0;
            ctx->h = 0;
            return ret;
        }
    }

    bytestream2_skip(&ctx->gb, 8);

    p->pict_type = AV_PICTURE_TYPE_I;   /* every Pixlet frame is intra */
    p->key_frame = 1;
    p->color_range = AVCOL_RANGE_JPEG;

    ret = ff_thread_get_buffer(avctx, &frame, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < 3; i++) {
        ret = decode_plane(avctx, i, avpkt, frame.f);
        if (ret < 0)
            return ret;
        /* GRAY flag: stop after luma */
        if (avctx->flags & AV_CODEC_FLAG_GRAY)
            break;
    }

    postprocess_luma(avctx, frame.f, ctx->w, ctx->h, ctx->depth);
    postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);

    *got_frame = 1;

    return pktsize;
}
/* Codec registration: frame-threaded decoder with direct rendering support. */
AVCodec ff_pixlet_decoder = {
    .name           = "pixlet",
    .long_name      = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_PIXLET,
    .init           = pixlet_init,
    .close          = pixlet_close,
    .decode         = pixlet_decode_frame,
    .priv_data_size = sizeof(PixletContext),
    .capabilities   = AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
};