You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

693 lines
20KB

  1. /*
  2. * Apple Pixlet decoder
  3. * Copyright (c) 2016 Paul B Mahol
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include <stdint.h>
  22. #include "libavutil/imgutils.h"
  23. #include "libavutil/intmath.h"
  24. #include "libavutil/opt.h"
  25. #include "avcodec.h"
  26. #include "bitstream.h"
  27. #include "bytestream.h"
  28. #include "internal.h"
  29. #include "thread.h"
  30. #include "unary.h"
  31. #define NB_LEVELS 4
  32. #define PIXLET_MAGIC 0xDEADBEEF
  33. #define H 0
  34. #define V 1
/* Geometry of one wavelet subband inside a plane. */
typedef struct SubBand {
    size_t width, height;  /* band dimensions in coefficients */
    size_t size;           /* width * height */
    size_t x, y;           /* top-left offset of the band within the plane */
} SubBand;
/* Decoder private state. */
typedef struct PixletContext {
    AVClass *class;

    GetByteContext gb;   /* byte reader over the packet payload */
    BitstreamContext bc; /* bit reader for entropy-coded coefficients */

    int levels;          /* wavelet decomposition levels; must equal NB_LEVELS */
    int depth;           /* coded bit depth, accepted range 8..15 */
    size_t w, h;         /* coded dimensions, aligned to 1 << (NB_LEVELS + 1) */

    int16_t *filter[2];  /* scratch buffers for the inverse wavelet passes */
    int16_t *prediction; /* scratch row for lowpass DPCM prediction */
    /* per-plane horizontal/vertical scale factors, one per level,
     * stored as 32.32 fixed-point reciprocals (see decode_plane()) */
    int64_t scaling[4][2][NB_LEVELS];
    SubBand band[4][NB_LEVELS * 3 + 1]; /* subband layout per plane */
} PixletContext;
  52. static av_cold int pixlet_init(AVCodecContext *avctx)
  53. {
  54. avctx->pix_fmt = AV_PIX_FMT_YUV420P16;
  55. avctx->color_range = AVCOL_RANGE_JPEG;
  56. return 0;
  57. }
  58. static av_cold int pixlet_close(AVCodecContext *avctx)
  59. {
  60. PixletContext *ctx = avctx->priv_data;
  61. av_freep(&ctx->filter[0]);
  62. av_freep(&ctx->filter[1]);
  63. av_freep(&ctx->prediction);
  64. return 0;
  65. }
/*
 * Allocate scratch buffers and compute the subband layout for each plane.
 * Called (via pixlet_decode_frame) whenever the coded dimensions change.
 *
 * Band 0 is the lowpass band; bands 1..NB_LEVELS*3 are the highpass bands,
 * three per level, ordered from the coarsest level to the finest.
 */
static int init_decoder(AVCodecContext *avctx)
{
    PixletContext *ctx = avctx->priv_data;
    int i, plane;

    /* filter[0]: one column of samples; filter[1]: one row/column plus
     * 16 extra entries of border padding used by filterfn() */
    ctx->filter[0]  = av_malloc_array(ctx->h, sizeof(int16_t));
    ctx->filter[1]  = av_malloc_array(FFMAX(ctx->h, ctx->w) + 16, sizeof(int16_t));
    /* one predictor entry per lowpass-band column */
    ctx->prediction = av_malloc_array((ctx->w >> NB_LEVELS), sizeof(int16_t));
    if (!ctx->filter[0] || !ctx->filter[1] || !ctx->prediction)
        return AVERROR(ENOMEM);

    for (plane = 0; plane < 3; plane++) {
        unsigned shift = plane > 0; /* chroma is subsampled 2x in both axes */
        size_t w = ctx->w >> shift;
        size_t h = ctx->h >> shift;

        /* lowpass band: plane shrunk by all decomposition levels */
        ctx->band[plane][0].width  = w >> NB_LEVELS;
        ctx->band[plane][0].height = h >> NB_LEVELS;
        ctx->band[plane][0].size   = (w >> NB_LEVELS) * (h >> NB_LEVELS);

        for (i = 0; i < NB_LEVELS * 3; i++) {
            unsigned scale = ctx->levels - (i / 3);

            ctx->band[plane][i + 1].width  = w >> scale;
            ctx->band[plane][i + 1].height = h >> scale;
            ctx->band[plane][i + 1].size   = (w >> scale) * (h >> scale);

            /* position within the plane: each band of a level sits to the
             * right of and/or below the same-size lowpass region */
            ctx->band[plane][i + 1].x = (w >> scale) * (((i + 1) % 3) != 2);
            ctx->band[plane][i + 1].y = (h >> scale) * (((i + 1) % 3) != 1);
        }
    }
    return 0;
}
/*
 * Decode entropy-coded lowpass coefficients.
 *
 * Each coefficient is coded as a unary prefix plus raw bits whose count is
 * derived from "state", an adaptive estimate of recent magnitudes; a decoded
 * value may be followed by a run of zeros coded in a similar way.  Output is
 * written row by row: after every "width" samples dst advances by stride.
 *
 * @param dst    destination array
 * @param size   total number of coefficients to decode
 * @param width  samples per output row
 * @param stride row stride of dst in int16_t units
 * @return number of whole bytes consumed from the bitstream, or a negative
 *         AVERROR code on invalid data
 */
static int read_low_coeffs(AVCodecContext *avctx, int16_t *dst, size_t size,
                           size_t width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    BitstreamContext *bc = &ctx->bc;
    unsigned cnt1, nbits, k, j = 0, i = 0;
    int64_t value, state = 3;
    int rlen, escape, flag = 0;

    while (i < size) {
        /* raw-bit count for this symbol, derived from the adaptive state */
        nbits = FFMIN(ff_clz((state >> 8) + 3) ^ 0x1F, 14);

        cnt1 = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            value = bitstream_read(bc, nbits);
            if (value <= 1) {
                /* codes 0 and 1 share a shortened form: push one bit back */
                bitstream_unget(bc, value & 1, 1);
                value = 1;
            }
            escape = value + ((1 << nbits) - 1) * cnt1 - 1;
        } else {
            /* escape: magnitude stored verbatim in 16 bits */
            escape = bitstream_read(bc, 16);
        }

        /* unfold magnitude + sign: odd codes map to negative values */
        value    = -((escape + flag) & 1) | 1;
        dst[j++] = value * ((escape + flag + 1) >> 1);
        i++;
        if (j == width) {
            j    = 0;
            dst += stride;
        }

        /* exponential-decay state update towards the decoded magnitude */
        state = 120 * (escape + flag) + state - (120 * state >> 8);
        flag  = 0;

        /* zero runs are only coded while the state is small */
        if (state * 4 > 0xFF || i >= size)
            continue;

        nbits  = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, nbits);
        cnt1   = get_unary(bc, 0, 8);
        if (cnt1 > 7) {
            rlen = bitstream_read(bc, 16);
        } else {
            value = bitstream_read(bc, nbits);
            if (value <= 1) {
                bitstream_unget(bc, value & 1, 1);
                value = 1;
            }
            rlen = value + escape * cnt1 - 1;
        }

        if (i + rlen > size)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j    = 0;
                dst += stride;
            }
        }

        state = 0;
        /* a maximal (0xFFFF) run leaves flag clear for the next symbol */
        flag = rlen < 0xFFFF ? 1 : 0;
    }

    bitstream_align(bc);
    return bitstream_tell(bc) >> 3;
}
/*
 * Decode entropy-coded highpass coefficients for one subband.
 *
 * Structured like read_low_coeffs(), but each decoded magnitude is scaled
 * by the band parameter c, and the adaptive state update rate is the band
 * parameter d.  A fresh bit reader is initialized on src covering the
 * remaining packet bytes.
 *
 * @param src    start of the coded data for this band
 * @param dst    destination, written row by row
 * @param size   number of coefficients to decode
 * @param c      scale factor applied to each decoded magnitude
 * @param a      band parameter controlling the escape-code bit count
 * @param d      state adaptation rate
 * @param width  samples per output row
 * @param stride row stride of dst in int16_t units
 * @return number of whole bytes consumed, or a negative AVERROR code
 */
static int read_high_coeffs(AVCodecContext *avctx, uint8_t *src, int16_t *dst,
                            int size, int64_t c, int a, int64_t d,
                            int width, ptrdiff_t stride)
{
    PixletContext *ctx = avctx->priv_data;
    BitstreamContext *bc = &ctx->bc;
    unsigned cnt1, shbits, rlen, nbits, length, i = 0, j = 0, k;
    int ret, escape, pfx, cthulu, yflag, xflag, flag = 0;
    int64_t state = 3, value, tmp;

    ret = bitstream_init8(bc, src, bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    /* cthulu = a >= 0 ? a + 1 : -a (branchless absolute-value form) */
    cthulu = (a >= 0) + (a ^ (a >> 31)) - (a >> 31);
    if (cthulu != 1) {
        nbits = 33 - ff_clz(cthulu - 1);
        if (nbits > 16)
            return AVERROR_INVALIDDATA;
    } else {
        nbits = 1;
    }
    length = 25 - nbits;

    while (i < size) {
        /* raw-bit count estimate from the adaptive state */
        if (state >> 8 != -3)
            value = ff_clz((state >> 8) + 3) ^ 0x1F;
        else
            value = -1;

        cnt1 = get_unary(bc, 0, length);
        if (cnt1 >= length) {
            /* escape: magnitude read verbatim with nbits bits */
            cnt1 = bitstream_read(bc, nbits);
        } else {
            pfx   = 14 + (((value - 14) >> 32) & (value - 14)); /* min(value, 14) */
            cnt1 *= (1 << pfx) - 1;
            shbits = bitstream_read(bc, pfx);
            if (shbits <= 1) {
                /* codes 0 and 1 share a shortened form: push one bit back */
                bitstream_unget(bc, shbits & 1, 1);
                shbits = 1;
            }
            cnt1 += shbits - 1;
        }

        /* fold out the sign and scale the magnitude by c */
        xflag = flag + cnt1;
        yflag = xflag;
        if (flag + cnt1 == 0) {
            value = 0;
        } else {
            xflag &= 1u;
            tmp   = c * ((yflag + 1) >> 1) + (c >> 1);
            value = xflag + (tmp ^ -xflag); /* negates tmp when xflag is set */
        }

        i++;
        dst[j++] = value;
        if (j == width) {
            j    = 0;
            dst += stride;
        }

        /* state update with band-specific rate d */
        state += d * yflag - (d * state >> 8);
        flag   = 0;

        /* zero runs are only coded while the state is small */
        if (state * 4 > 0xFF || i >= size)
            continue;

        pfx    = ((state + 8) >> 5) + (state ? ff_clz(state) : 32) - 24;
        escape = av_mod_uintp2(16383, pfx);
        cnt1   = get_unary(bc, 0, 8);
        if (cnt1 < 8) {
            if (pfx < 1 || pfx > 25)
                return AVERROR_INVALIDDATA;

            value = bitstream_read(bc, pfx);
            if (value <= 1) {
                bitstream_unget(bc, value & 1, 1);
                value = 1;
            }
            rlen = value + escape * cnt1 - 1;
        } else {
            /* long run: one flag bit selects a 16- or 8-bit raw length */
            if (bitstream_read_bit(bc))
                value = bitstream_read(bc, 16);
            else
                value = bitstream_read(bc, 8);

            rlen = value + 8 * escape;
        }

        if (rlen > 0xFFFF || i + rlen > size)
            return AVERROR_INVALIDDATA;
        i += rlen;

        for (k = 0; k < rlen; k++) {
            dst[j++] = 0;
            if (j == width) {
                j    = 0;
                dst += stride;
            }
        }

        state = 0;
        flag  = rlen < 0xFFFF ? 1 : 0;
    }

    bitstream_align(bc);
    return bitstream_tell(bc) >> 3;
}
  247. static int read_highpass(AVCodecContext *avctx, uint8_t *ptr,
  248. int plane, AVFrame *frame)
  249. {
  250. PixletContext *ctx = avctx->priv_data;
  251. ptrdiff_t stride = frame->linesize[plane] / 2;
  252. int i, ret;
  253. for (i = 0; i < ctx->levels * 3; i++) {
  254. int32_t a = bytestream2_get_be32(&ctx->gb);
  255. int32_t b = bytestream2_get_be32(&ctx->gb);
  256. int32_t c = bytestream2_get_be32(&ctx->gb);
  257. int32_t d = bytestream2_get_be32(&ctx->gb);
  258. int16_t *dest = (int16_t *)frame->data[plane] +
  259. ctx->band[plane][i + 1].x +
  260. ctx->band[plane][i + 1].y * stride;
  261. size_t size = ctx->band[plane][i + 1].size;
  262. uint32_t magic = bytestream2_get_be32(&ctx->gb);
  263. if (magic != PIXLET_MAGIC) {
  264. av_log(avctx, AV_LOG_ERROR,
  265. "wrong magic number: 0x%"PRIX32" for plane %d, band %d\n",
  266. magic, plane, i);
  267. return AVERROR_INVALIDDATA;
  268. }
  269. ret = read_high_coeffs(avctx, ptr + bytestream2_tell(&ctx->gb), dest,
  270. size, c, (b >= FFABS(a)) ? b : a, d,
  271. ctx->band[plane][i + 1].width, stride);
  272. if (ret < 0) {
  273. av_log(avctx, AV_LOG_ERROR,
  274. "error in highpass coefficients for plane %d, band %d\n",
  275. plane, i);
  276. return ret;
  277. }
  278. bytestream2_skip(&ctx->gb, ret);
  279. }
  280. return 0;
  281. }
  282. static void line_add_sat_s16(int16_t *dst, const int16_t *src, size_t len)
  283. {
  284. int i;
  285. for (i = 0; i < len; i++) {
  286. int val = dst[i] + src[i];
  287. dst[i] = av_clip_int16(val);
  288. }
  289. }
  290. static void lowpass_prediction(int16_t *dst, int16_t *pred,
  291. size_t width, size_t height, ptrdiff_t stride)
  292. {
  293. int i, j;
  294. memset(pred, 0, width * sizeof(*pred));
  295. for (i = 0; i < height; i++) {
  296. line_add_sat_s16(pred, dst, width);
  297. dst[0] = pred[0];
  298. for (j = 1; j < width; j++)
  299. dst[j] = pred[j] + dst[j - 1];
  300. dst += stride;
  301. }
  302. }
/*
 * One inverse wavelet filtering pass over a row or column of "size" samples.
 *
 * The first size/2 input samples are the lowpass half, the next size/2 the
 * highpass half; the output interleaves reconstructed even and odd samples
 * back into dest.  tmp must provide at least size + 16 int16_t of scratch
 * (4 entries of border padding around each half).
 */
static void filterfn(int16_t *dest, int16_t *tmp, size_t size, int64_t scale)
{
    int16_t *low, *high, *ll, *lh, *hl, *hh;
    int hsize, i, j;
    int64_t value;

    hsize = size >> 1;
    low   = tmp + 4;
    high  = &low[hsize + 8];

    /* NB: memcpy lengths are in BYTES, so "size" bytes copies exactly
     * hsize int16_t elements -- each half, not the whole row */
    memcpy(low, dest, size);
    memcpy(high, dest + hsize, size);

    ll = &low[hsize];
    lh = &low[hsize];
    hl = &high[hsize];
    hh = hl;

    /* extend both halves with 4 reflected border samples on each side */
    for (i = 4, j = 2; i; i--, j++, ll--, hh++, lh++, hl--) {
        low[i - 5]  = low[j - 1];
        lh[0]       = ll[-1];
        high[i - 5] = high[j - 2];
        hh[0]       = hl[-2];
    }

    /* even outputs: 3 lowpass taps + 2 highpass taps, then rescale;
     * filter coefficients are 32.32 fixed point */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 1] * -INT64_C(325392907)  +
                (int64_t) low [i + 0] *  INT64_C(3687786320) +
                (int64_t) low [i - 1] * -INT64_C(325392907)  +
                (int64_t) high[i + 0] *  INT64_C(1518500249) +
                (int64_t) high[i - 1] *  INT64_C(1518500249);
        dest[i * 2] = av_clip_int16(((value >> 32) * scale) >> 32);
    }

    /* odd outputs: 4 lowpass taps + 3 highpass taps, then rescale */
    for (i = 0; i < hsize; i++) {
        value = (int64_t) low [i + 2] * -INT64_C(65078576)   +
                (int64_t) low [i + 1] *  INT64_C(1583578880) +
                (int64_t) low [i + 0] *  INT64_C(1583578880) +
                (int64_t) low [i - 1] * -INT64_C(65078576)   +
                (int64_t) high[i + 1] *  INT64_C(303700064)  +
                (int64_t) high[i + 0] * -INT64_C(3644400640) +
                (int64_t) high[i - 1] *  INT64_C(303700064);
        dest[i * 2 + 1] = av_clip_int16(((value >> 32) * scale) >> 32);
    }
}
/*
 * Full inverse wavelet reconstruction of one plane, from the smallest
 * (lowpass) resolution up to full size.  Each level filters every row in
 * place, then every column via the gather/scatter buffer, using that
 * level's scale factors decoded in decode_plane().
 */
static void reconstruction(AVCodecContext *avctx, int16_t *dest,
                           size_t width, size_t height, ptrdiff_t stride,
                           int64_t *scaling_h, int64_t *scaling_v)
{
    PixletContext *ctx = avctx->priv_data;
    unsigned scaled_width, scaled_height;
    int16_t *ptr, *tmp;
    int i, j, k;

    scaled_width  = width >> NB_LEVELS;
    scaled_height = height >> NB_LEVELS;
    tmp = ctx->filter[0]; /* column gather/scatter buffer */

    for (i = 0; i < NB_LEVELS; i++) {
        int64_t scale_v = scaling_v[i];
        int64_t scale_h = scaling_h[i];

        /* each level doubles the reconstructed area in both axes */
        scaled_width  <<= 1;
        scaled_height <<= 1;

        /* in-place filtering pass over every row */
        ptr = dest;
        for (j = 0; j < scaled_height; j++) {
            filterfn(ptr, ctx->filter[1], scaled_width, scale_v);
            ptr += stride;
        }

        /* column pass: gather into tmp, filter, scatter back */
        for (j = 0; j < scaled_width; j++) {
            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                tmp[k] = *ptr;
                ptr += stride;
            }

            filterfn(tmp, ctx->filter[1], scaled_height, scale_h);

            ptr = dest + j;
            for (k = 0; k < scaled_height; k++) {
                *ptr = tmp[k];
                ptr += stride;
            }
        }
    }
}
  378. static void postprocess_luma(AVFrame *frame, size_t w, size_t h, int depth)
  379. {
  380. uint16_t *dsty = (uint16_t *)frame->data[0];
  381. int16_t *srcy = (int16_t *)frame->data[0];
  382. ptrdiff_t stridey = frame->linesize[0] / 2;
  383. int i, j;
  384. for (j = 0; j < h; j++) {
  385. for (i = 0; i < w; i++) {
  386. if (srcy[i] <= 0)
  387. dsty[i] = 0;
  388. else if (srcy[i] > ((1 << depth) - 1))
  389. dsty[i] = 65535;
  390. else
  391. dsty[i] = ((int64_t) srcy[i] * srcy[i] * 65535) /
  392. ((1 << depth) - 1) / ((1 << depth) - 1);
  393. }
  394. dsty += stridey;
  395. srcy += stridey;
  396. }
  397. }
  398. static void postprocess_chroma(AVFrame *frame, int w, int h, int depth)
  399. {
  400. uint16_t *dstu = (uint16_t *)frame->data[1];
  401. uint16_t *dstv = (uint16_t *)frame->data[2];
  402. int16_t *srcu = (int16_t *)frame->data[1];
  403. int16_t *srcv = (int16_t *)frame->data[2];
  404. ptrdiff_t strideu = frame->linesize[1] / 2;
  405. ptrdiff_t stridev = frame->linesize[2] / 2;
  406. const unsigned add = 1 << (depth - 1);
  407. const unsigned shift = 16 - depth;
  408. int i, j;
  409. for (j = 0; j < h; j++) {
  410. for (i = 0; i < w; i++) {
  411. dstu[i] = av_clip_uintp2_c(add + srcu[i], depth) << shift;
  412. dstv[i] = av_clip_uintp2_c(add + srcv[i], depth) << shift;
  413. }
  414. dstu += strideu;
  415. dstv += stridev;
  416. srcu += strideu;
  417. srcv += stridev;
  418. }
  419. }
/*
 * Decode one full plane: per-level scale factors, the first lowpass sample,
 * the remaining lowpass coefficients (top row, left column, interior), all
 * highpass subbands, then the lowpass DPCM undo and the inverse wavelet
 * reconstruction.
 */
static int decode_plane(AVCodecContext *avctx, int plane,
                        AVPacket *avpkt, AVFrame *frame)
{
    PixletContext *ctx = avctx->priv_data;
    ptrdiff_t stride = frame->linesize[plane] / 2;
    unsigned shift = plane > 0;
    int16_t *dst;
    int i, ret;

    /* per-level H/V scale factors, stored in reverse level order; kept as
     * 32.32 fixed-point reciprocals scaled by 1e6 */
    for (i = ctx->levels - 1; i >= 0; i--) {
        int32_t h = sign_extend(bytestream2_get_be32(&ctx->gb), 32);
        int32_t v = sign_extend(bytestream2_get_be32(&ctx->gb), 32);

        if (!h || !v)
            return AVERROR_INVALIDDATA;

        ctx->scaling[plane][H][i] = (1000000ULL << 32) / h;
        ctx->scaling[plane][V][i] = (1000000ULL << 32) / v;
    }

    bytestream2_skip(&ctx->gb, 4);

    /* the first lowpass sample is stored verbatim as a signed 16-bit word */
    dst    = (int16_t *)frame->data[plane];
    dst[0] = sign_extend(bytestream2_get_be16(&ctx->gb), 16);

    ret = bitstream_init8(&ctx->bc, avpkt->data + bytestream2_tell(&ctx->gb),
                          bytestream2_get_bytes_left(&ctx->gb));
    if (ret < 0)
        return ret;

    /* rest of the lowpass top row (stride 0: size equals the row length,
     * so the row pointer never actually advances) */
    ret = read_low_coeffs(avctx, dst + 1, ctx->band[plane][0].width - 1,
                          ctx->band[plane][0].width - 1, 0);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, top row\n", plane);
        return ret;
    }

    /* left column: one sample per row */
    ret = read_low_coeffs(avctx, dst + stride,
                          ctx->band[plane][0].height - 1, 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, left column\n",
               plane);
        return ret;
    }

    /* interior of the lowpass band */
    ret = read_low_coeffs(avctx, dst + stride + 1,
                          (ctx->band[plane][0].width - 1) * (ctx->band[plane][0].height - 1),
                          ctx->band[plane][0].width - 1, stride);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "error in lowpass coefficients for plane %d, rest\n", plane);
        return ret;
    }

    bytestream2_skip(&ctx->gb, ret);
    if (bytestream2_get_bytes_left(&ctx->gb) <= 0) {
        av_log(avctx, AV_LOG_ERROR, "no bytes left\n");
        return AVERROR_INVALIDDATA;
    }

    ret = read_highpass(avctx, avpkt->data, plane, frame);
    if (ret < 0)
        return ret;

    /* undo DPCM over the lowpass band, then invert the wavelet transform */
    lowpass_prediction(dst, ctx->prediction, ctx->band[plane][0].width,
                       ctx->band[plane][0].height, stride);

    reconstruction(avctx, (int16_t *)frame->data[plane], ctx->w >> shift,
                   ctx->h >> shift, stride, ctx->scaling[plane][H],
                   ctx->scaling[plane][V]);

    return 0;
}
/*
 * Decode one Pixlet frame.
 *
 * Header layout (as parsed below): BE32 packet size, LE32 version, two
 * skipped/checked words, BE32 width and height, BE32 level count, BE32 bit
 * depth, 8 skipped bytes, then the per-plane coefficient data.
 */
static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    PixletContext *ctx = avctx->priv_data;
    int i, w, h, width, height, ret, version;
    AVFrame *p = data;
    ThreadFrame frame = { .f = data };
    uint32_t pktsize;

    bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);

    pktsize = bytestream2_get_be32(&ctx->gb);
    if (pktsize <= 44 || pktsize - 4 > bytestream2_get_bytes_left(&ctx->gb)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid packet size %"PRIu32".\n", pktsize);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_le32(&ctx->gb);
    if (version != 1)
        avpriv_request_sample(avctx, "Version %d", version);

    bytestream2_skip(&ctx->gb, 4);
    if (bytestream2_get_be32(&ctx->gb) != 1)
        return AVERROR_INVALIDDATA;
    bytestream2_skip(&ctx->gb, 4);

    width  = bytestream2_get_be32(&ctx->gb);
    height = bytestream2_get_be32(&ctx->gb);

    /* coded size is padded so every decomposition level halves cleanly */
    w = FFALIGN(width, 1 << (NB_LEVELS + 1));
    h = FFALIGN(height, 1 << (NB_LEVELS + 1));

    ctx->levels = bytestream2_get_be32(&ctx->gb);
    if (ctx->levels != NB_LEVELS)
        return AVERROR_INVALIDDATA;

    ctx->depth = bytestream2_get_be32(&ctx->gb);
    if (ctx->depth < 8 || ctx->depth > 15) {
        avpriv_request_sample(avctx, "Depth %d", ctx->depth);
        return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    /* report the original (unpadded) dimensions to the caller */
    avctx->width  = width;
    avctx->height = height;

    /* reinit should dimensions change */
    if (ctx->w != w || ctx->h != h) {
        pixlet_close(avctx);
        ctx->w = w;
        ctx->h = h;

        ret = init_decoder(avctx);
        if (ret < 0) {
            pixlet_close(avctx);
            ctx->w = 0;
            ctx->h = 0;
            return ret;
        }
    }

    bytestream2_skip(&ctx->gb, 8);

    ret = ff_thread_get_buffer(avctx, &frame, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < 3; i++) {
        ret = decode_plane(avctx, i, avpkt, frame.f);
        if (ret < 0)
            return ret;
        if (avctx->flags & AV_CODEC_FLAG_GRAY)
            break;
    }

    /* map the signed wavelet output to final full-range pixel values */
    postprocess_luma(frame.f, ctx->w, ctx->h, ctx->depth);
    postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);

    p->pict_type   = AV_PICTURE_TYPE_I;
    p->color_range = AVCOL_RANGE_JPEG;
    p->key_frame   = 1;

    *got_frame = 1;

    return pktsize;
}
  551. #if HAVE_THREADS
  552. static int pixlet_init_thread_copy(AVCodecContext *avctx)
  553. {
  554. PixletContext *ctx = avctx->priv_data;
  555. ctx->filter[0] = NULL;
  556. ctx->filter[1] = NULL;
  557. ctx->prediction = NULL;
  558. ctx->w = 0;
  559. ctx->h = 0;
  560. return 0;
  561. }
  562. #endif /* HAVE_THREADS */
/* Decoder registration: intra-only codec with direct-rendering and
 * frame-level multithreading capabilities. */
AVCodec ff_pixlet_decoder = {
    .name             = "pixlet",
    .long_name        = NULL_IF_CONFIG_SMALL("Apple Pixlet"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_PIXLET,
    .init             = pixlet_init,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(pixlet_init_thread_copy),
    .close            = pixlet_close,
    .decode           = pixlet_decode_frame,
    .priv_data_size   = sizeof(PixletContext),
    .capabilities     = AV_CODEC_CAP_DR1 |
                        AV_CODEC_CAP_FRAME_THREADS,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE |
                        FF_CODEC_CAP_INIT_CLEANUP,
};