  1. /*
  2. * MagicYUV decoder
  3. * Copyright (c) 2016 Paul B Mahol
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include <stdlib.h>
  22. #include <string.h>
  23. #define CACHED_BITSTREAM_READER !ARCH_X86_32
  24. #include "libavutil/pixdesc.h"
  25. #include "libavutil/qsort.h"
  26. #include "avcodec.h"
  27. #include "bytestream.h"
  28. #include "get_bits.h"
  29. #include "huffyuvdsp.h"
  30. #include "internal.h"
  31. #include "lossless_videodsp.h"
  32. #include "thread.h"
/* Location of one plane's slice bitstream inside the packet. */
typedef struct Slice {
    uint32_t start; // byte offset of the slice data within the packet
    uint32_t size;  // byte size of the slice data
} Slice;
/* Spatial prediction mode, signalled per plane in each slice header. */
typedef enum Prediction {
    LEFT = 1,
    GRADIENT,
    MEDIAN,
} Prediction;
/* One symbol of a Huffman table while the VLC is being built. */
typedef struct HuffEntry {
    uint16_t sym;  // symbol value
    uint8_t  len;  // code length in bits (1..32)
    uint32_t code; // assigned canonical code
} HuffEntry;
typedef struct MagicYUVContext {
    AVFrame          *p;               // frame currently being decoded (set per packet)
    int               max;             // 1 << bps
    int               bps;             // bits per sample: 8, 10 or 12
    int               slice_height;    // nominal slice height; last slice may be shorter
    int               nb_slices;
    int               planes;          // number of encoded planes in bitstream
    int               decorrelate;     // postprocessing work
    int               color_matrix;    // video color matrix
    int               flags;
    int               interlaced;      // video is interlaced
    uint8_t          *buf;             // pointer to AVPacket->data
    int               hshift[4];       // per-plane chroma shifts (horizontal)
    int               vshift[4];       // per-plane chroma shifts (vertical)
    Slice            *slices[4];       // slice bitstream positions for each plane
    unsigned int      slices_size[4];  // slice sizes for each plane
    uint8_t           len[4][4096];    // table of code lengths for each plane
    VLC               vlc[4];          // VLC for each plane
    int             (*huff_build)(VLC *vlc, uint8_t *len);     // selected per bps
    int             (*magy_decode_slice)(AVCodecContext *avctx, void *tdata,
                                         int j, int threadnr); // slice worker, per bps
    LLVidDSPContext   llviddsp;
} MagicYUVContext;
  70. static int huff_cmp_len(const void *a, const void *b)
  71. {
  72. const HuffEntry *aa = a, *bb = b;
  73. return (aa->len - bb->len) * 4096 + bb->sym - aa->sym;
  74. }
/* Build the VLC for a 10 bps plane (1024 symbols) from its code-length
 * table.  Returns 0 on success, AVERROR_INVALIDDATA on invalid lengths. */
static int huff_build10(VLC *vlc, uint8_t *len)
{
    HuffEntry he[1024];
    uint32_t code;
    int i;

    for (i = 0; i < 1024; i++) {
        he[i].sym = i;
        he[i].len = len[i];
        // Every symbol must have a code; lengths above 32 cannot be represented.
        if (len[i] == 0 || len[i] > 32)
            return AVERROR_INVALIDDATA;
    }
    AV_QSORT(he, 1024, HuffEntry, huff_cmp_len);

    // Assign codes starting from the longest (last after sorting) entry,
    // taking the top len bits of a running 32-bit accumulator.
    code = 1;
    for (i = 1023; i >= 0; i--) {
        he[i].code = code >> (32 - he[i].len);
        code += 0x80000000u >> (he[i].len - 1);
    }

    ff_free_vlc(vlc); // drop any table left from a previous frame
    return ff_init_vlc_sparse(vlc, FFMIN(he[1023].len, 12), 1024,
                              &he[0].len,  sizeof(he[0]), sizeof(he[0].len),
                              &he[0].code, sizeof(he[0]), sizeof(he[0].code),
                              &he[0].sym,  sizeof(he[0]), sizeof(he[0].sym), 0);
}
/* Build the VLC for a 12 bps plane (4096 symbols) from its code-length
 * table.  Returns 0 on success, AVERROR_INVALIDDATA on invalid lengths. */
static int huff_build12(VLC *vlc, uint8_t *len)
{
    HuffEntry he[4096];
    uint32_t code;
    int i;

    for (i = 0; i < 4096; i++) {
        he[i].sym = i;
        he[i].len = len[i];
        // Every symbol must have a code; lengths above 32 cannot be represented.
        if (len[i] == 0 || len[i] > 32)
            return AVERROR_INVALIDDATA;
    }
    AV_QSORT(he, 4096, HuffEntry, huff_cmp_len);

    // Assign codes starting from the longest (last after sorting) entry,
    // taking the top len bits of a running 32-bit accumulator.
    code = 1;
    for (i = 4095; i >= 0; i--) {
        he[i].code = code >> (32 - he[i].len);
        code += 0x80000000u >> (he[i].len - 1);
    }

    ff_free_vlc(vlc); // drop any table left from a previous frame
    return ff_init_vlc_sparse(vlc, FFMIN(he[4095].len, 12), 4096,
                              &he[0].len,  sizeof(he[0]), sizeof(he[0].len),
                              &he[0].code, sizeof(he[0]), sizeof(he[0].code),
                              &he[0].sym,  sizeof(he[0]), sizeof(he[0].sym), 0);
}
  121. static int huff_build(VLC *vlc, uint8_t *len)
  122. {
  123. HuffEntry he[256];
  124. uint32_t code;
  125. int i;
  126. for (i = 0; i < 256; i++) {
  127. he[i].sym = i;
  128. he[i].len = len[i];
  129. if (len[i] == 0 || len[i] > 32)
  130. return AVERROR_INVALIDDATA;
  131. }
  132. AV_QSORT(he, 256, HuffEntry, huff_cmp_len);
  133. code = 1;
  134. for (i = 255; i >= 0; i--) {
  135. he[i].code = code >> (32 - he[i].len);
  136. code += 0x80000000u >> (he[i].len - 1);
  137. }
  138. ff_free_vlc(vlc);
  139. return ff_init_vlc_sparse(vlc, FFMIN(he[255].len, 12), 256,
  140. &he[0].len, sizeof(he[0]), sizeof(he[0].len),
  141. &he[0].code, sizeof(he[0]), sizeof(he[0].code),
  142. &he[0].sym, sizeof(he[0]), sizeof(he[0].sym), 0);
  143. }
  144. static void magicyuv_median_pred16(uint16_t *dst, const uint16_t *src1,
  145. const uint16_t *diff, intptr_t w,
  146. int *left, int *left_top, int max)
  147. {
  148. int i;
  149. uint16_t l, lt;
  150. l = *left;
  151. lt = *left_top;
  152. for (i = 0; i < w; i++) {
  153. l = mid_pred(l, src1[i], (l + src1[i] - lt)) + diff[i];
  154. l &= max;
  155. lt = src1[i];
  156. dst[i] = l;
  157. }
  158. *left = l;
  159. *left_top = lt;
  160. }
  161. static int magy_decode_slice10(AVCodecContext *avctx, void *tdata,
  162. int j, int threadnr)
  163. {
  164. MagicYUVContext *s = avctx->priv_data;
  165. int interlaced = s->interlaced;
  166. const int bps = s->bps;
  167. const int max = s->max - 1;
  168. AVFrame *p = s->p;
  169. int i, k, x;
  170. GetBitContext gb;
  171. uint16_t *dst;
  172. for (i = 0; i < s->planes; i++) {
  173. int left, lefttop, top;
  174. int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
  175. int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
  176. int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
  177. ptrdiff_t fake_stride = (p->linesize[i] / 2) * (1 + interlaced);
  178. ptrdiff_t stride = p->linesize[i] / 2;
  179. int flags, pred;
  180. int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
  181. s->slices[i][j].size);
  182. if (ret < 0)
  183. return ret;
  184. flags = get_bits(&gb, 8);
  185. pred = get_bits(&gb, 8);
  186. dst = (uint16_t *)p->data[i] + j * sheight * stride;
  187. if (flags & 1) {
  188. if (get_bits_left(&gb) < bps * width * height)
  189. return AVERROR_INVALIDDATA;
  190. for (k = 0; k < height; k++) {
  191. for (x = 0; x < width; x++)
  192. dst[x] = get_bits(&gb, bps);
  193. dst += stride;
  194. }
  195. } else {
  196. for (k = 0; k < height; k++) {
  197. for (x = 0; x < width; x++) {
  198. int pix;
  199. if (get_bits_left(&gb) <= 0)
  200. return AVERROR_INVALIDDATA;
  201. pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
  202. if (pix < 0)
  203. return AVERROR_INVALIDDATA;
  204. dst[x] = pix;
  205. }
  206. dst += stride;
  207. }
  208. }
  209. switch (pred) {
  210. case LEFT:
  211. dst = (uint16_t *)p->data[i] + j * sheight * stride;
  212. s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
  213. dst += stride;
  214. if (interlaced) {
  215. s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
  216. dst += stride;
  217. }
  218. for (k = 1 + interlaced; k < height; k++) {
  219. s->llviddsp.add_left_pred_int16(dst, dst, max, width, dst[-fake_stride]);
  220. dst += stride;
  221. }
  222. break;
  223. case GRADIENT:
  224. dst = (uint16_t *)p->data[i] + j * sheight * stride;
  225. s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
  226. dst += stride;
  227. if (interlaced) {
  228. s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
  229. dst += stride;
  230. }
  231. for (k = 1 + interlaced; k < height; k++) {
  232. top = dst[-fake_stride];
  233. left = top + dst[0];
  234. dst[0] = left & max;
  235. for (x = 1; x < width; x++) {
  236. top = dst[x - fake_stride];
  237. lefttop = dst[x - (fake_stride + 1)];
  238. left += top - lefttop + dst[x];
  239. dst[x] = left & max;
  240. }
  241. dst += stride;
  242. }
  243. break;
  244. case MEDIAN:
  245. dst = (uint16_t *)p->data[i] + j * sheight * stride;
  246. s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
  247. dst += stride;
  248. if (interlaced) {
  249. s->llviddsp.add_left_pred_int16(dst, dst, max, width, 0);
  250. dst += stride;
  251. }
  252. lefttop = left = dst[0];
  253. for (k = 1 + interlaced; k < height; k++) {
  254. magicyuv_median_pred16(dst, dst - fake_stride, dst, width, &left, &lefttop, max);
  255. lefttop = left = dst[0];
  256. dst += stride;
  257. }
  258. break;
  259. default:
  260. avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
  261. }
  262. }
  263. if (s->decorrelate) {
  264. int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
  265. int width = avctx->coded_width;
  266. uint16_t *r = (uint16_t *)p->data[0] + j * s->slice_height * p->linesize[0] / 2;
  267. uint16_t *g = (uint16_t *)p->data[1] + j * s->slice_height * p->linesize[1] / 2;
  268. uint16_t *b = (uint16_t *)p->data[2] + j * s->slice_height * p->linesize[2] / 2;
  269. for (i = 0; i < height; i++) {
  270. for (k = 0; k < width; k++) {
  271. b[k] = (b[k] + g[k]) & max;
  272. r[k] = (r[k] + g[k]) & max;
  273. }
  274. b += p->linesize[0] / 2;
  275. g += p->linesize[1] / 2;
  276. r += p->linesize[2] / 2;
  277. }
  278. }
  279. return 0;
  280. }
/* Slice worker for 8 bps formats (called via avctx->execute2).
 * j is the slice index; tdata and threadnr are unused.
 * Decodes every plane of slice j (raw or VLC residuals), undoes the
 * spatial prediction in place, then reverses RGB decorrelation.
 * Returns 0 on success or a negative AVERROR code. */
static int magy_decode_slice(AVCodecContext *avctx, void *tdata,
                             int j, int threadnr)
{
    MagicYUVContext *s = avctx->priv_data;
    int interlaced = s->interlaced;
    AVFrame *p = s->p;
    int i, k, x, min_width;
    GetBitContext gb;
    uint8_t *dst;

    for (i = 0; i < s->planes; i++) {
        int left, lefttop, top;
        // The last slice may cover fewer rows than slice_height.
        int height = AV_CEIL_RSHIFT(FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height), s->vshift[i]);
        int width = AV_CEIL_RSHIFT(avctx->coded_width, s->hshift[i]);
        int sheight = AV_CEIL_RSHIFT(s->slice_height, s->vshift[i]);
        // For interlaced content prediction references the row two lines up.
        ptrdiff_t fake_stride = p->linesize[i] * (1 + interlaced);
        ptrdiff_t stride = p->linesize[i];
        int flags, pred;
        int ret = init_get_bits8(&gb, s->buf + s->slices[i][j].start,
                                 s->slices[i][j].size);

        if (ret < 0)
            return ret;

        flags = get_bits(&gb, 8);
        pred  = get_bits(&gb, 8);

        dst = p->data[i] + j * sheight * stride;
        if (flags & 1) {
            // Uncompressed samples, 8 bits each.
            if (get_bits_left(&gb) < 8 * width * height)
                return AVERROR_INVALIDDATA;

            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++)
                    dst[x] = get_bits(&gb, 8);

                dst += stride;
            }
        } else {
            // Huffman-coded samples/residuals.
            for (k = 0; k < height; k++) {
                for (x = 0; x < width; x++) {
                    int pix;
                    if (get_bits_left(&gb) <= 0)
                        return AVERROR_INVALIDDATA;
                    pix = get_vlc2(&gb, s->vlc[i].table, s->vlc[i].bits, 3);
                    if (pix < 0)
                        return AVERROR_INVALIDDATA;
                    dst[x] = pix;
                }
                dst += stride;
            }
        }

        // Undo the spatial prediction in place. The first row (two rows
        // when interlaced) only has left prediction available.
        switch (pred) {
        case LEFT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            for (k = 1 + interlaced; k < height; k++) {
                // Seed each row with the sample directly above its first pixel.
                s->llviddsp.add_left_pred(dst, dst, width, dst[-fake_stride]);
                dst += stride;
            }
            break;
        case GRADIENT:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            min_width = FFMIN(width, 32);
            for (k = 1 + interlaced; k < height; k++) {
                top = dst[-fake_stride];
                left = top + dst[0];
                dst[0] = left;
                for (x = 1; x < min_width; x++) { /* dsp need aligned 32 */
                    // Scalar left + top - topleft predictor for the first
                    // up-to-32 pixels; the DSP routine handles the rest.
                    top = dst[x - fake_stride];
                    lefttop = dst[x - (fake_stride + 1)];
                    left += top - lefttop + dst[x];
                    dst[x] = left;
                }
                if (width > 32)
                    s->llviddsp.add_gradient_pred(dst + 32, fake_stride, width - 32);
                dst += stride;
            }
            break;
        case MEDIAN:
            dst = p->data[i] + j * sheight * stride;
            s->llviddsp.add_left_pred(dst, dst, width, 0);
            dst += stride;
            if (interlaced) {
                s->llviddsp.add_left_pred(dst, dst, width, 0);
                dst += stride;
            }
            lefttop = left = dst[0];
            for (k = 1 + interlaced; k < height; k++) {
                s->llviddsp.add_median_pred(dst, dst - fake_stride,
                                            dst, width, &left, &lefttop);
                lefttop = left = dst[0];
                dst += stride;
            }
            break;
        default:
            avpriv_request_sample(avctx, "Unknown prediction: %d", pred);
        }
    }

    if (s->decorrelate) {
        // Reverse G-based decorrelation: planes 0 and 2 get plane 1 added
        // back (8-bit additions wrap naturally in add_bytes).
        int height = FFMIN(s->slice_height, avctx->coded_height - j * s->slice_height);
        int width = avctx->coded_width;
        uint8_t *b = p->data[0] + j * s->slice_height * p->linesize[0];
        uint8_t *g = p->data[1] + j * s->slice_height * p->linesize[1];
        uint8_t *r = p->data[2] + j * s->slice_height * p->linesize[2];

        for (i = 0; i < height; i++) {
            s->llviddsp.add_bytes(b, g, width);
            s->llviddsp.add_bytes(r, g, width);
            b += p->linesize[0];
            g += p->linesize[1];
            r += p->linesize[2];
        }
    }
    return 0;
}
  401. static int build_huffman(AVCodecContext *avctx, GetBitContext *gbit, int max)
  402. {
  403. MagicYUVContext *s = avctx->priv_data;
  404. int i = 0, j = 0, k;
  405. memset(s->len, 0, sizeof(s->len));
  406. while (get_bits_left(gbit) >= 8) {
  407. int b = get_bits(gbit, 1);
  408. int x = get_bits(gbit, 7);
  409. int l = get_bitsz(gbit, b * 8) + 1;
  410. k = j + l;
  411. if (k > max) {
  412. av_log(avctx, AV_LOG_ERROR, "Invalid Huffman codes\n");
  413. return AVERROR_INVALIDDATA;
  414. }
  415. for (; j < k; j++)
  416. s->len[i][j] = x;
  417. if (j == max) {
  418. j = 0;
  419. if (s->huff_build(&s->vlc[i], s->len[i])) {
  420. av_log(avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
  421. return AVERROR_INVALIDDATA;
  422. }
  423. i++;
  424. if (i == s->planes) {
  425. break;
  426. }
  427. }
  428. }
  429. if (i != s->planes) {
  430. av_log(avctx, AV_LOG_ERROR, "Huffman tables too short\n");
  431. return AVERROR_INVALIDDATA;
  432. }
  433. return 0;
  434. }
/* Decode one MAGY packet into the AVFrame passed via data: parse the
 * header, record per-plane slice offsets, build Huffman tables, then
 * run the slice workers in parallel via avctx->execute2.
 * Returns the consumed packet size or a negative AVERROR code. */
static int magy_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    MagicYUVContext *s = avctx->priv_data;
    ThreadFrame frame = { .f = data };
    AVFrame *p = data;
    GetByteContext gbyte;
    GetBitContext gbit;
    uint32_t first_offset, offset, next_offset, header_size, slice_width;
    int width, height, format, version, table_size;
    int ret, i, j;

    bytestream2_init(&gbyte, avpkt->data, avpkt->size);
    if (bytestream2_get_le32(&gbyte) != MKTAG('M', 'A', 'G', 'Y'))
        return AVERROR_INVALIDDATA;

    header_size = bytestream2_get_le32(&gbyte);
    // header_size < avpkt->size is relied on below when validating offsets.
    if (header_size < 32 || header_size >= avpkt->size) {
        av_log(avctx, AV_LOG_ERROR,
               "header or packet too small %"PRIu32"\n", header_size);
        return AVERROR_INVALIDDATA;
    }

    version = bytestream2_get_byte(&gbyte);
    if (version != 7) {
        avpriv_request_sample(avctx, "Version %d", version);
        return AVERROR_PATCHWELCOME;
    }

    // Reset per-frame state; the format byte below overrides as needed.
    s->hshift[1] =
    s->vshift[1] =
    s->hshift[2] =
    s->vshift[2] = 0;
    s->decorrelate = 0;
    s->bps = 8;

    // Map the container format byte to pixel format / subsampling / bps.
    format = bytestream2_get_byte(&gbyte);
    switch (format) {
    case 0x65:
        avctx->pix_fmt = AV_PIX_FMT_GBRP;
        s->decorrelate = 1;
        break;
    case 0x66:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP;
        s->decorrelate = 1;
        break;
    case 0x67:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P;
        break;
    case 0x68:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P;
        s->hshift[1] =
        s->hshift[2] = 1;
        break;
    case 0x69:
        avctx->pix_fmt = AV_PIX_FMT_YUV420P;
        s->hshift[1] =
        s->vshift[1] =
        s->hshift[2] =
        s->vshift[2] = 1;
        break;
    case 0x6a:
        avctx->pix_fmt = AV_PIX_FMT_YUVA444P;
        break;
    case 0x6b:
        avctx->pix_fmt = AV_PIX_FMT_GRAY8;
        break;
    case 0x6c:
        avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
        s->hshift[1] =
        s->hshift[2] = 1;
        s->bps = 10;
        break;
    case 0x76:
        avctx->pix_fmt = AV_PIX_FMT_YUV444P10;
        s->bps = 10;
        break;
    case 0x6d:
        avctx->pix_fmt = AV_PIX_FMT_GBRP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6e:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP10;
        s->decorrelate = 1;
        s->bps = 10;
        break;
    case 0x6f:
        avctx->pix_fmt = AV_PIX_FMT_GBRP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x70:
        avctx->pix_fmt = AV_PIX_FMT_GBRAP12;
        s->decorrelate = 1;
        s->bps = 12;
        break;
    case 0x73:
        avctx->pix_fmt = AV_PIX_FMT_GRAY10;
        s->bps = 10;
        break;
    default:
        avpriv_request_sample(avctx, "Format 0x%X", format);
        return AVERROR_PATCHWELCOME;
    }

    // Select the bps-specific slice decoder and Huffman builder.
    s->max = 1 << s->bps;
    s->magy_decode_slice = s->bps == 8 ? magy_decode_slice : magy_decode_slice10;
    if ( s->bps == 8)
        s->huff_build = huff_build;
    else
        s->huff_build = s->bps == 10 ? huff_build10 : huff_build12;
    s->planes = av_pix_fmt_count_planes(avctx->pix_fmt);

    bytestream2_skip(&gbyte, 1);
    s->color_matrix = bytestream2_get_byte(&gbyte);
    s->flags = bytestream2_get_byte(&gbyte);
    s->interlaced = !!(s->flags & 2);
    bytestream2_skip(&gbyte, 3);

    width  = bytestream2_get_le32(&gbyte);
    height = bytestream2_get_le32(&gbyte);
    ret = ff_set_dimensions(avctx, width, height);
    if (ret < 0)
        return ret;

    // Only full-width slices are supported.
    slice_width = bytestream2_get_le32(&gbyte);
    if (slice_width != avctx->coded_width) {
        avpriv_request_sample(avctx, "Slice width %"PRIu32, slice_width);
        return AVERROR_PATCHWELCOME;
    }
    s->slice_height = bytestream2_get_le32(&gbyte);
    if (s->slice_height <= 0 || s->slice_height > INT_MAX - avctx->coded_height) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid slice height: %d\n", s->slice_height);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_skip(&gbyte, 4);

    s->nb_slices = (avctx->coded_height + s->slice_height - 1) / s->slice_height;
    if (s->nb_slices > INT_MAX / sizeof(Slice)) {
        av_log(avctx, AV_LOG_ERROR,
               "invalid number of slices: %d\n", s->nb_slices);
        return AVERROR_INVALIDDATA;
    }

    if (s->interlaced) {
        // Interlaced prediction needs at least two rows per slice (and in
        // the possibly-shorter last slice) after vertical subsampling.
        if ((s->slice_height >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible slice height\n");
            return AVERROR_INVALIDDATA;
        }
        if ((avctx->coded_height % s->slice_height) && ((avctx->coded_height % s->slice_height) >> s->vshift[1]) < 2) {
            av_log(avctx, AV_LOG_ERROR, "impossible height\n");
            return AVERROR_INVALIDDATA;
        }
    }

    // Read the per-plane slice offset tables; offsets are relative to the
    // end of the header, so header_size is added to get packet positions.
    for (i = 0; i < s->planes; i++) {
        av_fast_malloc(&s->slices[i], &s->slices_size[i], s->nb_slices * sizeof(Slice));
        if (!s->slices[i])
            return AVERROR(ENOMEM);

        offset = bytestream2_get_le32(&gbyte);
        if (offset >= avpkt->size - header_size)
            return AVERROR_INVALIDDATA;

        if (i == 0)
            first_offset = offset;

        for (j = 0; j < s->nb_slices - 1; j++) {
            s->slices[i][j].start = offset + header_size;

            next_offset = bytestream2_get_le32(&gbyte);
            // Offsets must be strictly increasing and stay inside the packet.
            if (next_offset <= offset || next_offset >= avpkt->size - header_size)
                return AVERROR_INVALIDDATA;

            s->slices[i][j].size = next_offset - offset;
            offset = next_offset;
        }

        // The last slice of each plane extends to the end of the packet.
        s->slices[i][j].start = offset + header_size;
        s->slices[i][j].size  = avpkt->size - s->slices[i][j].start;
    }

    if (bytestream2_get_byte(&gbyte) != s->planes)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(&gbyte, s->nb_slices * s->planes);

    // The Huffman tables sit between here and the first slice.
    table_size = header_size + first_offset - bytestream2_tell(&gbyte);
    if (table_size < 2)
        return AVERROR_INVALIDDATA;

    ret = init_get_bits8(&gbit, avpkt->data + bytestream2_tell(&gbyte), table_size);
    if (ret < 0)
        return ret;

    ret = build_huffman(avctx, &gbit, s->max);
    if (ret < 0)
        return ret;

    p->pict_type = AV_PICTURE_TYPE_I;
    p->key_frame = 1;

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    s->buf = avpkt->data;
    s->p = p;
    avctx->execute2(avctx, s->magy_decode_slice, NULL, NULL, s->nb_slices);

    if (avctx->pix_fmt == AV_PIX_FMT_GBRP ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP ||
        avctx->pix_fmt == AV_PIX_FMT_GBRP10 ||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP10||
        avctx->pix_fmt == AV_PIX_FMT_GBRAP12||
        avctx->pix_fmt == AV_PIX_FMT_GBRP12) {
        // Swap the first two planes into FFmpeg's GBR plane order.
        FFSWAP(uint8_t*, p->data[0], p->data[1]);
        FFSWAP(int, p->linesize[0], p->linesize[1]);
    } else {
        switch (s->color_matrix) {
        case 1:
            p->colorspace = AVCOL_SPC_BT470BG;
            break;
        case 2:
            p->colorspace = AVCOL_SPC_BT709;
            break;
        }
        p->color_range = (s->flags & 4) ? AVCOL_RANGE_JPEG : AVCOL_RANGE_MPEG;
    }

    *got_frame = 1;

    return avpkt->size;
}
  641. static av_cold int magy_decode_init(AVCodecContext *avctx)
  642. {
  643. MagicYUVContext *s = avctx->priv_data;
  644. ff_llviddsp_init(&s->llviddsp);
  645. return 0;
  646. }
  647. static av_cold int magy_decode_end(AVCodecContext *avctx)
  648. {
  649. MagicYUVContext * const s = avctx->priv_data;
  650. int i;
  651. for (i = 0; i < FF_ARRAY_ELEMS(s->slices); i++) {
  652. av_freep(&s->slices[i]);
  653. s->slices_size[i] = 0;
  654. ff_free_vlc(&s->vlc[i]);
  655. }
  656. return 0;
  657. }
/* Codec registration entry for the MagicYUV decoder. */
AVCodec ff_magicyuv_decoder = {
    .name             = "magicyuv",
    .long_name        = NULL_IF_CONFIG_SMALL("MagicYUV video"),
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = AV_CODEC_ID_MAGICYUV,
    .priv_data_size   = sizeof(MagicYUVContext),
    .init             = magy_decode_init,
    .close            = magy_decode_end,
    .decode           = magy_decode_frame,
    .capabilities     = AV_CODEC_CAP_DR1 |
                        AV_CODEC_CAP_FRAME_THREADS |
                        AV_CODEC_CAP_SLICE_THREADS,
    .caps_internal    = FF_CODEC_CAP_INIT_THREADSAFE,
};