You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2077 lines
72KB

  1. /*
  2. * MJPEG decoder
  3. * Copyright (c) 2000, 2001 Fabrice Bellard
  4. * Copyright (c) 2003 Alex Beregszaszi
  5. * Copyright (c) 2003-2004 Michael Niedermayer
  6. *
  7. * Support for external huffman table, various fixes (AVID workaround),
  8. * aspecting, new decode_frame mechanism and apple mjpeg-b support
  9. * by Alex Beregszaszi
  10. *
  11. * This file is part of FFmpeg.
  12. *
  13. * FFmpeg is free software; you can redistribute it and/or
  14. * modify it under the terms of the GNU Lesser General Public
  15. * License as published by the Free Software Foundation; either
  16. * version 2.1 of the License, or (at your option) any later version.
  17. *
  18. * FFmpeg is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  21. * Lesser General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU Lesser General Public
  24. * License along with FFmpeg; if not, write to the Free Software
  25. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  26. */
  27. /**
  28. * @file
  29. * MJPEG decoder.
  30. */
  31. #include "libavutil/imgutils.h"
  32. #include "libavutil/avassert.h"
  33. #include "libavutil/opt.h"
  34. #include "avcodec.h"
  35. #include "copy_block.h"
  36. #include "internal.h"
  37. #include "mjpeg.h"
  38. #include "mjpegdec.h"
  39. #include "jpeglsdec.h"
  40. #include "tiff.h"
  41. #include "exif.h"
  42. #include "bytestream.h"
  43. static int build_vlc(VLC *vlc, const uint8_t *bits_table,
  44. const uint8_t *val_table, int nb_codes,
  45. int use_static, int is_ac)
  46. {
  47. uint8_t huff_size[256] = { 0 };
  48. uint16_t huff_code[256];
  49. uint16_t huff_sym[256];
  50. int i;
  51. av_assert0(nb_codes <= 256);
  52. ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
  53. for (i = 0; i < 256; i++)
  54. huff_sym[i] = i + 16 * is_ac;
  55. if (is_ac)
  56. huff_sym[0] = 16 * 256;
  57. return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
  58. huff_code, 2, 2, huff_sym, 2, 2, use_static);
  59. }
  60. static void build_basic_mjpeg_vlc(MJpegDecodeContext *s)
  61. {
  62. build_vlc(&s->vlcs[0][0], avpriv_mjpeg_bits_dc_luminance,
  63. avpriv_mjpeg_val_dc, 12, 0, 0);
  64. build_vlc(&s->vlcs[0][1], avpriv_mjpeg_bits_dc_chrominance,
  65. avpriv_mjpeg_val_dc, 12, 0, 0);
  66. build_vlc(&s->vlcs[1][0], avpriv_mjpeg_bits_ac_luminance,
  67. avpriv_mjpeg_val_ac_luminance, 251, 0, 1);
  68. build_vlc(&s->vlcs[1][1], avpriv_mjpeg_bits_ac_chrominance,
  69. avpriv_mjpeg_val_ac_chrominance, 251, 0, 1);
  70. build_vlc(&s->vlcs[2][0], avpriv_mjpeg_bits_ac_luminance,
  71. avpriv_mjpeg_val_ac_luminance, 251, 0, 0);
  72. build_vlc(&s->vlcs[2][1], avpriv_mjpeg_bits_ac_chrominance,
  73. avpriv_mjpeg_val_ac_chrominance, 251, 0, 0);
  74. }
  75. av_cold int ff_mjpeg_decode_init(AVCodecContext *avctx)
  76. {
  77. MJpegDecodeContext *s = avctx->priv_data;
  78. if (!s->picture_ptr)
  79. s->picture_ptr = &s->picture;
  80. avcodec_get_frame_defaults(&s->picture);
  81. s->avctx = avctx;
  82. ff_hpeldsp_init(&s->hdsp, avctx->flags);
  83. ff_dsputil_init(&s->dsp, avctx);
  84. ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
  85. s->buffer_size = 0;
  86. s->buffer = NULL;
  87. s->start_code = -1;
  88. s->first_picture = 1;
  89. s->got_picture = 0;
  90. s->org_height = avctx->coded_height;
  91. avctx->chroma_sample_location = AVCHROMA_LOC_CENTER;
  92. build_basic_mjpeg_vlc(s);
  93. if (s->extern_huff) {
  94. av_log(avctx, AV_LOG_INFO, "using external huffman table\n");
  95. init_get_bits(&s->gb, avctx->extradata, avctx->extradata_size * 8);
  96. if (ff_mjpeg_decode_dht(s)) {
  97. av_log(avctx, AV_LOG_ERROR,
  98. "error using external huffman table, switching back to internal\n");
  99. build_basic_mjpeg_vlc(s);
  100. }
  101. }
  102. if (avctx->field_order == AV_FIELD_BB) { /* quicktime icefloe 019 */
  103. s->interlace_polarity = 1; /* bottom field first */
  104. av_log(avctx, AV_LOG_DEBUG, "bottom field first\n");
  105. }
  106. if (avctx->codec->id == AV_CODEC_ID_AMV)
  107. s->flipped = 1;
  108. return 0;
  109. }
  110. /* quantize tables */
  111. int ff_mjpeg_decode_dqt(MJpegDecodeContext *s)
  112. {
  113. int len, index, i, j;
  114. len = get_bits(&s->gb, 16) - 2;
  115. while (len >= 65) {
  116. int pr = get_bits(&s->gb, 4);
  117. if (pr > 1) {
  118. av_log(s->avctx, AV_LOG_ERROR, "dqt: invalid precision\n");
  119. return AVERROR_INVALIDDATA;
  120. }
  121. index = get_bits(&s->gb, 4);
  122. if (index >= 4)
  123. return -1;
  124. av_log(s->avctx, AV_LOG_DEBUG, "index=%d\n", index);
  125. /* read quant table */
  126. for (i = 0; i < 64; i++) {
  127. j = s->scantable.permutated[i];
  128. s->quant_matrixes[index][j] = get_bits(&s->gb, pr ? 16 : 8);
  129. }
  130. // XXX FIXME finetune, and perhaps add dc too
  131. s->qscale[index] = FFMAX(s->quant_matrixes[index][s->scantable.permutated[1]],
  132. s->quant_matrixes[index][s->scantable.permutated[8]]) >> 1;
  133. av_log(s->avctx, AV_LOG_DEBUG, "qscale[%d]: %d\n",
  134. index, s->qscale[index]);
  135. len -= 65;
  136. }
  137. return 0;
  138. }
  139. /* decode huffman tables and build VLC decoders */
  140. int ff_mjpeg_decode_dht(MJpegDecodeContext *s)
  141. {
  142. int len, index, i, class, n, v, code_max;
  143. uint8_t bits_table[17];
  144. uint8_t val_table[256];
  145. int ret = 0;
  146. len = get_bits(&s->gb, 16) - 2;
  147. while (len > 0) {
  148. if (len < 17)
  149. return AVERROR_INVALIDDATA;
  150. class = get_bits(&s->gb, 4);
  151. if (class >= 2)
  152. return AVERROR_INVALIDDATA;
  153. index = get_bits(&s->gb, 4);
  154. if (index >= 4)
  155. return AVERROR_INVALIDDATA;
  156. n = 0;
  157. for (i = 1; i <= 16; i++) {
  158. bits_table[i] = get_bits(&s->gb, 8);
  159. n += bits_table[i];
  160. }
  161. len -= 17;
  162. if (len < n || n > 256)
  163. return AVERROR_INVALIDDATA;
  164. code_max = 0;
  165. for (i = 0; i < n; i++) {
  166. v = get_bits(&s->gb, 8);
  167. if (v > code_max)
  168. code_max = v;
  169. val_table[i] = v;
  170. }
  171. len -= n;
  172. /* build VLC and flush previous vlc if present */
  173. ff_free_vlc(&s->vlcs[class][index]);
  174. av_log(s->avctx, AV_LOG_DEBUG, "class=%d index=%d nb_codes=%d\n",
  175. class, index, code_max + 1);
  176. if ((ret = build_vlc(&s->vlcs[class][index], bits_table, val_table,
  177. code_max + 1, 0, class > 0)) < 0)
  178. return ret;
  179. if (class > 0) {
  180. ff_free_vlc(&s->vlcs[2][index]);
  181. if ((ret = build_vlc(&s->vlcs[2][index], bits_table, val_table,
  182. code_max + 1, 0, 0)) < 0)
  183. return ret;
  184. }
  185. }
  186. return 0;
  187. }
  188. int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
  189. {
  190. int len, nb_components, i, width, height, pix_fmt_id;
  191. int h_count[MAX_COMPONENTS];
  192. int v_count[MAX_COMPONENTS];
  193. s->cur_scan = 0;
  194. s->upscale_h = s->upscale_v = 0;
  195. /* XXX: verify len field validity */
  196. len = get_bits(&s->gb, 16);
  197. s->avctx->bits_per_raw_sample =
  198. s->bits = get_bits(&s->gb, 8);
  199. if (s->pegasus_rct)
  200. s->bits = 9;
  201. if (s->bits == 9 && !s->pegasus_rct)
  202. s->rct = 1; // FIXME ugly
  203. if(s->lossless && s->avctx->lowres){
  204. av_log(s->avctx, AV_LOG_ERROR, "lowres is not possible with lossless jpeg\n");
  205. return -1;
  206. }
  207. height = get_bits(&s->gb, 16);
  208. width = get_bits(&s->gb, 16);
  209. // HACK for odd_height.mov
  210. if (s->interlaced && s->width == width && s->height == height + 1)
  211. height= s->height;
  212. av_log(s->avctx, AV_LOG_DEBUG, "sof0: picture: %dx%d\n", width, height);
  213. if (av_image_check_size(width, height, 0, s->avctx))
  214. return AVERROR_INVALIDDATA;
  215. nb_components = get_bits(&s->gb, 8);
  216. if (nb_components <= 0 ||
  217. nb_components > MAX_COMPONENTS)
  218. return -1;
  219. if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
  220. if (nb_components != s->nb_components) {
  221. av_log(s->avctx, AV_LOG_ERROR,
  222. "nb_components changing in interlaced picture\n");
  223. return AVERROR_INVALIDDATA;
  224. }
  225. }
  226. if (s->ls && !(s->bits <= 8 || nb_components == 1)) {
  227. avpriv_report_missing_feature(s->avctx,
  228. "JPEG-LS that is not <= 8 "
  229. "bits/component or 16-bit gray");
  230. return AVERROR_PATCHWELCOME;
  231. }
  232. if (!s->lossless && !(s->bits <= 8 || nb_components == 1)) {
  233. avpriv_report_missing_feature(s->avctx,
  234. "lossy that is not <= 8 "
  235. "bits/component or 16-bit gray");
  236. return AVERROR_PATCHWELCOME;
  237. }
  238. s->nb_components = nb_components;
  239. s->h_max = 1;
  240. s->v_max = 1;
  241. memset(h_count, 0, sizeof(h_count));
  242. memset(v_count, 0, sizeof(v_count));
  243. for (i = 0; i < nb_components; i++) {
  244. /* component id */
  245. s->component_id[i] = get_bits(&s->gb, 8) - 1;
  246. h_count[i] = get_bits(&s->gb, 4);
  247. v_count[i] = get_bits(&s->gb, 4);
  248. /* compute hmax and vmax (only used in interleaved case) */
  249. if (h_count[i] > s->h_max)
  250. s->h_max = h_count[i];
  251. if (v_count[i] > s->v_max)
  252. s->v_max = v_count[i];
  253. s->quant_index[i] = get_bits(&s->gb, 8);
  254. if (s->quant_index[i] >= 4) {
  255. av_log(s->avctx, AV_LOG_ERROR, "quant_index is invalid\n");
  256. return AVERROR_INVALIDDATA;
  257. }
  258. if (!h_count[i] || !v_count[i]) {
  259. av_log(s->avctx, AV_LOG_ERROR,
  260. "Invalid sampling factor in component %d %d:%d\n",
  261. i, h_count[i], v_count[i]);
  262. return AVERROR_INVALIDDATA;
  263. }
  264. av_log(s->avctx, AV_LOG_DEBUG, "component %d %d:%d id: %d quant:%d\n",
  265. i, h_count[i], v_count[i],
  266. s->component_id[i], s->quant_index[i]);
  267. }
  268. if (s->ls && (s->h_max > 1 || s->v_max > 1)) {
  269. avpriv_report_missing_feature(s->avctx, "Subsampling in JPEG-LS");
  270. return AVERROR_PATCHWELCOME;
  271. }
  272. /* if different size, realloc/alloc picture */
  273. if ( width != s->width || height != s->height
  274. || memcmp(s->h_count, h_count, sizeof(h_count))
  275. || memcmp(s->v_count, v_count, sizeof(v_count))) {
  276. s->width = width;
  277. s->height = height;
  278. memcpy(s->h_count, h_count, sizeof(h_count));
  279. memcpy(s->v_count, v_count, sizeof(v_count));
  280. s->interlaced = 0;
  281. s->got_picture = 0;
  282. /* test interlaced mode */
  283. if (s->first_picture &&
  284. s->org_height != 0 &&
  285. s->height < ((s->org_height * 3) / 4)) {
  286. s->interlaced = 1;
  287. s->bottom_field = s->interlace_polarity;
  288. s->picture_ptr->interlaced_frame = 1;
  289. s->picture_ptr->top_field_first = !s->interlace_polarity;
  290. height *= 2;
  291. }
  292. avcodec_set_dimensions(s->avctx, width, height);
  293. s->first_picture = 0;
  294. }
  295. if (s->interlaced && (s->bottom_field == !s->interlace_polarity)) {
  296. if (s->progressive) {
  297. avpriv_request_sample(s->avctx, "progressively coded interlaced picture");
  298. return AVERROR_INVALIDDATA;
  299. }
  300. } else{
  301. if (s->v_max == 1 && s->h_max == 1 && s->lossless==1 && (nb_components==3 || nb_components==4))
  302. s->rgb = 1;
  303. else if (!s->lossless)
  304. s->rgb = 0;
  305. /* XXX: not complete test ! */
  306. pix_fmt_id = (s->h_count[0] << 28) | (s->v_count[0] << 24) |
  307. (s->h_count[1] << 20) | (s->v_count[1] << 16) |
  308. (s->h_count[2] << 12) | (s->v_count[2] << 8) |
  309. (s->h_count[3] << 4) | s->v_count[3];
  310. av_log(s->avctx, AV_LOG_DEBUG, "pix fmt id %x\n", pix_fmt_id);
  311. /* NOTE we do not allocate pictures large enough for the possible
  312. * padding of h/v_count being 4 */
  313. if (!(pix_fmt_id & 0xD0D0D0D0))
  314. pix_fmt_id -= (pix_fmt_id & 0xF0F0F0F0) >> 1;
  315. if (!(pix_fmt_id & 0x0D0D0D0D))
  316. pix_fmt_id -= (pix_fmt_id & 0x0F0F0F0F) >> 1;
  317. switch (pix_fmt_id) {
  318. case 0x11111100:
  319. if (s->rgb)
  320. s->avctx->pix_fmt = AV_PIX_FMT_BGR24;
  321. else {
  322. if (s->component_id[0] == 'Q' && s->component_id[1] == 'F' && s->component_id[2] == 'A') {
  323. s->avctx->pix_fmt = AV_PIX_FMT_GBR24P;
  324. } else {
  325. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
  326. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  327. }
  328. }
  329. av_assert0(s->nb_components == 3);
  330. break;
  331. case 0x11111111:
  332. if (s->rgb)
  333. s->avctx->pix_fmt = AV_PIX_FMT_ABGR;
  334. else {
  335. s->avctx->pix_fmt = /*s->cs_itu601 ?*/ AV_PIX_FMT_YUVA444P/* : AV_PIX_FMT_YUVJA444P*/;
  336. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  337. }
  338. av_assert0(s->nb_components == 4);
  339. break;
  340. case 0x12121100:
  341. case 0x22122100:
  342. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
  343. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  344. s->upscale_v = 2;
  345. s->upscale_h = (pix_fmt_id == 0x22122100);
  346. s->chroma_height = s->height;
  347. break;
  348. case 0x21211100:
  349. case 0x22211200:
  350. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
  351. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  352. s->upscale_v = (pix_fmt_id == 0x22211200);
  353. s->upscale_h = 2;
  354. s->chroma_height = s->height;
  355. break;
  356. case 0x22221100:
  357. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV444P : AV_PIX_FMT_YUVJ444P;
  358. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  359. s->upscale_v = 2;
  360. s->upscale_h = 2;
  361. s->chroma_height = s->height / 2;
  362. break;
  363. case 0x11000000:
  364. case 0x13000000:
  365. case 0x14000000:
  366. case 0x31000000:
  367. case 0x33000000:
  368. case 0x34000000:
  369. case 0x41000000:
  370. case 0x43000000:
  371. case 0x44000000:
  372. if(s->bits <= 8)
  373. s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
  374. else
  375. s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
  376. break;
  377. case 0x12111100:
  378. case 0x22211100:
  379. case 0x22112100:
  380. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV440P : AV_PIX_FMT_YUVJ440P;
  381. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  382. s->upscale_h = (pix_fmt_id == 0x22211100) * 2 + (pix_fmt_id == 0x22112100);
  383. s->chroma_height = s->height / 2;
  384. break;
  385. case 0x21111100:
  386. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
  387. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  388. break;
  389. case 0x22121100:
  390. case 0x22111200:
  391. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV422P : AV_PIX_FMT_YUVJ422P;
  392. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  393. s->upscale_v = (pix_fmt_id == 0x22121100) + 1;
  394. break;
  395. case 0x22111100:
  396. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_YUVJ420P;
  397. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  398. break;
  399. case 0x41111100:
  400. s->avctx->pix_fmt = s->cs_itu601 ? AV_PIX_FMT_YUV411P : AV_PIX_FMT_YUVJ411P;
  401. s->avctx->color_range = s->cs_itu601 ? AVCOL_RANGE_MPEG : AVCOL_RANGE_JPEG;
  402. break;
  403. default:
  404. av_log(s->avctx, AV_LOG_ERROR, "Unhandled pixel format 0x%x\n", pix_fmt_id);
  405. return AVERROR_PATCHWELCOME;
  406. }
  407. if ((s->upscale_h || s->upscale_v) && s->avctx->lowres) {
  408. av_log(s->avctx, AV_LOG_ERROR, "lowres not supported for weird subsampling\n");
  409. return AVERROR_PATCHWELCOME;
  410. }
  411. if (s->ls) {
  412. s->upscale_h = s->upscale_v = 0;
  413. if (s->nb_components > 1)
  414. s->avctx->pix_fmt = AV_PIX_FMT_RGB24;
  415. else if (s->bits <= 8)
  416. s->avctx->pix_fmt = AV_PIX_FMT_GRAY8;
  417. else
  418. s->avctx->pix_fmt = AV_PIX_FMT_GRAY16;
  419. }
  420. av_frame_unref(s->picture_ptr);
  421. if (ff_get_buffer(s->avctx, s->picture_ptr, AV_GET_BUFFER_FLAG_REF) < 0)
  422. return -1;
  423. s->picture_ptr->pict_type = AV_PICTURE_TYPE_I;
  424. s->picture_ptr->key_frame = 1;
  425. s->got_picture = 1;
  426. for (i = 0; i < 3; i++)
  427. s->linesize[i] = s->picture_ptr->linesize[i] << s->interlaced;
  428. av_dlog(s->avctx, "%d %d %d %d %d %d\n",
  429. s->width, s->height, s->linesize[0], s->linesize[1],
  430. s->interlaced, s->avctx->height);
  431. if (len != (8 + (3 * nb_components)))
  432. av_log(s->avctx, AV_LOG_DEBUG, "decode_sof0: error, len(%d) mismatch\n", len);
  433. }
  434. if (s->rgb && !s->lossless && !s->ls) {
  435. av_log(s->avctx, AV_LOG_ERROR, "Unsupported coding and pixel format combination\n");
  436. return AVERROR_PATCHWELCOME;
  437. }
  438. /* totally blank picture as progressive JPEG will only add details to it */
  439. if (s->progressive) {
  440. int bw = (width + s->h_max * 8 - 1) / (s->h_max * 8);
  441. int bh = (height + s->v_max * 8 - 1) / (s->v_max * 8);
  442. for (i = 0; i < s->nb_components; i++) {
  443. int size = bw * bh * s->h_count[i] * s->v_count[i];
  444. av_freep(&s->blocks[i]);
  445. av_freep(&s->last_nnz[i]);
  446. s->blocks[i] = av_malloc(size * sizeof(**s->blocks));
  447. s->last_nnz[i] = av_mallocz(size * sizeof(**s->last_nnz));
  448. s->block_stride[i] = bw * s->h_count[i];
  449. }
  450. memset(s->coefs_finished, 0, sizeof(s->coefs_finished));
  451. }
  452. return 0;
  453. }
  454. static inline int mjpeg_decode_dc(MJpegDecodeContext *s, int dc_index)
  455. {
  456. int code;
  457. code = get_vlc2(&s->gb, s->vlcs[0][dc_index].table, 9, 2);
  458. if (code < 0 || code > 16) {
  459. av_log(s->avctx, AV_LOG_WARNING,
  460. "mjpeg_decode_dc: bad vlc: %d:%d (%p)\n",
  461. 0, dc_index, &s->vlcs[0][dc_index]);
  462. return 0xffff;
  463. }
  464. if (code)
  465. return get_xbits(&s->gb, code);
  466. else
  467. return 0;
  468. }
  469. /* decode block and dequantize */
  470. static int decode_block(MJpegDecodeContext *s, int16_t *block, int component,
  471. int dc_index, int ac_index, int16_t *quant_matrix)
  472. {
  473. int code, i, j, level, val;
  474. /* DC coef */
  475. val = mjpeg_decode_dc(s, dc_index);
  476. if (val == 0xffff) {
  477. av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
  478. return AVERROR_INVALIDDATA;
  479. }
  480. val = val * quant_matrix[0] + s->last_dc[component];
  481. s->last_dc[component] = val;
  482. block[0] = val;
  483. /* AC coefs */
  484. i = 0;
  485. {OPEN_READER(re, &s->gb);
  486. do {
  487. UPDATE_CACHE(re, &s->gb);
  488. GET_VLC(code, re, &s->gb, s->vlcs[1][ac_index].table, 9, 2);
  489. i += ((unsigned)code) >> 4;
  490. code &= 0xf;
  491. if (code) {
  492. if (code > MIN_CACHE_BITS - 16)
  493. UPDATE_CACHE(re, &s->gb);
  494. {
  495. int cache = GET_CACHE(re, &s->gb);
  496. int sign = (~cache) >> 31;
  497. level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
  498. }
  499. LAST_SKIP_BITS(re, &s->gb, code);
  500. if (i > 63) {
  501. av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
  502. return AVERROR_INVALIDDATA;
  503. }
  504. j = s->scantable.permutated[i];
  505. block[j] = level * quant_matrix[j];
  506. }
  507. } while (i < 63);
  508. CLOSE_READER(re, &s->gb);}
  509. return 0;
  510. }
  511. static int decode_dc_progressive(MJpegDecodeContext *s, int16_t *block,
  512. int component, int dc_index,
  513. int16_t *quant_matrix, int Al)
  514. {
  515. int val;
  516. s->dsp.clear_block(block);
  517. val = mjpeg_decode_dc(s, dc_index);
  518. if (val == 0xffff) {
  519. av_log(s->avctx, AV_LOG_ERROR, "error dc\n");
  520. return AVERROR_INVALIDDATA;
  521. }
  522. val = (val * quant_matrix[0] << Al) + s->last_dc[component];
  523. s->last_dc[component] = val;
  524. block[0] = val;
  525. return 0;
  526. }
  527. /* decode block and dequantize - progressive JPEG version */
  528. static int decode_block_progressive(MJpegDecodeContext *s, int16_t *block,
  529. uint8_t *last_nnz, int ac_index,
  530. int16_t *quant_matrix,
  531. int ss, int se, int Al, int *EOBRUN)
  532. {
  533. int code, i, j, level, val, run;
  534. if (*EOBRUN) {
  535. (*EOBRUN)--;
  536. return 0;
  537. }
  538. {
  539. OPEN_READER(re, &s->gb);
  540. for (i = ss; ; i++) {
  541. UPDATE_CACHE(re, &s->gb);
  542. GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
  543. run = ((unsigned) code) >> 4;
  544. code &= 0xF;
  545. if (code) {
  546. i += run;
  547. if (code > MIN_CACHE_BITS - 16)
  548. UPDATE_CACHE(re, &s->gb);
  549. {
  550. int cache = GET_CACHE(re, &s->gb);
  551. int sign = (~cache) >> 31;
  552. level = (NEG_USR32(sign ^ cache,code) ^ sign) - sign;
  553. }
  554. LAST_SKIP_BITS(re, &s->gb, code);
  555. if (i >= se) {
  556. if (i == se) {
  557. j = s->scantable.permutated[se];
  558. block[j] = level * quant_matrix[j] << Al;
  559. break;
  560. }
  561. av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i);
  562. return AVERROR_INVALIDDATA;
  563. }
  564. j = s->scantable.permutated[i];
  565. block[j] = level * quant_matrix[j] << Al;
  566. } else {
  567. if (run == 0xF) {// ZRL - skip 15 coefficients
  568. i += 15;
  569. if (i >= se) {
  570. av_log(s->avctx, AV_LOG_ERROR, "ZRL overflow: %d\n", i);
  571. return AVERROR_INVALIDDATA;
  572. }
  573. } else {
  574. val = (1 << run);
  575. if (run) {
  576. UPDATE_CACHE(re, &s->gb);
  577. val += NEG_USR32(GET_CACHE(re, &s->gb), run);
  578. LAST_SKIP_BITS(re, &s->gb, run);
  579. }
  580. *EOBRUN = val - 1;
  581. break;
  582. }
  583. }
  584. }
  585. CLOSE_READER(re, &s->gb);
  586. }
  587. if (i > *last_nnz)
  588. *last_nnz = i;
  589. return 0;
  590. }
  591. #define REFINE_BIT(j) { \
  592. UPDATE_CACHE(re, &s->gb); \
  593. sign = block[j] >> 15; \
  594. block[j] += SHOW_UBITS(re, &s->gb, 1) * \
  595. ((quant_matrix[j] ^ sign) - sign) << Al; \
  596. LAST_SKIP_BITS(re, &s->gb, 1); \
  597. }
  598. #define ZERO_RUN \
  599. for (; ; i++) { \
  600. if (i > last) { \
  601. i += run; \
  602. if (i > se) { \
  603. av_log(s->avctx, AV_LOG_ERROR, "error count: %d\n", i); \
  604. return -1; \
  605. } \
  606. break; \
  607. } \
  608. j = s->scantable.permutated[i]; \
  609. if (block[j]) \
  610. REFINE_BIT(j) \
  611. else if (run-- == 0) \
  612. break; \
  613. }
  614. /* decode block and dequantize - progressive JPEG refinement pass */
  615. static int decode_block_refinement(MJpegDecodeContext *s, int16_t *block,
  616. uint8_t *last_nnz,
  617. int ac_index, int16_t *quant_matrix,
  618. int ss, int se, int Al, int *EOBRUN)
  619. {
  620. int code, i = ss, j, sign, val, run;
  621. int last = FFMIN(se, *last_nnz);
  622. OPEN_READER(re, &s->gb);
  623. if (*EOBRUN) {
  624. (*EOBRUN)--;
  625. } else {
  626. for (; ; i++) {
  627. UPDATE_CACHE(re, &s->gb);
  628. GET_VLC(code, re, &s->gb, s->vlcs[2][ac_index].table, 9, 2);
  629. if (code & 0xF) {
  630. run = ((unsigned) code) >> 4;
  631. UPDATE_CACHE(re, &s->gb);
  632. val = SHOW_UBITS(re, &s->gb, 1);
  633. LAST_SKIP_BITS(re, &s->gb, 1);
  634. ZERO_RUN;
  635. j = s->scantable.permutated[i];
  636. val--;
  637. block[j] = ((quant_matrix[j]^val) - val) << Al;
  638. if (i == se) {
  639. if (i > *last_nnz)
  640. *last_nnz = i;
  641. CLOSE_READER(re, &s->gb);
  642. return 0;
  643. }
  644. } else {
  645. run = ((unsigned) code) >> 4;
  646. if (run == 0xF) {
  647. ZERO_RUN;
  648. } else {
  649. val = run;
  650. run = (1 << run);
  651. if (val) {
  652. UPDATE_CACHE(re, &s->gb);
  653. run += SHOW_UBITS(re, &s->gb, val);
  654. LAST_SKIP_BITS(re, &s->gb, val);
  655. }
  656. *EOBRUN = run - 1;
  657. break;
  658. }
  659. }
  660. }
  661. if (i > *last_nnz)
  662. *last_nnz = i;
  663. }
  664. for (; i <= last; i++) {
  665. j = s->scantable.permutated[i];
  666. if (block[j])
  667. REFINE_BIT(j)
  668. }
  669. CLOSE_READER(re, &s->gb);
  670. return 0;
  671. }
  672. #undef REFINE_BIT
  673. #undef ZERO_RUN
  674. static int handle_rstn(MJpegDecodeContext *s, int nb_components)
  675. {
  676. int i;
  677. int reset = 0;
  678. if (s->restart_interval) {
  679. s->restart_count--;
  680. if(s->restart_count == 0 && s->avctx->codec_id == AV_CODEC_ID_THP){
  681. align_get_bits(&s->gb);
  682. for (i = 0; i < nb_components; i++) /* reset dc */
  683. s->last_dc[i] = (4 << s->bits);
  684. }
  685. i = 8 + ((-get_bits_count(&s->gb)) & 7);
  686. /* skip RSTn */
  687. if (s->restart_count == 0) {
  688. if( show_bits(&s->gb, i) == (1 << i) - 1
  689. || show_bits(&s->gb, i) == 0xFF) {
  690. int pos = get_bits_count(&s->gb);
  691. align_get_bits(&s->gb);
  692. while (get_bits_left(&s->gb) >= 8 && show_bits(&s->gb, 8) == 0xFF)
  693. skip_bits(&s->gb, 8);
  694. if (get_bits_left(&s->gb) >= 8 && (get_bits(&s->gb, 8) & 0xF8) == 0xD0) {
  695. for (i = 0; i < nb_components; i++) /* reset dc */
  696. s->last_dc[i] = (4 << s->bits);
  697. reset = 1;
  698. } else
  699. skip_bits_long(&s->gb, pos - get_bits_count(&s->gb));
  700. }
  701. }
  702. }
  703. return reset;
  704. }
  705. static int ljpeg_decode_rgb_scan(MJpegDecodeContext *s, int nb_components, int predictor, int point_transform)
  706. {
  707. int i, mb_x, mb_y;
  708. uint16_t (*buffer)[4];
  709. int left[4], top[4], topleft[4];
  710. const int linesize = s->linesize[0];
  711. const int mask = ((1 << s->bits) - 1) << point_transform;
  712. int resync_mb_y = 0;
  713. int resync_mb_x = 0;
  714. if (s->nb_components != 3 && s->nb_components != 4)
  715. return AVERROR_INVALIDDATA;
  716. if (s->v_max != 1 || s->h_max != 1 || !s->lossless)
  717. return AVERROR_INVALIDDATA;
  718. s->restart_count = s->restart_interval;
  719. av_fast_malloc(&s->ljpeg_buffer, &s->ljpeg_buffer_size,
  720. (unsigned)s->mb_width * 4 * sizeof(s->ljpeg_buffer[0][0]));
  721. buffer = s->ljpeg_buffer;
  722. for (i = 0; i < 4; i++)
  723. buffer[0][i] = 1 << (s->bits - 1);
  724. for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
  725. uint8_t *ptr = s->picture.data[0] + (linesize * mb_y);
  726. if (s->interlaced && s->bottom_field)
  727. ptr += linesize >> 1;
  728. for (i = 0; i < 4; i++)
  729. top[i] = left[i] = topleft[i] = buffer[0][i];
  730. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  731. int modified_predictor = predictor;
  732. if (s->restart_interval && !s->restart_count){
  733. s->restart_count = s->restart_interval;
  734. resync_mb_x = mb_x;
  735. resync_mb_y = mb_y;
  736. for(i=0; i<4; i++)
  737. top[i] = left[i]= topleft[i]= 1 << (s->bits - 1);
  738. }
  739. if (mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || !mb_x)
  740. modified_predictor = 1;
  741. for (i=0;i<nb_components;i++) {
  742. int pred, dc;
  743. topleft[i] = top[i];
  744. top[i] = buffer[mb_x][i];
  745. PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
  746. dc = mjpeg_decode_dc(s, s->dc_index[i]);
  747. if(dc == 0xFFFF)
  748. return -1;
  749. left[i] = buffer[mb_x][i] =
  750. mask & (pred + (dc << point_transform));
  751. }
  752. if (s->restart_interval && !--s->restart_count) {
  753. align_get_bits(&s->gb);
  754. skip_bits(&s->gb, 16); /* skip RSTn */
  755. }
  756. }
  757. if (s->nb_components == 4) {
  758. for(i=0; i<nb_components; i++) {
  759. int c= s->comp_index[i];
  760. for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
  761. ptr[4*mb_x+3-c] = buffer[mb_x][i];
  762. }
  763. }
  764. } else if (s->rct) {
  765. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  766. ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2] - 0x200) >> 2);
  767. ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
  768. ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
  769. }
  770. } else if (s->pegasus_rct) {
  771. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  772. ptr[3*mb_x + 1] = buffer[mb_x][0] - ((buffer[mb_x][1] + buffer[mb_x][2]) >> 2);
  773. ptr[3*mb_x + 0] = buffer[mb_x][1] + ptr[3*mb_x + 1];
  774. ptr[3*mb_x + 2] = buffer[mb_x][2] + ptr[3*mb_x + 1];
  775. }
  776. } else {
  777. for(i=0; i<nb_components; i++) {
  778. int c= s->comp_index[i];
  779. for(mb_x = 0; mb_x < s->mb_width; mb_x++) {
  780. ptr[3*mb_x+2-c] = buffer[mb_x][i];
  781. }
  782. }
  783. }
  784. }
  785. return 0;
  786. }
  787. static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
  788. int point_transform, int nb_components)
  789. {
  790. int i, mb_x, mb_y, mask;
  791. int bits= (s->bits+7)&~7;
  792. int resync_mb_y = 0;
  793. int resync_mb_x = 0;
  794. point_transform += bits - s->bits;
  795. mask = ((1 << s->bits) - 1) << point_transform;
  796. av_assert0(nb_components>=1 && nb_components<=4);
  797. for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
  798. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  799. if (s->restart_interval && !s->restart_count){
  800. s->restart_count = s->restart_interval;
  801. resync_mb_x = mb_x;
  802. resync_mb_y = mb_y;
  803. }
  804. if(!mb_x || mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x || s->interlaced){
  805. int toprow = mb_y == resync_mb_y || mb_y == resync_mb_y+1 && mb_x < resync_mb_x;
  806. int leftcol = !mb_x || mb_y == resync_mb_y && mb_x == resync_mb_x;
  807. for (i = 0; i < nb_components; i++) {
  808. uint8_t *ptr;
  809. uint16_t *ptr16;
  810. int n, h, v, x, y, c, j, linesize;
  811. n = s->nb_blocks[i];
  812. c = s->comp_index[i];
  813. h = s->h_scount[i];
  814. v = s->v_scount[i];
  815. x = 0;
  816. y = 0;
  817. linesize= s->linesize[c];
  818. if(bits>8) linesize /= 2;
  819. for(j=0; j<n; j++) {
  820. int pred, dc;
  821. dc = mjpeg_decode_dc(s, s->dc_index[i]);
  822. if(dc == 0xFFFF)
  823. return -1;
  824. if(bits<=8){
  825. ptr = s->picture.data[c] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
  826. if(y==0 && toprow){
  827. if(x==0 && leftcol){
  828. pred= 1 << (bits - 1);
  829. }else{
  830. pred= ptr[-1];
  831. }
  832. }else{
  833. if(x==0 && leftcol){
  834. pred= ptr[-linesize];
  835. }else{
  836. PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
  837. }
  838. }
  839. if (s->interlaced && s->bottom_field)
  840. ptr += linesize >> 1;
  841. pred &= mask;
  842. *ptr= pred + (dc << point_transform);
  843. }else{
  844. ptr16 = (uint16_t*)(s->picture.data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
  845. if(y==0 && toprow){
  846. if(x==0 && leftcol){
  847. pred= 1 << (bits - 1);
  848. }else{
  849. pred= ptr16[-1];
  850. }
  851. }else{
  852. if(x==0 && leftcol){
  853. pred= ptr16[-linesize];
  854. }else{
  855. PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
  856. }
  857. }
  858. if (s->interlaced && s->bottom_field)
  859. ptr16 += linesize >> 1;
  860. pred &= mask;
  861. *ptr16= pred + (dc << point_transform);
  862. }
  863. if (++x == h) {
  864. x = 0;
  865. y++;
  866. }
  867. }
  868. }
  869. } else {
  870. for (i = 0; i < nb_components; i++) {
  871. uint8_t *ptr;
  872. uint16_t *ptr16;
  873. int n, h, v, x, y, c, j, linesize, dc;
  874. n = s->nb_blocks[i];
  875. c = s->comp_index[i];
  876. h = s->h_scount[i];
  877. v = s->v_scount[i];
  878. x = 0;
  879. y = 0;
  880. linesize = s->linesize[c];
  881. if(bits>8) linesize /= 2;
  882. for (j = 0; j < n; j++) {
  883. int pred;
  884. dc = mjpeg_decode_dc(s, s->dc_index[i]);
  885. if(dc == 0xFFFF)
  886. return -1;
  887. if(bits<=8){
  888. ptr = s->picture.data[c] +
  889. (linesize * (v * mb_y + y)) +
  890. (h * mb_x + x); //FIXME optimize this crap
  891. PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
  892. pred &= mask;
  893. *ptr = pred + (dc << point_transform);
  894. }else{
  895. ptr16 = (uint16_t*)(s->picture.data[c] + 2*(linesize * (v * mb_y + y)) + 2*(h * mb_x + x)); //FIXME optimize this crap
  896. PREDICT(pred, ptr16[-linesize-1], ptr16[-linesize], ptr16[-1], predictor);
  897. pred &= mask;
  898. *ptr16= pred + (dc << point_transform);
  899. }
  900. if (++x == h) {
  901. x = 0;
  902. y++;
  903. }
  904. }
  905. }
  906. }
  907. if (s->restart_interval && !--s->restart_count) {
  908. align_get_bits(&s->gb);
  909. skip_bits(&s->gb, 16); /* skip RSTn */
  910. }
  911. }
  912. }
  913. return 0;
  914. }
  915. static av_always_inline void mjpeg_copy_block(MJpegDecodeContext *s,
  916. uint8_t *dst, const uint8_t *src,
  917. int linesize, int lowres)
  918. {
  919. switch (lowres) {
  920. case 0: s->hdsp.put_pixels_tab[1][0](dst, src, linesize, 8);
  921. break;
  922. case 1: copy_block4(dst, src, linesize, linesize, 4);
  923. break;
  924. case 2: copy_block2(dst, src, linesize, linesize, 2);
  925. break;
  926. case 3: *dst = *src;
  927. break;
  928. }
  929. }
  930. static void shift_output(MJpegDecodeContext *s, uint8_t *ptr, int linesize)
  931. {
  932. int block_x, block_y;
  933. if (s->bits > 8) {
  934. for (block_y=0; block_y<8; block_y++)
  935. for (block_x=0; block_x<8; block_x++)
  936. *(uint16_t*)(ptr + 2*block_x + block_y*linesize) <<= 16 - s->bits;
  937. } else {
  938. for (block_y=0; block_y<8; block_y++)
  939. for (block_x=0; block_x<8; block_x++)
  940. *(ptr + block_x + block_y*linesize) <<= 8 - s->bits;
  941. }
  942. }
  943. static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
  944. int Al, const uint8_t *mb_bitmask,
  945. const AVFrame *reference)
  946. {
  947. int i, mb_x, mb_y;
  948. uint8_t *data[MAX_COMPONENTS];
  949. const uint8_t *reference_data[MAX_COMPONENTS];
  950. int linesize[MAX_COMPONENTS];
  951. GetBitContext mb_bitmask_gb;
  952. int bytes_per_pixel = 1 + (s->bits > 8);
  953. if (mb_bitmask)
  954. init_get_bits(&mb_bitmask_gb, mb_bitmask, s->mb_width * s->mb_height);
  955. if (s->flipped && s->avctx->lowres) {
  956. av_log(s->avctx, AV_LOG_ERROR, "Can not flip image with lowres\n");
  957. s->flipped = 0;
  958. }
  959. s->restart_count = 0;
  960. for (i = 0; i < nb_components; i++) {
  961. int c = s->comp_index[i];
  962. data[c] = s->picture_ptr->data[c];
  963. reference_data[c] = reference ? reference->data[c] : NULL;
  964. linesize[c] = s->linesize[c];
  965. s->coefs_finished[c] |= 1;
  966. if (s->flipped && !(s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
  967. // picture should be flipped upside-down for this codec
  968. int offset = (linesize[c] * (s->v_scount[i] *
  969. (8 * s->mb_height - ((s->height / s->v_max) & 7)) - 1));
  970. data[c] += offset;
  971. reference_data[c] += offset;
  972. linesize[c] *= -1;
  973. }
  974. }
  975. for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
  976. for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
  977. const int copy_mb = mb_bitmask && !get_bits1(&mb_bitmask_gb);
  978. if (s->restart_interval && !s->restart_count)
  979. s->restart_count = s->restart_interval;
  980. if (get_bits_left(&s->gb) < 0) {
  981. av_log(s->avctx, AV_LOG_ERROR, "overread %d\n",
  982. -get_bits_left(&s->gb));
  983. return AVERROR_INVALIDDATA;
  984. }
  985. for (i = 0; i < nb_components; i++) {
  986. uint8_t *ptr;
  987. int n, h, v, x, y, c, j;
  988. int block_offset;
  989. n = s->nb_blocks[i];
  990. c = s->comp_index[i];
  991. h = s->h_scount[i];
  992. v = s->v_scount[i];
  993. x = 0;
  994. y = 0;
  995. for (j = 0; j < n; j++) {
  996. block_offset = (((linesize[c] * (v * mb_y + y) * 8) +
  997. (h * mb_x + x) * 8 * bytes_per_pixel) >> s->avctx->lowres);
  998. if (s->interlaced && s->bottom_field)
  999. block_offset += linesize[c] >> 1;
  1000. ptr = data[c] + block_offset;
  1001. if (!s->progressive) {
  1002. if (copy_mb)
  1003. mjpeg_copy_block(s, ptr, reference_data[c] + block_offset,
  1004. linesize[c], s->avctx->lowres);
  1005. else {
  1006. s->dsp.clear_block(s->block);
  1007. if (decode_block(s, s->block, i,
  1008. s->dc_index[i], s->ac_index[i],
  1009. s->quant_matrixes[s->quant_sindex[i]]) < 0) {
  1010. av_log(s->avctx, AV_LOG_ERROR,
  1011. "error y=%d x=%d\n", mb_y, mb_x);
  1012. return AVERROR_INVALIDDATA;
  1013. }
  1014. s->dsp.idct_put(ptr, linesize[c], s->block);
  1015. if (s->bits & 7)
  1016. shift_output(s, ptr, linesize[c]);
  1017. }
  1018. } else {
  1019. int block_idx = s->block_stride[c] * (v * mb_y + y) +
  1020. (h * mb_x + x);
  1021. int16_t *block = s->blocks[c][block_idx];
  1022. if (Ah)
  1023. block[0] += get_bits1(&s->gb) *
  1024. s->quant_matrixes[s->quant_sindex[i]][0] << Al;
  1025. else if (decode_dc_progressive(s, block, i, s->dc_index[i],
  1026. s->quant_matrixes[s->quant_sindex[i]],
  1027. Al) < 0) {
  1028. av_log(s->avctx, AV_LOG_ERROR,
  1029. "error y=%d x=%d\n", mb_y, mb_x);
  1030. return AVERROR_INVALIDDATA;
  1031. }
  1032. }
  1033. av_dlog(s->avctx, "mb: %d %d processed\n", mb_y, mb_x);
  1034. av_dlog(s->avctx, "%d %d %d %d %d %d %d %d \n",
  1035. mb_x, mb_y, x, y, c, s->bottom_field,
  1036. (v * mb_y + y) * 8, (h * mb_x + x) * 8);
  1037. if (++x == h) {
  1038. x = 0;
  1039. y++;
  1040. }
  1041. }
  1042. }
  1043. handle_rstn(s, nb_components);
  1044. }
  1045. }
  1046. return 0;
  1047. }
  1048. static int mjpeg_decode_scan_progressive_ac(MJpegDecodeContext *s, int ss,
  1049. int se, int Ah, int Al)
  1050. {
  1051. int mb_x, mb_y;
  1052. int EOBRUN = 0;
  1053. int c = s->comp_index[0];
  1054. uint8_t *data = s->picture.data[c];
  1055. int linesize = s->linesize[c];
  1056. int last_scan = 0;
  1057. int16_t *quant_matrix = s->quant_matrixes[s->quant_sindex[0]];
  1058. av_assert0(ss>=0 && Ah>=0 && Al>=0);
  1059. if (se < ss || se > 63) {
  1060. av_log(s->avctx, AV_LOG_ERROR, "SS/SE %d/%d is invalid\n", ss, se);
  1061. return AVERROR_INVALIDDATA;
  1062. }
  1063. if (!Al) {
  1064. s->coefs_finished[c] |= (1LL << (se + 1)) - (1LL << ss);
  1065. last_scan = !~s->coefs_finished[c];
  1066. }
  1067. if (s->interlaced && s->bottom_field)
  1068. data += linesize >> 1;
  1069. s->restart_count = 0;
  1070. for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
  1071. uint8_t *ptr = data + (mb_y * linesize * 8 >> s->avctx->lowres);
  1072. int block_idx = mb_y * s->block_stride[c];
  1073. int16_t (*block)[64] = &s->blocks[c][block_idx];
  1074. uint8_t *last_nnz = &s->last_nnz[c][block_idx];
  1075. for (mb_x = 0; mb_x < s->mb_width; mb_x++, block++, last_nnz++) {
  1076. int ret;
  1077. if (s->restart_interval && !s->restart_count)
  1078. s->restart_count = s->restart_interval;
  1079. if (Ah)
  1080. ret = decode_block_refinement(s, *block, last_nnz, s->ac_index[0],
  1081. quant_matrix, ss, se, Al, &EOBRUN);
  1082. else
  1083. ret = decode_block_progressive(s, *block, last_nnz, s->ac_index[0],
  1084. quant_matrix, ss, se, Al, &EOBRUN);
  1085. if (ret < 0) {
  1086. av_log(s->avctx, AV_LOG_ERROR,
  1087. "error y=%d x=%d\n", mb_y, mb_x);
  1088. return AVERROR_INVALIDDATA;
  1089. }
  1090. if (last_scan) {
  1091. s->dsp.idct_put(ptr, linesize, *block);
  1092. ptr += 8 >> s->avctx->lowres;
  1093. }
  1094. if (handle_rstn(s, 0))
  1095. EOBRUN = 0;
  1096. }
  1097. }
  1098. return 0;
  1099. }
  1100. int ff_mjpeg_decode_sos(MJpegDecodeContext *s, const uint8_t *mb_bitmask,
  1101. const AVFrame *reference)
  1102. {
  1103. int len, nb_components, i, h, v, predictor, point_transform;
  1104. int index, id, ret;
  1105. const int block_size = s->lossless ? 1 : 8;
  1106. int ilv, prev_shift;
  1107. if (!s->got_picture) {
  1108. av_log(s->avctx, AV_LOG_WARNING,
  1109. "Can not process SOS before SOF, skipping\n");
  1110. return -1;
  1111. }
  1112. av_assert0(s->picture_ptr->data[0]);
  1113. /* XXX: verify len field validity */
  1114. len = get_bits(&s->gb, 16);
  1115. nb_components = get_bits(&s->gb, 8);
  1116. if (nb_components == 0 || nb_components > MAX_COMPONENTS) {
  1117. av_log(s->avctx, AV_LOG_ERROR,
  1118. "decode_sos: nb_components (%d) unsupported\n", nb_components);
  1119. return AVERROR_PATCHWELCOME;
  1120. }
  1121. if (len != 6 + 2 * nb_components) {
  1122. av_log(s->avctx, AV_LOG_ERROR, "decode_sos: invalid len (%d)\n", len);
  1123. return AVERROR_INVALIDDATA;
  1124. }
  1125. for (i = 0; i < nb_components; i++) {
  1126. id = get_bits(&s->gb, 8) - 1;
  1127. av_log(s->avctx, AV_LOG_DEBUG, "component: %d\n", id);
  1128. /* find component index */
  1129. for (index = 0; index < s->nb_components; index++)
  1130. if (id == s->component_id[index])
  1131. break;
  1132. if (index == s->nb_components) {
  1133. av_log(s->avctx, AV_LOG_ERROR,
  1134. "decode_sos: index(%d) out of components\n", index);
  1135. return AVERROR_INVALIDDATA;
  1136. }
  1137. /* Metasoft MJPEG codec has Cb and Cr swapped */
  1138. if (s->avctx->codec_tag == MKTAG('M', 'T', 'S', 'J')
  1139. && nb_components == 3 && s->nb_components == 3 && i)
  1140. index = 3 - i;
  1141. s->quant_sindex[i] = s->quant_index[index];
  1142. s->nb_blocks[i] = s->h_count[index] * s->v_count[index];
  1143. s->h_scount[i] = s->h_count[index];
  1144. s->v_scount[i] = s->v_count[index];
  1145. if(nb_components == 3 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
  1146. index = (i+2)%3;
  1147. if(nb_components == 1 && s->nb_components == 3 && s->avctx->pix_fmt == AV_PIX_FMT_GBR24P)
  1148. index = (index+2)%3;
  1149. s->comp_index[i] = index;
  1150. s->dc_index[i] = get_bits(&s->gb, 4);
  1151. s->ac_index[i] = get_bits(&s->gb, 4);
  1152. if (s->dc_index[i] < 0 || s->ac_index[i] < 0 ||
  1153. s->dc_index[i] >= 4 || s->ac_index[i] >= 4)
  1154. goto out_of_range;
  1155. if (!s->vlcs[0][s->dc_index[i]].table || !(s->progressive ? s->vlcs[2][s->ac_index[0]].table : s->vlcs[1][s->ac_index[i]].table))
  1156. goto out_of_range;
  1157. }
  1158. predictor = get_bits(&s->gb, 8); /* JPEG Ss / lossless JPEG predictor /JPEG-LS NEAR */
  1159. ilv = get_bits(&s->gb, 8); /* JPEG Se / JPEG-LS ILV */
  1160. if(s->avctx->codec_tag != AV_RL32("CJPG")){
  1161. prev_shift = get_bits(&s->gb, 4); /* Ah */
  1162. point_transform = get_bits(&s->gb, 4); /* Al */
  1163. }else
  1164. prev_shift = point_transform = 0;
  1165. if (nb_components > 1) {
  1166. /* interleaved stream */
  1167. s->mb_width = (s->width + s->h_max * block_size - 1) / (s->h_max * block_size);
  1168. s->mb_height = (s->height + s->v_max * block_size - 1) / (s->v_max * block_size);
  1169. } else if (!s->ls) { /* skip this for JPEG-LS */
  1170. h = s->h_max / s->h_scount[0];
  1171. v = s->v_max / s->v_scount[0];
  1172. s->mb_width = (s->width + h * block_size - 1) / (h * block_size);
  1173. s->mb_height = (s->height + v * block_size - 1) / (v * block_size);
  1174. s->nb_blocks[0] = 1;
  1175. s->h_scount[0] = 1;
  1176. s->v_scount[0] = 1;
  1177. }
  1178. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1179. av_log(s->avctx, AV_LOG_DEBUG, "%s %s p:%d >>:%d ilv:%d bits:%d skip:%d %s comp:%d\n",
  1180. s->lossless ? "lossless" : "sequential DCT", s->rgb ? "RGB" : "",
  1181. predictor, point_transform, ilv, s->bits, s->mjpb_skiptosod,
  1182. s->pegasus_rct ? "PRCT" : (s->rct ? "RCT" : ""), nb_components);
  1183. /* mjpeg-b can have padding bytes between sos and image data, skip them */
  1184. for (i = s->mjpb_skiptosod; i > 0; i--)
  1185. skip_bits(&s->gb, 8);
  1186. next_field:
  1187. for (i = 0; i < nb_components; i++)
  1188. s->last_dc[i] = (4 << s->bits);
  1189. if (s->lossless) {
  1190. av_assert0(s->picture_ptr == &s->picture);
  1191. if (CONFIG_JPEGLS_DECODER && s->ls) {
  1192. // for () {
  1193. // reset_ls_coding_parameters(s, 0);
  1194. if ((ret = ff_jpegls_decode_picture(s, predictor,
  1195. point_transform, ilv)) < 0)
  1196. return ret;
  1197. } else {
  1198. if (s->rgb) {
  1199. if ((ret = ljpeg_decode_rgb_scan(s, nb_components, predictor, point_transform)) < 0)
  1200. return ret;
  1201. } else {
  1202. if ((ret = ljpeg_decode_yuv_scan(s, predictor,
  1203. point_transform,
  1204. nb_components)) < 0)
  1205. return ret;
  1206. }
  1207. }
  1208. } else {
  1209. if (s->progressive && predictor) {
  1210. av_assert0(s->picture_ptr == &s->picture);
  1211. if ((ret = mjpeg_decode_scan_progressive_ac(s, predictor,
  1212. ilv, prev_shift,
  1213. point_transform)) < 0)
  1214. return ret;
  1215. } else {
  1216. if ((ret = mjpeg_decode_scan(s, nb_components,
  1217. prev_shift, point_transform,
  1218. mb_bitmask, reference)) < 0)
  1219. return ret;
  1220. }
  1221. }
  1222. if (s->interlaced &&
  1223. get_bits_left(&s->gb) > 32 &&
  1224. show_bits(&s->gb, 8) == 0xFF) {
  1225. GetBitContext bak = s->gb;
  1226. align_get_bits(&bak);
  1227. if (show_bits(&bak, 16) == 0xFFD1) {
  1228. av_log(s->avctx, AV_LOG_DEBUG, "AVRn interlaced picture marker found\n");
  1229. s->gb = bak;
  1230. skip_bits(&s->gb, 16);
  1231. s->bottom_field ^= 1;
  1232. goto next_field;
  1233. }
  1234. }
  1235. emms_c();
  1236. return 0;
  1237. out_of_range:
  1238. av_log(s->avctx, AV_LOG_ERROR, "decode_sos: ac/dc index out of range\n");
  1239. return AVERROR_INVALIDDATA;
  1240. }
  1241. static int mjpeg_decode_dri(MJpegDecodeContext *s)
  1242. {
  1243. if (get_bits(&s->gb, 16) != 4)
  1244. return AVERROR_INVALIDDATA;
  1245. s->restart_interval = get_bits(&s->gb, 16);
  1246. s->restart_count = 0;
  1247. av_log(s->avctx, AV_LOG_DEBUG, "restart interval: %d\n",
  1248. s->restart_interval);
  1249. return 0;
  1250. }
  1251. static int mjpeg_decode_app(MJpegDecodeContext *s)
  1252. {
  1253. int len, id, i;
  1254. len = get_bits(&s->gb, 16);
  1255. if (len < 5)
  1256. return AVERROR_INVALIDDATA;
  1257. if (8 * len > get_bits_left(&s->gb))
  1258. return AVERROR_INVALIDDATA;
  1259. id = get_bits_long(&s->gb, 32);
  1260. len -= 6;
  1261. if (s->avctx->debug & FF_DEBUG_STARTCODE)
  1262. av_log(s->avctx, AV_LOG_DEBUG, "APPx %8X len=%d\n", id, len);
  1263. /* Buggy AVID, it puts EOI only at every 10th frame. */
  1264. /* Also, this fourcc is used by non-avid files too, it holds some
  1265. information, but it's always present in AVID-created files. */
  1266. if (id == AV_RB32("AVI1")) {
  1267. /* structure:
  1268. 4bytes AVI1
  1269. 1bytes polarity
  1270. 1bytes always zero
  1271. 4bytes field_size
  1272. 4bytes field_size_less_padding
  1273. */
  1274. s->buggy_avid = 1;
  1275. i = get_bits(&s->gb, 8); len--;
  1276. av_log(s->avctx, AV_LOG_DEBUG, "polarity %d\n", i);
  1277. #if 0
  1278. skip_bits(&s->gb, 8);
  1279. skip_bits(&s->gb, 32);
  1280. skip_bits(&s->gb, 32);
  1281. len -= 10;
  1282. #endif
  1283. goto out;
  1284. }
  1285. // len -= 2;
  1286. if (id == AV_RB32("JFIF")) {
  1287. int t_w, t_h, v1, v2;
  1288. skip_bits(&s->gb, 8); /* the trailing zero-byte */
  1289. v1 = get_bits(&s->gb, 8);
  1290. v2 = get_bits(&s->gb, 8);
  1291. skip_bits(&s->gb, 8);
  1292. s->avctx->sample_aspect_ratio.num = get_bits(&s->gb, 16);
  1293. s->avctx->sample_aspect_ratio.den = get_bits(&s->gb, 16);
  1294. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1295. av_log(s->avctx, AV_LOG_INFO,
  1296. "mjpeg: JFIF header found (version: %x.%x) SAR=%d/%d\n",
  1297. v1, v2,
  1298. s->avctx->sample_aspect_ratio.num,
  1299. s->avctx->sample_aspect_ratio.den);
  1300. t_w = get_bits(&s->gb, 8);
  1301. t_h = get_bits(&s->gb, 8);
  1302. if (t_w && t_h) {
  1303. /* skip thumbnail */
  1304. if (len -10 - (t_w * t_h * 3) > 0)
  1305. len -= t_w * t_h * 3;
  1306. }
  1307. len -= 10;
  1308. goto out;
  1309. }
  1310. if (id == AV_RB32("Adob") && (get_bits(&s->gb, 8) == 'e')) {
  1311. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1312. av_log(s->avctx, AV_LOG_INFO, "mjpeg: Adobe header found\n");
  1313. skip_bits(&s->gb, 16); /* version */
  1314. skip_bits(&s->gb, 16); /* flags0 */
  1315. skip_bits(&s->gb, 16); /* flags1 */
  1316. skip_bits(&s->gb, 8); /* transform */
  1317. len -= 7;
  1318. goto out;
  1319. }
  1320. if (id == AV_RB32("LJIF")) {
  1321. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1322. av_log(s->avctx, AV_LOG_INFO,
  1323. "Pegasus lossless jpeg header found\n");
  1324. skip_bits(&s->gb, 16); /* version ? */
  1325. skip_bits(&s->gb, 16); /* unknown always 0? */
  1326. skip_bits(&s->gb, 16); /* unknown always 0? */
  1327. skip_bits(&s->gb, 16); /* unknown always 0? */
  1328. switch (i=get_bits(&s->gb, 8)) {
  1329. case 1:
  1330. s->rgb = 1;
  1331. s->pegasus_rct = 0;
  1332. break;
  1333. case 2:
  1334. s->rgb = 1;
  1335. s->pegasus_rct = 1;
  1336. break;
  1337. default:
  1338. av_log(s->avctx, AV_LOG_ERROR, "unknown colorspace %d\n", i);
  1339. }
  1340. len -= 9;
  1341. goto out;
  1342. }
  1343. if (id == AV_RL32("colr") && len > 0) {
  1344. s->colr = get_bits(&s->gb, 8);
  1345. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1346. av_log(s->avctx, AV_LOG_INFO, "COLR %d\n", s->colr);
  1347. len --;
  1348. goto out;
  1349. }
  1350. if (id == AV_RL32("xfrm") && len > 0) {
  1351. s->xfrm = get_bits(&s->gb, 8);
  1352. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1353. av_log(s->avctx, AV_LOG_INFO, "XFRM %d\n", s->xfrm);
  1354. len --;
  1355. goto out;
  1356. }
  1357. /* EXIF metadata */
  1358. if (s->start_code == APP1 && id == AV_RB32("Exif")) {
  1359. GetByteContext gbytes;
  1360. int ret, le, ifd_offset, bytes_read;
  1361. const uint8_t *aligned;
  1362. skip_bits(&s->gb, 16); // skip padding
  1363. len -= 2;
  1364. // init byte wise reading
  1365. aligned = align_get_bits(&s->gb);
  1366. bytestream2_init(&gbytes, aligned, len);
  1367. // read TIFF header
  1368. ret = ff_tdecode_header(&gbytes, &le, &ifd_offset);
  1369. if (ret) {
  1370. av_log(s->avctx, AV_LOG_ERROR, "mjpeg: invalid TIFF header in EXIF data\n");
  1371. return ret;
  1372. }
  1373. bytestream2_seek(&gbytes, ifd_offset, SEEK_SET);
  1374. // read 0th IFD and store the metadata
  1375. // (return values > 0 indicate the presence of subimage metadata)
  1376. ret = ff_exif_decode_ifd(s->avctx, &gbytes, le, 0, &s->exif_metadata);
  1377. if (ret < 0) {
  1378. av_log(s->avctx, AV_LOG_ERROR, "mjpeg: error decoding EXIF data\n");
  1379. return ret;
  1380. }
  1381. bytes_read = bytestream2_tell(&gbytes);
  1382. skip_bits(&s->gb, bytes_read << 3);
  1383. len -= bytes_read;
  1384. goto out;
  1385. }
  1386. /* Apple MJPEG-A */
  1387. if ((s->start_code == APP1) && (len > (0x28 - 8))) {
  1388. id = get_bits_long(&s->gb, 32);
  1389. len -= 4;
  1390. /* Apple MJPEG-A */
  1391. if (id == AV_RB32("mjpg")) {
  1392. #if 0
  1393. skip_bits(&s->gb, 32); /* field size */
  1394. skip_bits(&s->gb, 32); /* pad field size */
  1395. skip_bits(&s->gb, 32); /* next off */
  1396. skip_bits(&s->gb, 32); /* quant off */
  1397. skip_bits(&s->gb, 32); /* huff off */
  1398. skip_bits(&s->gb, 32); /* image off */
  1399. skip_bits(&s->gb, 32); /* scan off */
  1400. skip_bits(&s->gb, 32); /* data off */
  1401. #endif
  1402. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1403. av_log(s->avctx, AV_LOG_INFO, "mjpeg: Apple MJPEG-A header found\n");
  1404. }
  1405. }
  1406. out:
  1407. /* slow but needed for extreme adobe jpegs */
  1408. if (len < 0)
  1409. av_log(s->avctx, AV_LOG_ERROR,
  1410. "mjpeg: error, decode_app parser read over the end\n");
  1411. while (--len > 0)
  1412. skip_bits(&s->gb, 8);
  1413. return 0;
  1414. }
  1415. static int mjpeg_decode_com(MJpegDecodeContext *s)
  1416. {
  1417. int len = get_bits(&s->gb, 16);
  1418. if (len >= 2 && 8 * len - 16 <= get_bits_left(&s->gb)) {
  1419. char *cbuf = av_malloc(len - 1);
  1420. if (cbuf) {
  1421. int i;
  1422. for (i = 0; i < len - 2; i++)
  1423. cbuf[i] = get_bits(&s->gb, 8);
  1424. if (i > 0 && cbuf[i - 1] == '\n')
  1425. cbuf[i - 1] = 0;
  1426. else
  1427. cbuf[i] = 0;
  1428. if (s->avctx->debug & FF_DEBUG_PICT_INFO)
  1429. av_log(s->avctx, AV_LOG_INFO, "comment: '%s'\n", cbuf);
  1430. /* buggy avid, it puts EOI only at every 10th frame */
  1431. if (!strncmp(cbuf, "AVID", 4)) {
  1432. s->buggy_avid = 1;
  1433. if (len > 14 && cbuf[12] == 1) /* 1 - NTSC, 2 - PAL */
  1434. s->interlace_polarity = 1;
  1435. } else if (!strcmp(cbuf, "CS=ITU601"))
  1436. s->cs_itu601 = 1;
  1437. else if ((!strncmp(cbuf, "Intel(R) JPEG Library, version 1", 32)) ||
  1438. (!strncmp(cbuf, "Metasoft MJPEG Codec", 20)))
  1439. s->flipped = 1;
  1440. av_free(cbuf);
  1441. }
  1442. }
  1443. return 0;
  1444. }
  1445. /* return the 8 bit start code value and update the search
  1446. state. Return -1 if no start code found */
  1447. static int find_marker(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
  1448. {
  1449. const uint8_t *buf_ptr;
  1450. unsigned int v, v2;
  1451. int val;
  1452. int skipped = 0;
  1453. buf_ptr = *pbuf_ptr;
  1454. while (buf_end - buf_ptr > 1) {
  1455. v = *buf_ptr++;
  1456. v2 = *buf_ptr;
  1457. if ((v == 0xff) && (v2 >= 0xc0) && (v2 <= 0xfe) && buf_ptr < buf_end) {
  1458. val = *buf_ptr++;
  1459. goto found;
  1460. }
  1461. skipped++;
  1462. }
  1463. buf_ptr = buf_end;
  1464. val = -1;
  1465. found:
  1466. av_dlog(NULL, "find_marker skipped %d bytes\n", skipped);
  1467. *pbuf_ptr = buf_ptr;
  1468. return val;
  1469. }
  1470. int ff_mjpeg_find_marker(MJpegDecodeContext *s,
  1471. const uint8_t **buf_ptr, const uint8_t *buf_end,
  1472. const uint8_t **unescaped_buf_ptr,
  1473. int *unescaped_buf_size)
  1474. {
  1475. int start_code;
  1476. start_code = find_marker(buf_ptr, buf_end);
  1477. av_fast_padded_malloc(&s->buffer, &s->buffer_size, buf_end - *buf_ptr);
  1478. if (!s->buffer)
  1479. return AVERROR(ENOMEM);
  1480. /* unescape buffer of SOS, use special treatment for JPEG-LS */
  1481. if (start_code == SOS && !s->ls) {
  1482. const uint8_t *src = *buf_ptr;
  1483. uint8_t *dst = s->buffer;
  1484. while (src < buf_end) {
  1485. uint8_t x = *(src++);
  1486. *(dst++) = x;
  1487. if (s->avctx->codec_id != AV_CODEC_ID_THP) {
  1488. if (x == 0xff) {
  1489. while (src < buf_end && x == 0xff)
  1490. x = *(src++);
  1491. if (x >= 0xd0 && x <= 0xd7)
  1492. *(dst++) = x;
  1493. else if (x)
  1494. break;
  1495. }
  1496. }
  1497. }
  1498. *unescaped_buf_ptr = s->buffer;
  1499. *unescaped_buf_size = dst - s->buffer;
  1500. memset(s->buffer + *unescaped_buf_size, 0,
  1501. FF_INPUT_BUFFER_PADDING_SIZE);
  1502. av_log(s->avctx, AV_LOG_DEBUG, "escaping removed %td bytes\n",
  1503. (buf_end - *buf_ptr) - (dst - s->buffer));
  1504. } else if (start_code == SOS && s->ls) {
  1505. const uint8_t *src = *buf_ptr;
  1506. uint8_t *dst = s->buffer;
  1507. int bit_count = 0;
  1508. int t = 0, b = 0;
  1509. PutBitContext pb;
  1510. s->cur_scan++;
  1511. /* find marker */
  1512. while (src + t < buf_end) {
  1513. uint8_t x = src[t++];
  1514. if (x == 0xff) {
  1515. while ((src + t < buf_end) && x == 0xff)
  1516. x = src[t++];
  1517. if (x & 0x80) {
  1518. t -= FFMIN(2, t);
  1519. break;
  1520. }
  1521. }
  1522. }
  1523. bit_count = t * 8;
  1524. init_put_bits(&pb, dst, t);
  1525. /* unescape bitstream */
  1526. while (b < t) {
  1527. uint8_t x = src[b++];
  1528. put_bits(&pb, 8, x);
  1529. if (x == 0xFF) {
  1530. x = src[b++];
  1531. put_bits(&pb, 7, x);
  1532. bit_count--;
  1533. }
  1534. }
  1535. flush_put_bits(&pb);
  1536. *unescaped_buf_ptr = dst;
  1537. *unescaped_buf_size = (bit_count + 7) >> 3;
  1538. memset(s->buffer + *unescaped_buf_size, 0,
  1539. FF_INPUT_BUFFER_PADDING_SIZE);
  1540. } else {
  1541. *unescaped_buf_ptr = *buf_ptr;
  1542. *unescaped_buf_size = buf_end - *buf_ptr;
  1543. }
  1544. return start_code;
  1545. }
int ff_mjpeg_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                          AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    MJpegDecodeContext *s = avctx->priv_data;
    const uint8_t *buf_end, *buf_ptr;
    const uint8_t *unescaped_buf_ptr;
    int hshift, vshift;
    int unescaped_buf_size;
    int start_code;
    int i, index;
    int ret = 0;

    av_dict_free(&s->exif_metadata);

    buf_ptr = buf;
    buf_end = buf + buf_size;
    while (buf_ptr < buf_end) {
        /* find start of next marker */
        start_code = ff_mjpeg_find_marker(s, &buf_ptr, buf_end,
                                          &unescaped_buf_ptr,
                                          &unescaped_buf_size);
        /* EOF */
        if (start_code < 0) {
            goto the_end;
        } else if (unescaped_buf_size > INT_MAX / 8) {
            av_log(avctx, AV_LOG_ERROR,
                   "MJPEG packet 0x%x too big (%d/%d), corrupt data?\n",
                   start_code, unescaped_buf_size, buf_size);
            return AVERROR_INVALIDDATA;
        }
        av_log(avctx, AV_LOG_DEBUG, "marker=%x avail_size_in_buf=%td\n",
               start_code, buf_end - buf_ptr);

        ret = init_get_bits8(&s->gb, unescaped_buf_ptr, unescaped_buf_size);
        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR, "invalid buffer\n");
            goto fail;
        }

        s->start_code = start_code;
        if (s->avctx->debug & FF_DEBUG_STARTCODE)
            av_log(avctx, AV_LOG_DEBUG, "startcode: %X\n", start_code);

        /* process markers */
        if (start_code >= 0xd0 && start_code <= 0xd7)
            av_log(avctx, AV_LOG_DEBUG,
                   "restart marker: %d\n", start_code & 0x0f);
        /* APP fields */
        else if (start_code >= APP0 && start_code <= APP15)
            mjpeg_decode_app(s);
        /* Comment */
        else if (start_code == COM)
            mjpeg_decode_com(s);

        ret = -1;

        if (!CONFIG_JPEGLS_DECODER &&
            (start_code == SOF48 || start_code == LSE)) {
            av_log(avctx, AV_LOG_ERROR, "JPEG-LS support not enabled.\n");
            return AVERROR(ENOSYS);
        }

        switch (start_code) {
        case SOI:
            s->restart_interval = 0;
            s->restart_count    = 0;
            /* nothing to do on SOI */
            break;
        case DQT:
            ff_mjpeg_decode_dqt(s);
            break;
        case DHT:
            if ((ret = ff_mjpeg_decode_dht(s)) < 0) {
                av_log(avctx, AV_LOG_ERROR, "huffman table decode error\n");
                goto fail;
            }
            break;
        case SOF0:
        case SOF1:
            s->lossless    = 0;
            s->ls          = 0;
            s->progressive = 0;
            if ((ret = ff_mjpeg_decode_sof(s)) < 0)
                goto fail;
            break;
        case SOF2:
            s->lossless    = 0;
            s->ls          = 0;
            s->progressive = 1;
            if ((ret = ff_mjpeg_decode_sof(s)) < 0)
                goto fail;
            break;
        case SOF3:
            s->lossless    = 1;
            s->ls          = 0;
            s->progressive = 0;
            if ((ret = ff_mjpeg_decode_sof(s)) < 0)
                goto fail;
            break;
        case SOF48:
            s->lossless    = 1;
            s->ls          = 1;
            s->progressive = 0;
            if ((ret = ff_mjpeg_decode_sof(s)) < 0)
                goto fail;
            break;
        case LSE:
            if (!CONFIG_JPEGLS_DECODER ||
                (ret = ff_jpegls_decode_lse(s)) < 0)
                goto fail;
            break;
        case EOI:
eoi_parser:
            s->cur_scan = 0;
            if (!s->got_picture) {
                av_log(avctx, AV_LOG_WARNING,
                       "Found EOI before any SOF, ignoring\n");
                break;
            }
            if (s->interlaced) {
                s->bottom_field ^= 1;
                /* if not bottom field, do not output image yet */
                if (s->bottom_field == !s->interlace_polarity)
                    break;
            }
            if ((ret = av_frame_ref(data, s->picture_ptr)) < 0)
                return ret;
            *got_frame = 1;
            s->got_picture = 0;

            if (!s->lossless) {
                int qp = FFMAX3(s->qscale[0],
                                s->qscale[1],
                                s->qscale[2]);
                int qpw = (s->width + 15) / 16;
                AVBufferRef *qp_table_buf = av_buffer_alloc(qpw);
                if (qp_table_buf) {
                    memset(qp_table_buf->data, qp, qpw);
                    av_frame_set_qp_table(data, qp_table_buf, 0, FF_QSCALE_TYPE_MPEG1);
                }

                if (avctx->debug & FF_DEBUG_QP)
                    av_log(avctx, AV_LOG_DEBUG, "QP: %d\n", qp);
            }

            goto the_end;
        case SOS:
            if ((ret = ff_mjpeg_decode_sos(s, NULL, NULL)) < 0 &&
                (avctx->err_recognition & AV_EF_EXPLODE))
                goto fail;
            break;
        case DRI:
            mjpeg_decode_dri(s);
            break;
        case SOF5:
        case SOF6:
        case SOF7:
        case SOF9:
        case SOF10:
        case SOF11:
        case SOF13:
        case SOF14:
        case SOF15:
        case JPG:
            av_log(avctx, AV_LOG_ERROR,
                   "mjpeg: unsupported coding type (%x)\n", start_code);
            break;
        }

        /* end of processing for this start code */
        buf_ptr += (get_bits_count(&s->gb) + 7) / 8;
        av_log(avctx, AV_LOG_DEBUG,
               "marker parser used %d bytes (%d bits)\n",
               (get_bits_count(&s->gb) + 7) / 8, get_bits_count(&s->gb));
    }
    if (s->got_picture) {
        av_log(avctx, AV_LOG_WARNING, "EOI missing, emulating\n");
        goto eoi_parser;
    }
    av_log(avctx, AV_LOG_FATAL, "No JPEG data found in image\n");
    return AVERROR_INVALIDDATA;
fail:
    s->got_picture = 0;
    return ret;
the_end:
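    /* The chroma plane selected by upscale_h was decoded at half its
     * nominal width; widen it to full width in place using linear
     * interpolation. */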
    if (s->upscale_h) {
        uint8_t *line = s->picture_ptr->data[s->upscale_h];
        av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV444P  ||
                   avctx->pix_fmt == AV_PIX_FMT_YUVJ440P ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV440P);
        for (i = 0; i < s->chroma_height; i++) {
            for (index = s->width - 1; index; index--)
                line[index] = (line[index / 2] + line[(index + 1) / 2]) >> 1;
            line += s->linesize[s->upscale_h];
        }
    }
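    /* The chroma plane selected by upscale_v was decoded at half its
     * nominal height; expand it to full height in place, working from the
     * bottom row upwards so the not-yet-expanded source rows stay intact. */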
    if (s->upscale_v) {
        uint8_t *dst = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[(s->height - 1) * s->linesize[s->upscale_v]];
        int w;
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
        w = s->width >> hshift;
        av_assert0(avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV444P  ||
                   avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
                   avctx->pix_fmt == AV_PIX_FMT_YUV422P);
        for (i = s->height - 1; i; i--) {
            uint8_t *src1 = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[i / 2 * s->linesize[s->upscale_v]];
            uint8_t *src2 = &((uint8_t *)s->picture_ptr->data[s->upscale_v])[(i + 1) / 2 * s->linesize[s->upscale_v]];
            if (src1 == src2) {
                memcpy(dst, src1, w);
            } else {
                for (index = 0; index < w; index++)
                    dst[index] = (src1[index] + src2[index]) >> 1;
            }
            dst -= s->linesize[s->upscale_v];
        }
    }
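    /* When the stream was flagged as flipped, mirror the picture vertically
     * by swapping rows in every plane. */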
    if (s->flipped && (s->avctx->flags & CODEC_FLAG_EMU_EDGE)) {
        int j;
        avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &hshift, &vshift);
        for (index = 0; index < 4; index++) {
            uint8_t *dst = s->picture_ptr->data[index];
            int w = s->width;
            int h = s->height;

            if (index && index < 3) {
                w = FF_CEIL_RSHIFT(w, hshift);
                h = FF_CEIL_RSHIFT(h, vshift);
            }
            if (dst) {
                uint8_t *dst2 = dst + s->linesize[index] * (h - 1);
                for (i = 0; i < h / 2; i++) {
                    for (j = 0; j < w; j++)
                        FFSWAP(int, dst[j], dst2[j]);
                    dst  += s->linesize[index];
                    dst2 -= s->linesize[index];
                }
            }
        }
    }
    av_dict_copy(avpriv_frame_get_metadatap(data), s->exif_metadata, 0);
    av_dict_free(&s->exif_metadata);

    av_log(avctx, AV_LOG_DEBUG, "decode frame unused %td bytes\n",
           buf_end - buf_ptr);
//  return buf_end - buf_ptr;
    return buf_ptr - buf;
}

av_cold int ff_mjpeg_decode_end(AVCodecContext *avctx)
{
    MJpegDecodeContext *s = avctx->priv_data;
    int i, j;

    if (s->interlaced && s->bottom_field == !s->interlace_polarity && s->got_picture && !avctx->frame_number) {
        av_log(avctx, AV_LOG_INFO, "Single field\n");
    }

    if (s->picture_ptr)
        av_frame_unref(s->picture_ptr);

    av_free(s->buffer);
    av_freep(&s->ljpeg_buffer);
    s->ljpeg_buffer_size = 0;

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 4; j++)
            ff_free_vlc(&s->vlcs[i][j]);
    }
    for (i = 0; i < MAX_COMPONENTS; i++) {
        av_freep(&s->blocks[i]);
        av_freep(&s->last_nnz[i]);
    }

    av_dict_free(&s->exif_metadata);

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    MJpegDecodeContext *s = avctx->priv_data;
    s->got_picture = 0;
}

#if CONFIG_MJPEG_DECODER
#define OFFSET(x) offsetof(MJpegDecodeContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "extern_huff", "Use external huffman table.",
      OFFSET(extern_huff), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VD },
    { NULL },
};

static const AVClass mjpegdec_class = {
    .class_name = "MJPEG decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_mjpeg_decoder = {
    .name           = "mjpeg",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_MJPEG,
    .priv_data_size = sizeof(MJpegDecodeContext),
    .init           = ff_mjpeg_decode_init,
    .close          = ff_mjpeg_decode_end,
    .decode         = ff_mjpeg_decode_frame,
    .flush          = decode_flush,
    .capabilities   = CODEC_CAP_DR1,
    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
    .priv_class     = &mjpegdec_class,
};
#endif

#if CONFIG_THP_DECODER
AVCodec ff_thp_decoder = {
    .name           = "thp",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_THP,
    .priv_data_size = sizeof(MJpegDecodeContext),
    .init           = ff_mjpeg_decode_init,
    .close          = ff_mjpeg_decode_end,
    .decode         = ff_mjpeg_decode_frame,
    .flush          = decode_flush,
    .capabilities   = CODEC_CAP_DR1,
    .max_lowres     = 3,
    .long_name      = NULL_IF_CONFIG_SMALL("Nintendo Gamecube THP video"),
};
#endif