/*
 * JPEG 2000 decoding support via OpenJPEG
 * Copyright (c) 2009 Jaikrishnan Menon <realityman@gmx.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * JPEG 2000 decoder using libopenjpeg
 */

#define OPJ_STATIC
#include <openjpeg.h>

#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/pixfmt.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "thread.h"

#define JP2_SIG_TYPE  0x6A502020
#define JP2_SIG_VALUE 0x0D0A870A

// pix_fmts with lower bpp have to be listed before
// similar pix_fmts with higher bpp.
#define RGB_PIXEL_FORMATS  PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48, PIX_FMT_RGBA64

#define GRAY_PIXEL_FORMATS PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY16

#define YUV_PIXEL_FORMATS  PIX_FMT_YUV410P,   PIX_FMT_YUV411P,   PIX_FMT_YUVA420P, \
                           PIX_FMT_YUV420P,   PIX_FMT_YUV422P,   PIX_FMT_YUVA422P, \
                           PIX_FMT_YUV440P,   PIX_FMT_YUV444P,   PIX_FMT_YUVA444P, \
                           PIX_FMT_YUV420P9,  PIX_FMT_YUV422P9,  PIX_FMT_YUV444P9, \
                           PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, \
                           PIX_FMT_YUV420P12, PIX_FMT_YUV422P12, PIX_FMT_YUV444P12, \
                           PIX_FMT_YUV420P14, PIX_FMT_YUV422P14, PIX_FMT_YUV444P14, \
                           PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16

static const enum PixelFormat libopenjpeg_rgb_pix_fmts[]  = { RGB_PIXEL_FORMATS };
static const enum PixelFormat libopenjpeg_gray_pix_fmts[] = { GRAY_PIXEL_FORMATS };
static const enum PixelFormat libopenjpeg_yuv_pix_fmts[]  = { YUV_PIXEL_FORMATS };
static const enum PixelFormat libopenjpeg_all_pix_fmts[]  = { RGB_PIXEL_FORMATS,
                                                              GRAY_PIXEL_FORMATS,
                                                              YUV_PIXEL_FORMATS };

typedef struct {
    AVClass *class;
    opj_dparameters_t dec_params;
    AVFrame image;
    int lowqual;
} LibOpenJPEGContext;
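
/* Check whether the OpenJPEG image can be represented by the given pixel
 * format: component count, per-component precision and the dx/dy subsampling
 * factors all have to fit the format descriptor. */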
static inline int libopenjpeg_matches_pix_fmt(const opj_image_t *image, enum PixelFormat pix_fmt)
{
    AVPixFmtDescriptor descriptor = av_pix_fmt_descriptors[pix_fmt];
    int match = 1;

    if (descriptor.nb_components != image->numcomps) {
        return 0;
    }

    switch (descriptor.nb_components) {
    case 4:
        match = match && descriptor.comp[3].depth_minus1 + 1 >= image->comps[3].prec &&
                         1 == image->comps[3].dx &&
                         1 == image->comps[3].dy;
    case 3:
        match = match && descriptor.comp[2].depth_minus1 + 1 >= image->comps[2].prec &&
                         1 << descriptor.log2_chroma_w == image->comps[2].dx &&
                         1 << descriptor.log2_chroma_h == image->comps[2].dy;
    case 2:
        match = match && descriptor.comp[1].depth_minus1 + 1 >= image->comps[1].prec &&
                         1 << descriptor.log2_chroma_w == image->comps[1].dx &&
                         1 << descriptor.log2_chroma_h == image->comps[1].dy;
    case 1:
        match = match && descriptor.comp[0].depth_minus1 + 1 >= image->comps[0].prec &&
                         1 == image->comps[0].dx &&
                         1 == image->comps[0].dy;
    default:
        break;
    }

    return match;
}
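
/* Pick a pixel format for the decoded image: narrow the candidate list by the
 * declared color space, then return the first candidate whose layout matches
 * the image components. */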
static inline enum PixelFormat libopenjpeg_guess_pix_fmt(const opj_image_t *image) {
    int index;
    const enum PixelFormat *possible_fmts = NULL;
    int possible_fmts_nb = 0;

    switch (image->color_space) {
    case CLRSPC_SRGB:
        possible_fmts    = libopenjpeg_rgb_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_rgb_pix_fmts);
        break;
    case CLRSPC_GRAY:
        possible_fmts    = libopenjpeg_gray_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_gray_pix_fmts);
        break;
    case CLRSPC_SYCC:
        possible_fmts    = libopenjpeg_yuv_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_yuv_pix_fmts);
        break;
    default:
        possible_fmts    = libopenjpeg_all_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_all_pix_fmts);
        break;
    }

    for (index = 0; index < possible_fmts_nb; ++index) {
        if (libopenjpeg_matches_pix_fmt(image, possible_fmts[index])) {
            return possible_fmts[index];
        }
    }

    return PIX_FMT_NONE;
}
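
/* Return 1 if all components of the pixel format live on a single plane
 * (packed layout). PIX_FMT_GRAY16 is explicitly treated as non-packed, which
 * routes it through the planar 16-bit copy below. */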
static inline int libopenjpeg_ispacked(enum PixelFormat pix_fmt) {
    int i, component_plane;

    if (pix_fmt == PIX_FMT_GRAY16)
        return 0;

    component_plane = av_pix_fmt_descriptors[pix_fmt].comp[0].plane;
    for (i = 1; i < av_pix_fmt_descriptors[pix_fmt].nb_components; i++) {
        if (component_plane != av_pix_fmt_descriptors[pix_fmt].comp[i].plane)
            return 0;
    }
    return 1;
}
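
/* Interleave 8-bit samples from the per-component OpenJPEG buffers into the
 * single packed plane of the output frame. */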
static inline void libopenjpeg_copy_to_packed8(AVFrame *picture, opj_image_t *image) {
    uint8_t *img_ptr;
    int index, x, y, c;

    for (y = 0; y < picture->height; y++) {
        index   = y * picture->width;
        img_ptr = picture->data[0] + y * picture->linesize[0];
        for (x = 0; x < picture->width; x++, index++) {
            for (c = 0; c < image->numcomps; c++) {
                *img_ptr++ = image->comps[c].data[index];
            }
        }
    }
}
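
/* Interleave 16-bit samples into the packed plane, left-shifting each
 * component by min(16 - prec, 8) bits so that lower-precision samples occupy
 * the upper part of the 16-bit range. */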
static inline void libopenjpeg_copy_to_packed16(AVFrame *picture, opj_image_t *image) {
    uint16_t *img_ptr;
    int index, x, y, c;
    int adjust[4];

    for (x = 0; x < image->numcomps; x++)
        adjust[x] = FFMAX(FFMIN(16 - image->comps[x].prec, 8), 0);

    for (y = 0; y < picture->height; y++) {
        index   = y * picture->width;
        img_ptr = (uint16_t *)(picture->data[0] + y * picture->linesize[0]);
        for (x = 0; x < picture->width; x++, index++) {
            for (c = 0; c < image->numcomps; c++) {
                *img_ptr++ = image->comps[c].data[index] << adjust[c];
            }
        }
    }
}
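
/* Copy each component into its own 8-bit plane of the output frame. */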
static inline void libopenjpeg_copyto8(AVFrame *picture, opj_image_t *image) {
    int *comp_data;
    uint8_t *img_ptr;
    int index, x, y;

    for (index = 0; index < image->numcomps; index++) {
        comp_data = image->comps[index].data;
        for (y = 0; y < image->comps[index].h; y++) {
            img_ptr = picture->data[index] + y * picture->linesize[index];
            for (x = 0; x < image->comps[index].w; x++) {
                *img_ptr = (uint8_t)*comp_data;
                img_ptr++;
                comp_data++;
            }
        }
    }
}
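
/* Copy each component into its own 16-bit plane of the output frame; samples
 * are copied as-is, without any precision adjustment. */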
static inline void libopenjpeg_copyto16(AVFrame *picture, opj_image_t *image) {
    int *comp_data;
    uint16_t *img_ptr;
    int index, x, y;

    for (index = 0; index < image->numcomps; index++) {
        comp_data = image->comps[index].data;
        for (y = 0; y < image->comps[index].h; y++) {
            img_ptr = (uint16_t *)(picture->data[index] + y * picture->linesize[index]);
            for (x = 0; x < image->comps[index].w; x++) {
                *img_ptr = *comp_data;
                img_ptr++;
                comp_data++;
            }
        }
    }
}
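
/* Set up default OpenJPEG decoding parameters and publish ctx->image as the
 * codec's coded_frame. */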
static av_cold int libopenjpeg_decode_init(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    opj_set_default_decoder_parameters(&ctx->dec_params);
    avcodec_get_frame_defaults(&ctx->image);
    avctx->coded_frame = &ctx->image;
    return 0;
}

static av_cold int libopenjpeg_decode_init_thread_copy(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    avctx->coded_frame = &ctx->image;
    return 0;
}
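
/* Decode one JPEG 2000 frame. The codestream is parsed twice: a first pass
 * limited to the main header determines dimensions and pixel format, then a
 * second pass decodes the actual image data into the allocated frame. */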
static int libopenjpeg_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    uint8_t *buf            = avpkt->data;
    int buf_size            = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture        = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, ret = -1;
    int pixel_size = 0;
    int ispacked   = 0;
    int i;

    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if ((AV_RB32(buf)     == 12) &&
        (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
        (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        /* If the AVPacket contains a jp2c box, then skip to
         * the starting byte of the codestream. */
        if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
            buf += 8;
        dec = opj_create_decompress(CODEC_J2K);
    }

    if (!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    opj_set_event_mgr((opj_common_ptr)dec, NULL, NULL);

    ctx->dec_params.cp_limit_decoding = LIMIT_TO_MAIN_HEADER;
    ctx->dec_params.cp_layer          = ctx->lowqual;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);

    if (!stream) {
        av_log(avctx, AV_LOG_ERROR,
               "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the header only.
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);

    if (!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    width  = image->x1 - image->x0;
    height = image->y1 - image->y0;

    if (av_image_check_size(width, height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "%dx%d dimension invalid.\n", width, height);
        goto done;
    }

    avcodec_set_dimensions(avctx, width, height);

    if (avctx->pix_fmt != PIX_FMT_NONE)
        if (!libopenjpeg_matches_pix_fmt(image, avctx->pix_fmt))
            avctx->pix_fmt = PIX_FMT_NONE;

    if (avctx->pix_fmt == PIX_FMT_NONE)
        avctx->pix_fmt = libopenjpeg_guess_pix_fmt(image);

    if (avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(avctx, AV_LOG_ERROR, "Unable to determine pixel format\n");
        goto done;
    }

    for (i = 0; i < image->numcomps; i++)
        if (image->comps[i].prec > avctx->bits_per_raw_sample)
            avctx->bits_per_raw_sample = image->comps[i].prec;

    if (picture->data[0])
        ff_thread_release_buffer(avctx, picture);

    if (ff_thread_get_buffer(avctx, picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed\n");
        goto done;
    }

    ctx->dec_params.cp_limit_decoding = NO_LIMITATION;
    ctx->dec_params.cp_reduce         = avctx->lowres;
    // Tie decoder with decoding parameters.
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr)dec, buf, buf_size);

    if (!stream) {
        av_log(avctx, AV_LOG_ERROR,
               "Codestream could not be opened for reading.\n");
        goto done;
    }

    opj_image_destroy(image);
    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);

    if (!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        goto done;
    }
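
    /* Copy the decoded samples into the frame: the per-pixel byte step of the
     * first component selects between the 8- and 16-bit copy routines, and
     * packed formats interleave all components into plane 0. */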
    pixel_size = av_pix_fmt_descriptors[avctx->pix_fmt].comp[0].step_minus1 + 1;
    ispacked   = libopenjpeg_ispacked(avctx->pix_fmt);

    switch (pixel_size) {
    case 1:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        } else {
            libopenjpeg_copyto8(picture, image);
        }
        break;
    case 2:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        } else {
            libopenjpeg_copyto16(picture, image);
        }
        break;
    case 3:
    case 4:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        }
        break;
    case 6:
    case 8:
        if (ispacked) {
            libopenjpeg_copy_to_packed16(picture, image);
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel size %d\n", pixel_size);
        goto done;
    }

    *output    = ctx->image;
    *data_size = sizeof(AVPicture);
    ret        = buf_size;

done:
    opj_image_destroy(image);
    opj_destroy_decompress(dec);
    return ret;
}

static av_cold int libopenjpeg_decode_close(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    if (ctx->image.data[0])
        ff_thread_release_buffer(avctx, &ctx->image);
    return 0;
}

#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM

static const AVOption options[] = {
    { "lowqual", "Limit the number of layers used for decoding", OFFSET(lowqual), AV_OPT_TYPE_INT, { 0 }, 0, INT_MAX, VD },
    { NULL },
};

static const AVClass class = {
    .class_name = "libopenjpeg",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_libopenjpeg_decoder = {
    .name             = "libopenjpeg",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = CODEC_ID_JPEG2000,
    .priv_data_size   = sizeof(LibOpenJPEGContext),
    .init             = libopenjpeg_decode_init,
    .close            = libopenjpeg_decode_close,
    .decode           = libopenjpeg_decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .max_lowres       = 5,
    .long_name        = NULL_IF_CONFIG_SMALL("OpenJPEG JPEG 2000"),
    .priv_class       = &class,
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy),
};