/*
 * JPEG 2000 decoding support via OpenJPEG
 * Copyright (c) 2009 Jaikrishnan Menon <realityman@gmx.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * JPEG 2000 decoder using libopenjpeg
 */

#include "libavutil/imgutils.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#include "libavutil/intreadwrite.h"
#include "thread.h"

#define OPJ_STATIC
#include <openjpeg.h>

#define JP2_SIG_TYPE    0x6A502020
#define JP2_SIG_VALUE   0x0D0A870A

// pix_fmts with lower bpp have to be listed before
// similar pix_fmts with higher bpp.
#define RGB_PIXEL_FORMATS  PIX_FMT_RGB24, PIX_FMT_RGBA, PIX_FMT_RGB48, PIX_FMT_RGBA64

#define GRAY_PIXEL_FORMATS PIX_FMT_GRAY8, PIX_FMT_GRAY8A, PIX_FMT_GRAY16

#define YUV_PIXEL_FORMATS  PIX_FMT_YUV410P,   PIX_FMT_YUV411P,   PIX_FMT_YUVA420P, \
                           PIX_FMT_YUV420P,   PIX_FMT_YUV422P,   PIX_FMT_YUVA422P, \
                           PIX_FMT_YUV440P,   PIX_FMT_YUV444P,   PIX_FMT_YUVA444P, \
                           PIX_FMT_YUV420P9,  PIX_FMT_YUV422P9,  PIX_FMT_YUV444P9, \
                           PIX_FMT_YUV420P10, PIX_FMT_YUV422P10, PIX_FMT_YUV444P10, \
                           PIX_FMT_YUV420P12, PIX_FMT_YUV422P12, PIX_FMT_YUV444P12, \
                           PIX_FMT_YUV420P14, PIX_FMT_YUV422P14, PIX_FMT_YUV444P14, \
                           PIX_FMT_YUV420P16, PIX_FMT_YUV422P16, PIX_FMT_YUV444P16

static const enum PixelFormat libopenjpeg_rgb_pix_fmts[]  = {RGB_PIXEL_FORMATS};
static const enum PixelFormat libopenjpeg_gray_pix_fmts[] = {GRAY_PIXEL_FORMATS};
static const enum PixelFormat libopenjpeg_yuv_pix_fmts[]  = {YUV_PIXEL_FORMATS};
static const enum PixelFormat libopenjpeg_all_pix_fmts[]  = {RGB_PIXEL_FORMATS, GRAY_PIXEL_FORMATS, YUV_PIXEL_FORMATS};

typedef struct {
    opj_dparameters_t dec_params;
    AVFrame image;
} LibOpenJPEGContext;
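
/* Check whether pix_fmt can hold the decoded image: the component count
 * must match, every component of the format must be at least as deep as
 * the corresponding codestream component, and the chroma subsampling
 * factors must agree with the component dx/dy sampling. The switch falls
 * through intentionally from the highest component index down. */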
static inline int libopenjpeg_matches_pix_fmt(const opj_image_t *image, enum PixelFormat pix_fmt) {
    AVPixFmtDescriptor descriptor = av_pix_fmt_descriptors[pix_fmt];
    int match = 1;

    if (descriptor.nb_components != image->numcomps) {
        return 0;
    }

    switch (descriptor.nb_components) {
    case 4:
        match = match && descriptor.comp[3].depth_minus1 + 1 >= image->comps[3].prec &&
                         1 == image->comps[3].dx &&
                         1 == image->comps[3].dy;
    case 3:
        match = match && descriptor.comp[2].depth_minus1 + 1 >= image->comps[2].prec &&
                         1 << descriptor.log2_chroma_w == image->comps[2].dx &&
                         1 << descriptor.log2_chroma_h == image->comps[2].dy;
    case 2:
        match = match && descriptor.comp[1].depth_minus1 + 1 >= image->comps[1].prec &&
                         1 << descriptor.log2_chroma_w == image->comps[1].dx &&
                         1 << descriptor.log2_chroma_h == image->comps[1].dy;
    case 1:
        match = match && descriptor.comp[0].depth_minus1 + 1 >= image->comps[0].prec &&
                         1 == image->comps[0].dx &&
                         1 == image->comps[0].dy;
    default:
        break;
    }

    return match;
}
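
/* Pick the first pixel format that can hold the decoded image from the
 * candidate list for the codestream colorspace (RGB, gray, YUV, or all
 * formats when the colorspace is unknown). */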
static inline enum PixelFormat libopenjpeg_guess_pix_fmt(const opj_image_t *image) {
    int index;
    const enum PixelFormat *possible_fmts = NULL;
    int possible_fmts_nb = 0;

    switch (image->color_space) {
    case CLRSPC_SRGB:
        possible_fmts    = libopenjpeg_rgb_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_rgb_pix_fmts);
        break;
    case CLRSPC_GRAY:
        possible_fmts    = libopenjpeg_gray_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_gray_pix_fmts);
        break;
    case CLRSPC_SYCC:
        possible_fmts    = libopenjpeg_yuv_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_yuv_pix_fmts);
        break;
    default:
        possible_fmts    = libopenjpeg_all_pix_fmts;
        possible_fmts_nb = FF_ARRAY_ELEMS(libopenjpeg_all_pix_fmts);
        break;
    }

    for (index = 0; index < possible_fmts_nb; ++index) {
        if (libopenjpeg_matches_pix_fmt(image, possible_fmts[index])) {
            return possible_fmts[index];
        }
    }

    return PIX_FMT_NONE;
}
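
/* Return 1 if all components of pix_fmt live in a single plane (packed
 * layout). PIX_FMT_GRAY16 is deliberately reported as not packed so it
 * takes the 16-bit planar copy path. */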
static inline int libopenjpeg_ispacked(enum PixelFormat pix_fmt) {
    int i, component_plane;

    if (pix_fmt == PIX_FMT_GRAY16)
        return 0;

    component_plane = av_pix_fmt_descriptors[pix_fmt].comp[0].plane;
    for (i = 1; i < av_pix_fmt_descriptors[pix_fmt].nb_components; i++) {
        if (component_plane != av_pix_fmt_descriptors[pix_fmt].comp[i].plane)
            return 0;
    }
    return 1;
}
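
/* Interleave all codestream components into the single packed plane of
 * the output frame, one byte per sample. */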
static inline void libopenjpeg_copy_to_packed8(AVFrame *picture, opj_image_t *image) {
    uint8_t *img_ptr;
    int index, x, y, c;

    for (y = 0; y < picture->height; y++) {
        index = y * picture->width;
        img_ptr = picture->data[0] + y * picture->linesize[0];
        for (x = 0; x < picture->width; x++, index++) {
            for (c = 0; c < image->numcomps; c++) {
                *img_ptr++ = image->comps[c].data[index];
            }
        }
    }
}
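
/* Interleave all codestream components into the single packed plane of
 * the output frame, one 16-bit word per sample, shifting lower-precision
 * samples up towards the most significant bits. */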
static inline void libopenjpeg_copy_to_packed16(AVFrame *picture, opj_image_t *image) {
    uint16_t *img_ptr;
    int index, x, y, c;
    int adjust[4];

    for (x = 0; x < image->numcomps; x++) {
        adjust[x] = FFMAX(FFMIN(16 - image->comps[x].prec, 8), 0);
    }

    for (y = 0; y < picture->height; y++) {
        index = y * picture->width;
        img_ptr = (uint16_t *) (picture->data[0] + y * picture->linesize[0]);
        for (x = 0; x < picture->width; x++, index++) {
            for (c = 0; c < image->numcomps; c++) {
                *img_ptr++ = image->comps[c].data[index] << adjust[c];
            }
        }
    }
}
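
/* Copy each codestream component into its own plane of the output frame,
 * one byte per sample. */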
static inline void libopenjpeg_copyto8(AVFrame *picture, opj_image_t *image) {
    int *comp_data;
    uint8_t *img_ptr;
    int index, x, y;

    for (index = 0; index < image->numcomps; index++) {
        comp_data = image->comps[index].data;
        for (y = 0; y < image->comps[index].h; y++) {
            img_ptr = picture->data[index] + y * picture->linesize[index];
            for (x = 0; x < image->comps[index].w; x++) {
                *img_ptr = (uint8_t) *comp_data;
                img_ptr++;
                comp_data++;
            }
        }
    }
}
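
/* Copy each codestream component into its own plane of the output frame,
 * one 16-bit word per sample. */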
static inline void libopenjpeg_copyto16(AVFrame *picture, opj_image_t *image) {
    int *comp_data;
    uint16_t *img_ptr;
    int index, x, y;

    for (index = 0; index < image->numcomps; index++) {
        comp_data = image->comps[index].data;
        for (y = 0; y < image->comps[index].h; y++) {
            img_ptr = (uint16_t *) (picture->data[index] + y * picture->linesize[index]);
            for (x = 0; x < image->comps[index].w; x++) {
                *img_ptr = *comp_data;
                img_ptr++;
                comp_data++;
            }
        }
    }
}

static av_cold int libopenjpeg_decode_init(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    opj_set_default_decoder_parameters(&ctx->dec_params);
    avcodec_get_frame_defaults(&ctx->image);
    avctx->coded_frame = &ctx->image;
    return 0;
}

static av_cold int libopenjpeg_decode_init_thread_copy(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    avctx->coded_frame = &ctx->image;
    return 0;
}
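
/* Decode one JPEG 2000 picture: detect JP2 wrapping vs. a raw codestream,
 * decode the main header to derive dimensions and pixel format, allocate
 * the output frame, then decode the full codestream and copy the samples
 * into the frame. */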
static int libopenjpeg_decode_frame(AVCodecContext *avctx,
                                    void *data, int *data_size,
                                    AVPacket *avpkt)
{
    uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    LibOpenJPEGContext *ctx = avctx->priv_data;
    AVFrame *picture = &ctx->image, *output = data;
    opj_dinfo_t *dec;
    opj_cio_t *stream;
    opj_image_t *image;
    int width, height, ret = -1;
    int pixel_size = 0;
    int ispacked = 0;
    int i;

    *data_size = 0;

    // Check if input is a raw jpeg2k codestream or in jp2 wrapping
    if ((AV_RB32(buf)     == 12)           &&
        (AV_RB32(buf + 4) == JP2_SIG_TYPE) &&
        (AV_RB32(buf + 8) == JP2_SIG_VALUE)) {
        dec = opj_create_decompress(CODEC_JP2);
    } else {
        // If the AVPacket contains a jp2c box, then skip to
        // the starting byte of the codestream.
        if (AV_RB32(buf + 4) == AV_RB32("jp2c"))
            buf += 8;
        dec = opj_create_decompress(CODEC_J2K);
    }

    if (!dec) {
        av_log(avctx, AV_LOG_ERROR, "Error initializing decoder.\n");
        return -1;
    }
    opj_set_event_mgr((opj_common_ptr) dec, NULL, NULL);

    ctx->dec_params.cp_limit_decoding = LIMIT_TO_MAIN_HEADER;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr) dec, buf, buf_size);
    if (!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    // Decode the header only
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if (!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        opj_destroy_decompress(dec);
        return -1;
    }

    width  = image->x1 - image->x0;
    height = image->y1 - image->y0;
    if (av_image_check_size(width, height, 0, avctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "%dx%d dimension invalid.\n", width, height);
        goto done;
    }
    avcodec_set_dimensions(avctx, width, height);

    if (avctx->pix_fmt != PIX_FMT_NONE) {
        if (!libopenjpeg_matches_pix_fmt(image, avctx->pix_fmt)) {
            avctx->pix_fmt = PIX_FMT_NONE;
        }
    }

    if (avctx->pix_fmt == PIX_FMT_NONE) {
        avctx->pix_fmt = libopenjpeg_guess_pix_fmt(image);
    }

    if (avctx->pix_fmt == PIX_FMT_NONE) {
        av_log(avctx, AV_LOG_ERROR, "Unable to determine pixel format\n");
        goto done;
    }

    for (i = 0; i < image->numcomps; i++)
        if (image->comps[i].prec > avctx->bits_per_raw_sample)
            avctx->bits_per_raw_sample = image->comps[i].prec;

    if (picture->data[0])
        ff_thread_release_buffer(avctx, picture);

    if (ff_thread_get_buffer(avctx, picture) < 0) {
        av_log(avctx, AV_LOG_ERROR, "ff_thread_get_buffer() failed\n");
        goto done;
    }

    ctx->dec_params.cp_limit_decoding = NO_LIMITATION;
    ctx->dec_params.cp_reduce = avctx->lowres;
    // Tie decoder with decoding parameters
    opj_setup_decoder(dec, &ctx->dec_params);
    stream = opj_cio_open((opj_common_ptr) dec, buf, buf_size);
    if (!stream) {
        av_log(avctx, AV_LOG_ERROR, "Codestream could not be opened for reading.\n");
        goto done;
    }

    opj_image_destroy(image);
    // Decode the codestream
    image = opj_decode_with_info(dec, stream, NULL);
    opj_cio_close(stream);
    if (!image) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding codestream.\n");
        goto done;
    }

    pixel_size = av_pix_fmt_descriptors[avctx->pix_fmt].comp[0].step_minus1 + 1;
    ispacked   = libopenjpeg_ispacked(avctx->pix_fmt);

    switch (pixel_size) {
    case 1:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        } else {
            libopenjpeg_copyto8(picture, image);
        }
        break;
    case 2:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        } else {
            libopenjpeg_copyto16(picture, image);
        }
        break;
    case 3:
    case 4:
        if (ispacked) {
            libopenjpeg_copy_to_packed8(picture, image);
        }
        break;
    case 6:
    case 8:
        if (ispacked) {
            libopenjpeg_copy_to_packed16(picture, image);
        }
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel size %d\n", pixel_size);
        goto done;
    }

    *output    = ctx->image;
    *data_size = sizeof(AVPicture);
    ret = buf_size;

done:
    opj_image_destroy(image);
    opj_destroy_decompress(dec);
    return ret;
}

static av_cold int libopenjpeg_decode_close(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    if (ctx->image.data[0])
        ff_thread_release_buffer(avctx, &ctx->image);
    return 0;
}

AVCodec ff_libopenjpeg_decoder = {
    .name             = "libopenjpeg",
    .type             = AVMEDIA_TYPE_VIDEO,
    .id               = CODEC_ID_JPEG2000,
    .priv_data_size   = sizeof(LibOpenJPEGContext),
    .init             = libopenjpeg_decode_init,
    .close            = libopenjpeg_decode_close,
    .decode           = libopenjpeg_decode_frame,
    .capabilities     = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .max_lowres       = 5,
    .long_name        = NULL_IF_CONFIG_SMALL("OpenJPEG JPEG 2000"),
    .init_thread_copy = ONLY_IF_THREADS_ENABLED(libopenjpeg_decode_init_thread_copy),
};