/*
 * JPEG 2000 encoding support via OpenJPEG
 * Copyright (c) 2011 Michael Bradshaw <mjbshaw gmail com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * JPEG 2000 encoder using libopenjpeg
 */

#define OPJ_STATIC

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "avcodec.h"
#include "internal.h"

#if HAVE_OPENJPEG_2_1_OPENJPEG_H
# include <openjpeg-2.1/openjpeg.h>
#elif HAVE_OPENJPEG_2_0_OPENJPEG_H
# include <openjpeg-2.0/openjpeg.h>
#elif HAVE_OPENJPEG_1_5_OPENJPEG_H
# include <openjpeg-1.5/openjpeg.h>
#else
# include <openjpeg.h>
#endif

#if HAVE_OPENJPEG_2_1_OPENJPEG_H || HAVE_OPENJPEG_2_0_OPENJPEG_H
# define OPENJPEG_MAJOR_VERSION 2
# define OPJ(x) OPJ_##x
#else
# define OPENJPEG_MAJOR_VERSION 1
# define OPJ(x) x
#endif

typedef struct LibOpenJPEGContext {
    AVClass *avclass;
    opj_image_t *image;
    opj_cparameters_t enc_params;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_event_mgr_t event_mgr;
#endif // OPENJPEG_MAJOR_VERSION == 1
    int format;
    int profile;
    int prog_order;
    int cinema_mode;
    int numresolution;
    int numlayers;
    int disto_alloc;
    int fixed_alloc;
    int fixed_quality;
} LibOpenJPEGContext;

static void error_callback(const char *msg, void *data)
{
    av_log(data, AV_LOG_ERROR, "%s\n", msg);
}

static void warning_callback(const char *msg, void *data)
{
    av_log(data, AV_LOG_WARNING, "%s\n", msg);
}

static void info_callback(const char *msg, void *data)
{
    av_log(data, AV_LOG_DEBUG, "%s\n", msg);
}

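/*
 * With OpenJPEG 2.x the encoder writes its output through user-supplied
 * stream callbacks instead of a library-owned buffer. PacketWriter tracks
 * the current write offset in the output AVPacket; the callbacks below grow
 * the packet on demand and fail once it would exceed INT_MAX minus the
 * input buffer padding.
 */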
#if OPENJPEG_MAJOR_VERSION == 2
typedef struct PacketWriter {
    int pos;
    AVPacket *packet;
} PacketWriter;

static OPJ_SIZE_T stream_write(void *out_buffer, OPJ_SIZE_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    int remaining = packet->size - writer->pos;
    if (nb_bytes > remaining) {
        OPJ_SIZE_T needed = nb_bytes - remaining;
        int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
        if (needed > max_growth) {
            return (OPJ_SIZE_T)-1;
        }
        if (av_grow_packet(packet, (int)needed)) {
            return (OPJ_SIZE_T)-1;
        }
    }
    memcpy(packet->data + writer->pos, out_buffer, nb_bytes);
    writer->pos += (int)nb_bytes;
    return nb_bytes;
}

static OPJ_OFF_T stream_skip(OPJ_OFF_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    if (nb_bytes < 0) {
        if (writer->pos == 0) {
            return (OPJ_SIZE_T)-1;
        }
        if (nb_bytes + writer->pos < 0) {
            nb_bytes = -writer->pos;
        }
    } else {
        int remaining = packet->size - writer->pos;
        if (nb_bytes > remaining) {
            OPJ_SIZE_T needed = nb_bytes - remaining;
            int max_growth = INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE - packet->size;
            if (needed > max_growth) {
                return (OPJ_SIZE_T)-1;
            }
            if (av_grow_packet(packet, (int)needed)) {
                return (OPJ_SIZE_T)-1;
            }
        }
    }
    writer->pos += (int)nb_bytes;
    return nb_bytes;
}

static OPJ_BOOL stream_seek(OPJ_OFF_T nb_bytes, void *user_data)
{
    PacketWriter *writer = user_data;
    AVPacket *packet = writer->packet;
    if (nb_bytes < 0) {
        return OPJ_FALSE;
    }
    if (nb_bytes > packet->size) {
        if (nb_bytes > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE ||
            av_grow_packet(packet, (int)nb_bytes - packet->size)) {
            return OPJ_FALSE;
        }
    }
    writer->pos = (int)nb_bytes;
    return OPJ_TRUE;
}
#endif // OPENJPEG_MAJOR_VERSION == 2

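/* Apply the parameter set required for digital cinema (DCI) compliant code
 * streams: a single tile, CPRL progression, 32x32 code blocks, no ROI,
 * no subsampling and the irreversible 9/7 wavelet transform. */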
static void cinema_parameters(opj_cparameters_t *p)
{
    p->tile_size_on = 0;
    p->cp_tdx = 1;
    p->cp_tdy = 1;

    /* Tile part */
    p->tp_flag = 'C';
    p->tp_on = 1;

    /* Tile and Image shall be at (0, 0) */
    p->cp_tx0 = 0;
    p->cp_ty0 = 0;
    p->image_offset_x0 = 0;
    p->image_offset_y0 = 0;

    /* Codeblock size = 32 * 32 */
    p->cblockw_init = 32;
    p->cblockh_init = 32;
    p->csty |= 0x01;

    /* The progression order shall be CPRL */
    p->prog_order = OPJ(CPRL);

    /* No ROI */
    p->roi_compno = -1;

    /* No subsampling */
    p->subsampling_dx = 1;
    p->subsampling_dy = 1;

    /* 9-7 transform */
    p->irreversible = 1;

    p->tcp_mct = 1;
}

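/* Build the opj_image_t that the encoder consumes: pick the OpenJPEG
 * colorspace matching the pixel format and describe each component's bit
 * depth, subsampling factors and dimensions. Returns NULL if the pixel
 * format is unsupported or image allocation fails. */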
static opj_image_t *mj2_create_image(AVCodecContext *avctx, opj_cparameters_t *parameters)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    opj_image_cmptparm_t cmptparm[4] = {{0}};
    opj_image_t *img;
    int i;
    int sub_dx[4];
    int sub_dy[4];
    int numcomps;
    OPJ_COLOR_SPACE color_space = OPJ(CLRSPC_UNKNOWN);

    sub_dx[0] = sub_dx[3] = 1;
    sub_dy[0] = sub_dy[3] = 1;
    sub_dx[1] = sub_dx[2] = 1 << desc->log2_chroma_w;
    sub_dy[1] = sub_dy[2] = 1 << desc->log2_chroma_h;

    numcomps = desc->nb_components;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YA8:
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YA16:
        color_space = OPJ(CLRSPC_GRAY);
        break;
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_RGBA:
    case AV_PIX_FMT_RGB48:
    case AV_PIX_FMT_RGBA64:
    case AV_PIX_FMT_GBR24P:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
    case AV_PIX_FMT_XYZ12:
        color_space = OPJ(CLRSPC_SRGB);
        break;
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUVA420P9:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUVA420P10:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUVA420P16:
    case AV_PIX_FMT_YUVA422P16:
    case AV_PIX_FMT_YUVA444P16:
        color_space = OPJ(CLRSPC_SYCC);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The requested pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return NULL;
    }

    for (i = 0; i < numcomps; i++) {
        cmptparm[i].prec = desc->comp[i].depth;
        cmptparm[i].bpp  = desc->comp[i].depth;
        cmptparm[i].sgnd = 0;
        cmptparm[i].dx = sub_dx[i];
        cmptparm[i].dy = sub_dy[i];
        cmptparm[i].w = (avctx->width + sub_dx[i] - 1) / sub_dx[i];
        cmptparm[i].h = (avctx->height + sub_dy[i] - 1) / sub_dy[i];
    }

    img = opj_image_create(numcomps, cmptparm, color_space);
    if (!img)
        return NULL;

    // x0, y0 is the top left corner of the image
    // x1, y1 is the width, height of the reference grid
    img->x0 = 0;
    img->y0 = 0;
    img->x1 = (avctx->width  - 1) * parameters->subsampling_dx + 1;
    img->y1 = (avctx->height - 1) * parameters->subsampling_dy + 1;

    return img;
}

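/* The opj_image_t is created once at init time and kept in the codec
 * context; libopenjpeg_encode_frame() only refills its sample data. */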
static av_cold int libopenjpeg_encode_init(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    int err = 0;

    opj_set_default_encoder_parameters(&ctx->enc_params);

#if HAVE_OPENJPEG_2_1_OPENJPEG_H
    switch (ctx->cinema_mode) {
    case OPJ_CINEMA2K_24:
        ctx->enc_params.rsiz = OPJ_PROFILE_CINEMA_2K;
        ctx->enc_params.max_cs_size = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;
        break;
    case OPJ_CINEMA2K_48:
        ctx->enc_params.rsiz = OPJ_PROFILE_CINEMA_2K;
        ctx->enc_params.max_cs_size = OPJ_CINEMA_48_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_48_COMP;
        break;
    case OPJ_CINEMA4K_24:
        ctx->enc_params.rsiz = OPJ_PROFILE_CINEMA_4K;
        ctx->enc_params.max_cs_size = OPJ_CINEMA_24_CS;
        ctx->enc_params.max_comp_size = OPJ_CINEMA_24_COMP;
        break;
    }

    switch (ctx->profile) {
    case OPJ_CINEMA2K:
        if (ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_4K) {
            err = AVERROR(EINVAL);
            break;
        }
        ctx->enc_params.rsiz = OPJ_PROFILE_CINEMA_2K;
        break;
    case OPJ_CINEMA4K:
        if (ctx->enc_params.rsiz == OPJ_PROFILE_CINEMA_2K) {
            err = AVERROR(EINVAL);
            break;
        }
        ctx->enc_params.rsiz = OPJ_PROFILE_CINEMA_4K;
        break;
    }

    if (err) {
        av_log(avctx, AV_LOG_ERROR,
               "Invalid parameter pairing: cinema_mode and profile conflict.\n");
        goto fail;
    }
#else
    ctx->enc_params.cp_rsiz = ctx->profile;
    ctx->enc_params.cp_cinema = ctx->cinema_mode;
#endif

    ctx->enc_params.mode = !!avctx->global_quality;
    ctx->enc_params.prog_order = ctx->prog_order;
    ctx->enc_params.numresolution = ctx->numresolution;
    ctx->enc_params.cp_disto_alloc = ctx->disto_alloc;
    ctx->enc_params.cp_fixed_alloc = ctx->fixed_alloc;
    ctx->enc_params.cp_fixed_quality = ctx->fixed_quality;
    ctx->enc_params.tcp_numlayers = ctx->numlayers;
    ctx->enc_params.tcp_rates[0] = FFMAX(avctx->compression_level, 0) * 2;

    if (ctx->cinema_mode > 0) {
        cinema_parameters(&ctx->enc_params);
    }

    ctx->image = mj2_create_image(avctx, &ctx->enc_params);
    if (!ctx->image) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the mj2 image\n");
        err = AVERROR(EINVAL);
        goto fail;
    }

    return 0;

fail:
    opj_image_destroy(ctx->image);
    ctx->image = NULL;
    return err;
}

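/* The copy helpers below convert AVFrame data into OpenJPEG's planar,
 * one-int-per-sample layout. The packed variants de-interleave the single
 * input plane; columns and rows beyond the visible frame are padded by
 * replicating the nearest written sample. They return 1 on success and
 * 0 on failure. */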
static int libopenjpeg_copy_packed8(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x;
    int y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[0] + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame->data[0][frame_index];
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - image->comps[compno].w];
            }
        }
    }

    return 1;
}

// for XYZ 12 bit
static int libopenjpeg_copy_packed12(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x, y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t *)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame_ptr[frame_index] >> 4;
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - image->comps[compno].w];
            }
        }
    }

    return 1;
}

static int libopenjpeg_copy_packed16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x;
    int y;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr = (uint16_t *)frame->data[0];

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[0] / numcomps) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        for (y = 0; y < avctx->height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[0] / 2) + compno;
            for (x = 0; x < avctx->width; ++x) {
                image_line[x] = frame_ptr[frame_index];
                frame_index += numcomps;
            }
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - image->comps[compno].w];
            }
        }
    }

    return 1;
}

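/* The planar ("unpacked") variants copy each plane directly, honouring the
 * per-component subsampling factors dx and dy. */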
static int libopenjpeg_copy_unpacked8(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x;
    int y;
    int width;
    int height;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        width  = avctx->width / image->comps[compno].dx;
        height = avctx->height / image->comps[compno].dy;
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * frame->linesize[compno];
            for (x = 0; x < width; ++x)
                image_line[x] = frame->data[compno][frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - image->comps[compno].w];
            }
        }
    }

    return 1;
}

static int libopenjpeg_copy_unpacked16(AVCodecContext *avctx, const AVFrame *frame, opj_image_t *image)
{
    int compno;
    int x;
    int y;
    int width;
    int height;
    int *image_line;
    int frame_index;
    const int numcomps = image->numcomps;
    uint16_t *frame_ptr;

    for (compno = 0; compno < numcomps; ++compno) {
        if (image->comps[compno].w > frame->linesize[compno]) {
            av_log(avctx, AV_LOG_ERROR, "Error: frame's linesize is too small for the image\n");
            return 0;
        }
    }

    for (compno = 0; compno < numcomps; ++compno) {
        width     = avctx->width / image->comps[compno].dx;
        height    = avctx->height / image->comps[compno].dy;
        frame_ptr = (uint16_t *)frame->data[compno];
        for (y = 0; y < height; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            frame_index = y * (frame->linesize[compno] / 2);
            for (x = 0; x < width; ++x)
                image_line[x] = frame_ptr[frame_index++];
            for (; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - 1];
            }
        }
        for (; y < image->comps[compno].h; ++y) {
            image_line = image->comps[compno].data + y * image->comps[compno].w;
            for (x = 0; x < image->comps[compno].w; ++x) {
                image_line[x] = image_line[x - image->comps[compno].w];
            }
        }
    }

    return 1;
}

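/*
 * Encode one frame: copy the frame data into ctx->image with the helper
 * matching the pixel format (planar GBR input is reordered to RGB through a
 * cloned frame), then run the OpenJPEG encoder. With OpenJPEG 1.5 the
 * encoder writes into a CIO buffer that is copied to the packet; with 2.x it
 * writes directly into the packet through the PacketWriter callbacks.
 */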
static int libopenjpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                                    const AVFrame *frame, int *got_packet)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;
    opj_image_t *image      = ctx->image;
#if OPENJPEG_MAJOR_VERSION == 1
    opj_cinfo_t *compress   = NULL;
    opj_cio_t *stream       = NULL;
    int len;
#else // OPENJPEG_MAJOR_VERSION == 2
    opj_codec_t *compress   = NULL;
    opj_stream_t *stream    = NULL;
    PacketWriter writer     = { 0 };
#endif // OPENJPEG_MAJOR_VERSION == 1
    int cpyresult = 0;
    int ret;
    AVFrame *gbrframe;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_RGB24:
    case AV_PIX_FMT_RGBA:
    case AV_PIX_FMT_YA8:
        cpyresult = libopenjpeg_copy_packed8(avctx, frame, image);
        break;
    case AV_PIX_FMT_XYZ12:
        cpyresult = libopenjpeg_copy_packed12(avctx, frame, image);
        break;
    case AV_PIX_FMT_RGB48:
    case AV_PIX_FMT_RGBA64:
    case AV_PIX_FMT_YA16:
        cpyresult = libopenjpeg_copy_packed16(avctx, frame, image);
        break;
    case AV_PIX_FMT_GBR24P:
    case AV_PIX_FMT_GBRP9:
    case AV_PIX_FMT_GBRP10:
    case AV_PIX_FMT_GBRP12:
    case AV_PIX_FMT_GBRP14:
    case AV_PIX_FMT_GBRP16:
        gbrframe = av_frame_clone(frame);
        if (!gbrframe)
            return AVERROR(ENOMEM);
        gbrframe->data[0] = frame->data[2]; // swap to be rgb
        gbrframe->data[1] = frame->data[0];
        gbrframe->data[2] = frame->data[1];
        gbrframe->linesize[0] = frame->linesize[2];
        gbrframe->linesize[1] = frame->linesize[0];
        gbrframe->linesize[2] = frame->linesize[1];
        if (avctx->pix_fmt == AV_PIX_FMT_GBR24P) {
            cpyresult = libopenjpeg_copy_unpacked8(avctx, gbrframe, image);
        } else {
            cpyresult = libopenjpeg_copy_unpacked16(avctx, gbrframe, image);
        }
        av_frame_free(&gbrframe);
        break;
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_YUV410P:
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA444P:
        cpyresult = libopenjpeg_copy_unpacked8(avctx, frame, image);
        break;
    case AV_PIX_FMT_GRAY16:
    case AV_PIX_FMT_YUV420P9:
    case AV_PIX_FMT_YUV422P9:
    case AV_PIX_FMT_YUV444P9:
    case AV_PIX_FMT_YUVA420P9:
    case AV_PIX_FMT_YUVA422P9:
    case AV_PIX_FMT_YUVA444P9:
    case AV_PIX_FMT_YUV444P10:
    case AV_PIX_FMT_YUV422P10:
    case AV_PIX_FMT_YUV420P10:
    case AV_PIX_FMT_YUVA444P10:
    case AV_PIX_FMT_YUVA422P10:
    case AV_PIX_FMT_YUVA420P10:
    case AV_PIX_FMT_YUV420P12:
    case AV_PIX_FMT_YUV422P12:
    case AV_PIX_FMT_YUV444P12:
    case AV_PIX_FMT_YUV420P14:
    case AV_PIX_FMT_YUV422P14:
    case AV_PIX_FMT_YUV444P14:
    case AV_PIX_FMT_YUV444P16:
    case AV_PIX_FMT_YUV422P16:
    case AV_PIX_FMT_YUV420P16:
    case AV_PIX_FMT_YUVA444P16:
    case AV_PIX_FMT_YUVA422P16:
    case AV_PIX_FMT_YUVA420P16:
        cpyresult = libopenjpeg_copy_unpacked16(avctx, frame, image);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR,
               "The frame's pixel format '%s' is not supported\n",
               av_get_pix_fmt_name(avctx->pix_fmt));
        return AVERROR(EINVAL);
        break;
    }

    if (!cpyresult) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not copy the frame data to the internal image buffer\n");
        return -1;
    }

#if OPENJPEG_MAJOR_VERSION == 2
    if ((ret = ff_alloc_packet2(avctx, pkt, 1024, 0)) < 0) {
        return ret;
    }
#endif // OPENJPEG_MAJOR_VERSION == 2

    compress = opj_create_compress(ctx->format);
    if (!compress) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the compressor\n");
        ret = AVERROR(ENOMEM);
        goto done;
    }

#if OPENJPEG_MAJOR_VERSION == 1
    opj_setup_encoder(compress, &ctx->enc_params, image);
    stream = opj_cio_open((opj_common_ptr) compress, NULL, 0);
#else // OPENJPEG_MAJOR_VERSION == 2
    if (!opj_set_error_handler(compress, error_callback, avctx) ||
        !opj_set_warning_handler(compress, warning_callback, avctx) ||
        !opj_set_info_handler(compress, info_callback, avctx)) {
        av_log(avctx, AV_LOG_ERROR, "Error setting the compressor handlers\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }

    if (!opj_setup_encoder(compress, &ctx->enc_params, image)) {
        av_log(avctx, AV_LOG_ERROR, "Error setting up the compressor\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }
    stream = opj_stream_default_create(OPJ_STREAM_WRITE);
#endif // OPENJPEG_MAJOR_VERSION == 1
    if (!stream) {
        av_log(avctx, AV_LOG_ERROR, "Error creating the cio stream\n");
        ret = AVERROR(ENOMEM);
        goto done;
    }

#if OPENJPEG_MAJOR_VERSION == 1
    memset(&ctx->event_mgr, 0, sizeof(ctx->event_mgr));
    ctx->event_mgr.info_handler    = info_callback;
    ctx->event_mgr.error_handler   = error_callback;
    ctx->event_mgr.warning_handler = warning_callback;
    opj_set_event_mgr((opj_common_ptr) compress, &ctx->event_mgr, avctx);
    if (!opj_encode(compress, stream, image, NULL)) {
        av_log(avctx, AV_LOG_ERROR, "Error during the opj encode\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }

    len = cio_tell(stream);
    if ((ret = ff_alloc_packet2(avctx, pkt, len, 0)) < 0) {
        goto done;
    }

    memcpy(pkt->data, stream->buffer, len);
#else // OPENJPEG_MAJOR_VERSION == 2
    writer.packet = pkt;
    opj_stream_set_write_function(stream, stream_write);
    opj_stream_set_skip_function(stream, stream_skip);
    opj_stream_set_seek_function(stream, stream_seek);
#if HAVE_OPENJPEG_2_1_OPENJPEG_H
    opj_stream_set_user_data(stream, &writer, NULL);
#elif HAVE_OPENJPEG_2_0_OPENJPEG_H
    opj_stream_set_user_data(stream, &writer);
#else
#error Missing call to opj_stream_set_user_data
#endif

    if (!opj_start_compress(compress, ctx->image, stream) ||
        !opj_encode(compress, stream) ||
        !opj_end_compress(compress, stream)) {
        av_log(avctx, AV_LOG_ERROR, "Error during the opj encode\n");
        ret = AVERROR_EXTERNAL;
        goto done;
    }

    av_shrink_packet(pkt, writer.pos);
#endif // OPENJPEG_MAJOR_VERSION == 1

    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;
    ret = 0;

done:
#if OPENJPEG_MAJOR_VERSION == 2
    opj_stream_destroy(stream);
    opj_destroy_codec(compress);
#else
    opj_cio_close(stream);
    opj_destroy_compress(compress);
#endif
    return ret;
}

static av_cold int libopenjpeg_encode_close(AVCodecContext *avctx)
{
    LibOpenJPEGContext *ctx = avctx->priv_data;

    opj_image_destroy(ctx->image);
    ctx->image = NULL;
    return 0;
}

#define OFFSET(x) offsetof(LibOpenJPEGContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "format",        "Codec Format",      OFFSET(format),        AV_OPT_TYPE_INT,   { .i64 = OPJ(CODEC_JP2)   }, OPJ(CODEC_J2K), OPJ(CODEC_JP2),   VE, "format"      },
    { "j2k",           NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CODEC_J2K)   }, 0,              0,                VE, "format"      },
    { "jp2",           NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CODEC_JP2)   }, 0,              0,                VE, "format"      },
    { "profile",       NULL,                OFFSET(profile),       AV_OPT_TYPE_INT,   { .i64 = OPJ(STD_RSIZ)    }, OPJ(STD_RSIZ),  OPJ(CINEMA4K),    VE, "profile"     },
    { "jpeg2000",      NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(STD_RSIZ)    }, 0,              0,                VE, "profile"     },
    { "cinema2k",      NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CINEMA2K)    }, 0,              0,                VE, "profile"     },
    { "cinema4k",      NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CINEMA4K)    }, 0,              0,                VE, "profile"     },
    { "cinema_mode",   "Digital Cinema",    OFFSET(cinema_mode),   AV_OPT_TYPE_INT,   { .i64 = OPJ(OFF)         }, OPJ(OFF),       OPJ(CINEMA4K_24), VE, "cinema_mode" },
    { "off",           NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(OFF)         }, 0,              0,                VE, "cinema_mode" },
    { "2k_24",         NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CINEMA2K_24) }, 0,              0,                VE, "cinema_mode" },
    { "2k_48",         NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CINEMA2K_48) }, 0,              0,                VE, "cinema_mode" },
    { "4k_24",         NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CINEMA4K_24) }, 0,              0,                VE, "cinema_mode" },
    { "prog_order",    "Progression Order", OFFSET(prog_order),    AV_OPT_TYPE_INT,   { .i64 = OPJ(LRCP)        }, OPJ(LRCP),      OPJ(CPRL),        VE, "prog_order"  },
    { "lrcp",          NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(LRCP)        }, 0,              0,                VE, "prog_order"  },
    { "rlcp",          NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(RLCP)        }, 0,              0,                VE, "prog_order"  },
    { "rpcl",          NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(RPCL)        }, 0,              0,                VE, "prog_order"  },
    { "pcrl",          NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(PCRL)        }, 0,              0,                VE, "prog_order"  },
    { "cprl",          NULL,                0,                     AV_OPT_TYPE_CONST, { .i64 = OPJ(CPRL)        }, 0,              0,                VE, "prog_order"  },
    { "numresolution", NULL,                OFFSET(numresolution), AV_OPT_TYPE_INT,   { .i64 = 6                }, 1,              INT_MAX,          VE                },
    { "numlayers",     NULL,                OFFSET(numlayers),     AV_OPT_TYPE_INT,   { .i64 = 1                }, 1,              10,               VE                },
    { "disto_alloc",   NULL,                OFFSET(disto_alloc),   AV_OPT_TYPE_INT,   { .i64 = 1                }, 0,              1,                VE                },
    { "fixed_alloc",   NULL,                OFFSET(fixed_alloc),   AV_OPT_TYPE_INT,   { .i64 = 0                }, 0,              1,                VE                },
    { "fixed_quality", NULL,                OFFSET(fixed_quality), AV_OPT_TYPE_INT,   { .i64 = 0                }, 0,              1,                VE                },
    { NULL },
};

static const AVClass openjpeg_class = {
    .class_name = "libopenjpeg",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_libopenjpeg_encoder = {
    .name           = "libopenjpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("OpenJPEG JPEG 2000"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_JPEG2000,
    .priv_data_size = sizeof(LibOpenJPEGContext),
    .init           = libopenjpeg_encode_init,
    .encode2        = libopenjpeg_encode_frame,
    .close          = libopenjpeg_encode_close,
    .capabilities   = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_INTRA_ONLY,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB48,
        AV_PIX_FMT_RGBA64, AV_PIX_FMT_GBR24P,
        AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10, AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_YA8, AV_PIX_FMT_GRAY16, AV_PIX_FMT_YA16,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_XYZ12,
        AV_PIX_FMT_NONE
    },
    .priv_class     = &openjpeg_class,
};