/*
 * lossless JPEG encoder
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003 Alex Beregszaszi
 * Copyright (c) 2003-2004 Michael Niedermayer
 *
 * Support for external Huffman table, various fixes (AVID workaround),
 * aspecting, new decode_frame mechanism and Apple MJPEG-B support
 * by Alex Beregszaszi
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * lossless JPEG encoder.
 */

#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "idctdsp.h"
#include "internal.h"
#include "jpegtables.h"
#include "mjpegenc_common.h"
#include "mjpeg.h"
#include "mjpegenc.h"

typedef struct LJpegEncContext {
    IDCTDSPContext idsp;
    ScanTable scantable;
    uint16_t matrix[64];

    int vsample[3];                        ///< vertical sampling factor per component
    int hsample[3];                        ///< horizontal sampling factor per component

    uint16_t huff_code_dc_luminance[12];
    uint16_t huff_code_dc_chrominance[12];
    uint8_t  huff_size_dc_luminance[12];
    uint8_t  huff_size_dc_chrominance[12];

    uint16_t (*scratch)[4];                ///< one-line conversion buffer used by the BGR24 path
} LJpegEncContext;

static int ljpeg_encode_bgr(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    LJpegEncContext *s    = avctx->priv_data;
    const int width       = frame->width;
    const int height      = frame->height;
    const int linesize    = frame->linesize[0];
    uint16_t (*buffer)[4] = s->scratch;
    const int predictor   = avctx->prediction_method + 1;
    int left[3], top[3], topleft[3];
    int x, y, i;

    /* seed the predictors with a mid-range value */
    for (i = 0; i < 3; i++)
        buffer[0][i] = 1 << (9 - 1);

    for (y = 0; y < height; y++) {
        const int modified_predictor = y ? predictor : 1;
        uint8_t *ptr = frame->data[0] + (linesize * y);

        /* make sure the output buffer can hold another fully coded line */
        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) < width * 3 * 3) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (i = 0; i < 3; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        for (x = 0; x < width; x++) {
            /* BGR -> (B-G, R-G, (B+2G+R)/4), with a 0x100 bias on the differences */
            buffer[x][1] =  ptr[3 * x + 0] -  ptr[3 * x + 1] + 0x100;
            buffer[x][2] =  ptr[3 * x + 2] -  ptr[3 * x + 1] + 0x100;
            buffer[x][0] = (ptr[3 * x + 0] + 2 * ptr[3 * x + 1] +
                            ptr[3 * x + 2]) >> 2;

            for (i = 0; i < 3; i++) {
                int pred, diff;

                PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);

                topleft[i] = top[i];
                top[i]     = buffer[x + 1][i];
                left[i]    = buffer[x][i];

                diff = ((left[i] - pred + 0x100) & 0x1FF) - 0x100;

                if (i == 0)
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_luminance,
                                       s->huff_code_dc_luminance); //FIXME ugly
                else
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_chrominance,
                                       s->huff_code_dc_chrominance);
            }
        }
    }

    return 0;
}

static inline void ljpeg_encode_yuv_mb(LJpegEncContext *s, PutBitContext *pb,
                                       const AVFrame *frame, int predictor,
                                       int mb_x, int mb_y)
{
    int i;

    if (mb_x == 0 || mb_y == 0) {
        /* MBs in the top row or the left column lack some neighbours,
         * so their prediction is special-cased */
        for (i = 0; i < 3; i++) {
            uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap

                    if (y == 0 && mb_y == 0) {
                        if (x == 0 && mb_x == 0)
                            pred = 128;
                        else
                            pred = ptr[-1];
                    } else {
                        if (x == 0 && mb_x == 0) {
                            pred = ptr[-linesize];
                        } else {
                            PREDICT(pred, ptr[-linesize - 1], ptr[-linesize],
                                    ptr[-1], predictor);
                        }
                    }

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance,
                                           s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance,
                                           s->huff_code_dc_chrominance);
                }
            }
        }
    } else {
        for (i = 0; i < 3; i++) {
            uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                    PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance,
                                           s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance,
                                           s->huff_code_dc_chrominance);
                }
            }
        }
    }
}

static int ljpeg_encode_yuv(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    const int predictor = avctx->prediction_method + 1;
    LJpegEncContext *s  = avctx->priv_data;
    const int mb_width  = (avctx->width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (avctx->height + s->vsample[0] - 1) / s->vsample[0];
    int mb_x, mb_y;

    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) <
            mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (mb_x = 0; mb_x < mb_width; mb_x++)
            ljpeg_encode_yuv_mb(s, pb, frame, predictor, mb_x, mb_y);
    }

    return 0;
}

static int ljpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    LJpegEncContext *s  = avctx->priv_data;
    PutBitContext pb;
    const int width     = avctx->width;
    const int height    = avctx->height;
    const int mb_width  = (width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
    int max_pkt_size = AV_INPUT_BUFFER_MIN_SIZE;
    int ret, header_bits;

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24)
        max_pkt_size += width * height * 3 * 3;
    else {
        max_pkt_size += mb_width * mb_height * 3 * 4
                        * s->hsample[0] * s->vsample[0];
    }

    if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
               max_pkt_size);
        return ret;
    }

    init_put_bits(&pb, pkt->data, pkt->size);

    ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable, s->matrix);

    header_bits = put_bits_count(&pb);

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24)
        ret = ljpeg_encode_bgr(avctx, &pb, pict);
    else
        ret = ljpeg_encode_yuv(avctx, &pb, pict);
    if (ret < 0)
        return ret;

    emms_c();

    ff_mjpeg_encode_picture_trailer(&pb, header_bits);
    flush_put_bits(&pb);

    pkt->size   = put_bits_ptr(&pb) - pb.buf;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
{
    LJpegEncContext *s = avctx->priv_data;

    av_freep(&s->scratch);

    return 0;
}

static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
{
    LJpegEncContext *s = avctx->priv_data;
    int chroma_v_shift, chroma_h_shift;

    if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
         avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
         avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
         avctx->color_range == AVCOL_RANGE_MPEG) &&
        avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Limited range YUV is non-standard, set strict_std_compliance to "
               "at least unofficial to use it.\n");
        return AVERROR(EINVAL);
    }

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
    if (!s->scratch)
        return AVERROR(ENOMEM);

    ff_idctdsp_init(&s->idsp, avctx);
    ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
                      ff_zigzag_direct);

    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
                                     &chroma_v_shift);

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
        s->vsample[0] = s->hsample[0] =
        s->vsample[1] = s->hsample[1] =
        s->vsample[2] = s->hsample[2] = 1;
    } else {
        s->vsample[0] = 2;
        s->vsample[1] = 2 >> chroma_v_shift;
        s->vsample[2] = 2 >> chroma_v_shift;
        s->hsample[0] = 2;
        s->hsample[1] = 2 >> chroma_h_shift;
        s->hsample[2] = 2 >> chroma_h_shift;
    }

    ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
                                 s->huff_code_dc_luminance,
                                 avpriv_mjpeg_bits_dc_luminance,
                                 avpriv_mjpeg_val_dc);
    ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
                                 s->huff_code_dc_chrominance,
                                 avpriv_mjpeg_bits_dc_chrominance,
                                 avpriv_mjpeg_val_dc);

    return 0;
}

AVCodec ff_ljpeg_encoder = {
    .name           = "ljpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LJPEG,
    .priv_data_size = sizeof(LJpegEncContext),
    .init           = ljpeg_encode_init,
    .encode2        = ljpeg_encode_frame,
    .close          = ljpeg_encode_close,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_BGR24,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_NONE
    },
};