/*
 * lossless JPEG encoder
 * Copyright (c) 2000, 2001 Fabrice Bellard
 * Copyright (c) 2003 Alex Beregszaszi
 * Copyright (c) 2003-2004 Michael Niedermayer
 *
 * Support for external huffman table, various fixes (AVID workaround),
 * aspecting, new decode_frame mechanism and apple mjpeg-b support
 * by Alex Beregszaszi
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * lossless JPEG encoder.
 */

#include "libavutil/frame.h"
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"

#include "avcodec.h"
#include "idctdsp.h"
#include "internal.h"
#include "jpegtables.h"
#include "mjpegenc_common.h"
#include "mpegvideo.h"
#include "mjpeg.h"
#include "mjpegenc.h"

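/*
 * Descriptive note: every sample is predicted from already coded neighbours
 * with one of the standard JPEG lossless predictors (1 = left, 2 = above,
 * 3 = above-left, 4 = left + above - above-left,
 * 5 = left + ((above - above-left) >> 1), 6 = above + ((left - above-left) >> 1),
 * 7 = (left + above) >> 1), selected via the PREDICT() macro, and the
 * prediction error is entropy-coded with the MJPEG DC Huffman tables.
 * BGR24 input is first mapped to a pseudo-YUV representation in
 * ljpeg_encode_bgr(); planar YUV input is coded macroblock by macroblock
 * in ljpeg_encode_yuv().
 */
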
typedef struct LJpegEncContext {
    IDCTDSPContext idsp;
    ScanTable scantable;
    uint16_t matrix[64];

    int vsample[3];
    int hsample[3];

    uint16_t huff_code_dc_luminance[12];
    uint16_t huff_code_dc_chrominance[12];
    uint8_t  huff_size_dc_luminance[12];
    uint8_t  huff_size_dc_chrominance[12];

    uint16_t (*scratch)[4];
} LJpegEncContext;

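/**
 * Encode one BGR24 frame. Each pixel is mapped to a pseudo-YUV triple
 * (two green-difference components plus a weighted average), predicted from
 * its left/top/top-left neighbours, and the prediction error is written
 * with the DC Huffman tables. The scratch buffer holds the previous row of
 * transformed samples.
 */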
static int ljpeg_encode_bgr(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    LJpegEncContext *s    = avctx->priv_data;
    const int width       = frame->width;
    const int height      = frame->height;
    const int linesize    = frame->linesize[0];
    uint16_t (*buffer)[4] = s->scratch;
    const int predictor   = avctx->prediction_method + 1;
    int left[3], top[3], topleft[3];
    int x, y, i;

    for (i = 0; i < 3; i++)
        buffer[0][i] = 1 << (9 - 1);

    for (y = 0; y < height; y++) {
        const int modified_predictor = y ? predictor : 1;
        uint8_t *ptr = frame->data[0] + (linesize * y);

        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) < width * 3 * 3) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (i = 0; i < 3; i++)
            top[i] = left[i] = topleft[i] = buffer[0][i];

        for (x = 0; x < width; x++) {
            buffer[x][1] =  ptr[3 * x + 0] -  ptr[3 * x + 1] + 0x100;
            buffer[x][2] =  ptr[3 * x + 2] -  ptr[3 * x + 1] + 0x100;
            buffer[x][0] = (ptr[3 * x + 0] + 2 * ptr[3 * x + 1] +
                            ptr[3 * x + 2]) >> 2;

            for (i = 0; i < 3; i++) {
                int pred, diff;

                PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);

                topleft[i] = top[i];
                top[i]     = buffer[x + 1][i];
                left[i]    = buffer[x][i];

                diff = ((left[i] - pred + 0x100) & 0x1FF) - 0x100;

                if (i == 0)
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_luminance,
                                       s->huff_code_dc_luminance); //FIXME ugly
                else
                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_chrominance,
                                       s->huff_code_dc_chrominance);
            }
        }
    }

    return 0;
}

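/**
 * Encode one macroblock of a planar YUV frame: hsample[i] x vsample[i]
 * samples per component. Samples on the first row/column of the picture get
 * special boundary handling (128 for the very first sample, then the left or
 * top neighbour); everything else uses the configured predictor.
 */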
static inline void ljpeg_encode_yuv_mb(LJpegEncContext *s, PutBitContext *pb,
                                       const AVFrame *frame, int predictor,
                                       int mb_x, int mb_y)
{
    int i;

    if (mb_x == 0 || mb_y == 0) {
        for (i = 0; i < 3; i++) {
            uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                    if (y == 0 && mb_y == 0) {
                        if (x == 0 && mb_x == 0)
                            pred = 128;
                        else
                            pred = ptr[-1];
                    } else {
                        if (x == 0 && mb_x == 0) {
                            pred = ptr[-linesize];
                        } else {
                            PREDICT(pred, ptr[-linesize - 1], ptr[-linesize],
                                    ptr[-1], predictor);
                        }
                    }

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance,
                                           s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance,
                                           s->huff_code_dc_chrominance);
                }
            }
        }
    } else {
        for (i = 0; i < 3; i++) {
            uint8_t *ptr;
            int x, y, h, v, linesize;
            h = s->hsample[i];
            v = s->vsample[i];
            linesize = frame->linesize[i];

            for (y = 0; y < v; y++) {
                for (x = 0; x < h; x++) {
                    int pred;

                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                    PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);

                    if (i == 0)
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance,
                                           s->huff_code_dc_luminance); //FIXME ugly
                    else
                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance,
                                           s->huff_code_dc_chrominance);
                }
            }
        }
    }
}

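/**
 * Encode all macroblocks of a planar YUV frame, checking before each
 * macroblock row that the output buffer still has room for a worst-case
 * encoding of that row.
 */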
static int ljpeg_encode_yuv(AVCodecContext *avctx, PutBitContext *pb,
                            const AVFrame *frame)
{
    const int predictor = avctx->prediction_method + 1;
    LJpegEncContext *s  = avctx->priv_data;
    const int mb_width  = (avctx->width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (avctx->height + s->vsample[0] - 1) / s->vsample[0];
    int mb_x, mb_y;

    for (mb_y = 0; mb_y < mb_height; mb_y++) {
        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) <
            mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
            return -1;
        }

        for (mb_x = 0; mb_x < mb_width; mb_x++)
            ljpeg_encode_yuv_mb(s, pb, frame, predictor, mb_x, mb_y);
    }

    return 0;
}

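/**
 * Encode one frame into an AVPacket: allocate a worst-case sized packet,
 * write the JPEG picture header, encode the payload with the BGR or YUV
 * path, then write the trailer and trim the packet to the bytes actually
 * used.
 */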
static int ljpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
{
    LJpegEncContext *s  = avctx->priv_data;
    PutBitContext pb;
    const int width     = avctx->width;
    const int height    = avctx->height;
    const int mb_width  = (width  + s->hsample[0] - 1) / s->hsample[0];
    const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
    int max_pkt_size = FF_MIN_BUFFER_SIZE;
    int ret, header_bits;

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24)
        max_pkt_size += width * height * 3 * 3;
    else {
        max_pkt_size += mb_width * mb_height * 3 * 4
                        * s->hsample[0] * s->vsample[0];
    }

    if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n",
               max_pkt_size);
        return ret;
    }

    init_put_bits(&pb, pkt->data, pkt->size);

    ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable,
                                   s->matrix);

    header_bits = put_bits_count(&pb);

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24)
        ret = ljpeg_encode_bgr(avctx, &pb, pict);
    else
        ret = ljpeg_encode_yuv(avctx, &pb, pict);
    if (ret < 0)
        return ret;

    emms_c();

    ff_mjpeg_encode_picture_trailer(&pb, header_bits);
    flush_put_bits(&pb);

    pkt->size   = put_bits_ptr(&pb) - pb.buf;
    pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

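/**
 * Free the coded_frame and the scratch row buffer allocated in init.
 */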
static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
{
    LJpegEncContext *s = avctx->priv_data;

    av_frame_free(&avctx->coded_frame);
    av_freep(&s->scratch);

    return 0;
}

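/**
 * Set up the encoder: reject limited-range YUV unless strict compliance is
 * relaxed, allocate coded_frame and the scratch row buffer, derive the
 * per-component sampling factors from the pixel format, and build the DC
 * Huffman tables shared with the MJPEG encoder.
 */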
static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
{
    LJpegEncContext *s = avctx->priv_data;
    int chroma_v_shift, chroma_h_shift;

    if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
         avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
         avctx->pix_fmt == AV_PIX_FMT_YUV444P ||
         avctx->color_range == AVCOL_RANGE_MPEG) &&
        avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
        av_log(avctx, AV_LOG_ERROR,
               "Limited range YUV is non-standard, set strict_std_compliance to "
               "at least unofficial to use it.\n");
        return AVERROR(EINVAL);
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;

    /* row buffer for the BGR pseudo-YUV transform */
    s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
    if (!s->scratch) {
        av_frame_free(&avctx->coded_frame);
        return AVERROR(ENOMEM);
    }

    ff_idctdsp_init(&s->idsp, avctx);
    ff_init_scantable(s->idsp.idct_permutation, &s->scantable,
                      ff_zigzag_direct);

    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
                                     &chroma_v_shift);

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
        s->vsample[0] = s->hsample[0] =
        s->vsample[1] = s->hsample[1] =
        s->vsample[2] = s->hsample[2] = 1;
    } else {
        s->vsample[0] = 2;
        s->vsample[1] = 2 >> chroma_v_shift;
        s->vsample[2] = 2 >> chroma_v_shift;
        s->hsample[0] = 2;
        s->hsample[1] = 2 >> chroma_h_shift;
        s->hsample[2] = 2 >> chroma_h_shift;
    }

    ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
                                 s->huff_code_dc_luminance,
                                 avpriv_mjpeg_bits_dc_luminance,
                                 avpriv_mjpeg_val_dc);
    ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
                                 s->huff_code_dc_chrominance,
                                 avpriv_mjpeg_bits_dc_chrominance,
                                 avpriv_mjpeg_val_dc);

    return 0;
}

AVCodec ff_ljpeg_encoder = {
    .name           = "ljpeg",
    .long_name      = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_LJPEG,
    .priv_data_size = sizeof(LJpegEncContext),
    .init           = ljpeg_encode_init,
    .encode2        = ljpeg_encode_frame,
    .close          = ljpeg_encode_close,
    .pix_fmts       = (const enum AVPixelFormat[]) {
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_BGR24,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_NONE },
};