/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "internal.h"
#include "raw.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"

typedef struct RawVideoContext {
    AVBufferRef *palette;
    int frame_size;  /* size of the frame in bytes */
    int flip;
    int is_2_4_bpp;  // 2 or 4 bpp raw in avi/mov
    int is_yuv2;
} RawVideoContext;
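
/* Map bits_per_coded_sample to a pixel format for raw video stored in AVI
 * (and for streams that carry no codec tag at all). */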
static const PixelFormatTag pix_fmt_bps_avi[] = {
    { AV_PIX_FMT_PAL8,    4 },
    { AV_PIX_FMT_PAL8,    8 },
    { AV_PIX_FMT_RGB444, 12 },
    { AV_PIX_FMT_RGB555, 15 },
    { AV_PIX_FMT_RGB555, 16 },
    { AV_PIX_FMT_BGR24,  24 },
    { AV_PIX_FMT_RGB32,  32 },
    { AV_PIX_FMT_NONE,    0 },
};
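
/* Same mapping for QuickTime/MOV ('raw ' tag), keyed on the bit depth taken
 * from the sample description. */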
static const PixelFormatTag pix_fmt_bps_mov[] = {
    { AV_PIX_FMT_MONOWHITE,  1 },
    { AV_PIX_FMT_PAL8,       2 },
    { AV_PIX_FMT_PAL8,       4 },
    { AV_PIX_FMT_PAL8,       8 },
    // FIXME swscale does not support 16 bit in .mov, sample 16bit.mov
    // http://developer.apple.com/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
    { AV_PIX_FMT_RGB555BE,  16 },
    { AV_PIX_FMT_RGB24,     24 },
    { AV_PIX_FMT_ARGB,      32 },
    { AV_PIX_FMT_MONOWHITE, 33 },
    { AV_PIX_FMT_NONE,       0 },
};
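
/* Linearly scan a PixelFormatTag table for a matching fourcc (or bit depth,
 * for the tables above) and fall back to YUV420P if nothing matches. */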
static enum AVPixelFormat find_pix_fmt(const PixelFormatTag *tags,
                                       unsigned int fourcc)
{
    while (tags->pix_fmt >= 0) {
        if (tags->fourcc == fourcc)
            return tags->pix_fmt;
        tags++;
    }
    return AV_PIX_FMT_YUV420P;
}
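
/* Pick the output pixel format from the codec tag or bits_per_coded_sample,
 * allocate a palette where needed, and set the per-stream flags
 * (flip, 2/4 bpp expansion, yuv2 chroma conversion). */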
static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    if (avctx->codec_tag == MKTAG('r', 'a', 'w', ' '))
        avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_mov,
                                      avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_avi,
                                      avctx->bits_per_coded_sample);
    else if (avctx->codec_tag)
        avctx->pix_fmt = find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_avi,
                                      avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }
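
    /* Paletted and pseudo-paletted formats need a persistent palette buffer;
     * pseudo-paletted formats get the fixed, systematic palette. */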
    if (desc->flags & (AV_PIX_FMT_FLAG_PAL | AV_PIX_FMT_FLAG_PSEUDOPAL)) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
        if (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
            avpriv_set_systematic_pal2((uint32_t *)context->palette->data,
                                       avctx->pix_fmt);
        else
            memset(context->palette->data, 0, AVPALETTE_SIZE);
    }

    context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
                                                   avctx->width,
                                                   avctx->height, 1);
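
    /* 2 and 4 bpp PAL8 data is expanded to one byte per pixel in raw_decode(),
     * so the packet can never be used in place. */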
    if ((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
        avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r', 'a', 'w', ' ')))
        context->is_2_4_bpp = 1;
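
    /* A trailing "BottomUp" marker in the extradata and a few codec tags
     * indicate frames stored bottom-up; they are flipped during decoding. */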
    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        context->flip = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    return 0;
}
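
/* Flip a frame vertically without copying: point data[0] at the last line
 * and negate the stride. */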
static void flip(AVCodecContext *avctx, AVFrame *frame)
{
    frame->data[0] += frame->linesize[0] * (avctx->height - 1);
    frame->linesize[0] *= -1;
}
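
/* Wrap (or, when rewriting is needed, copy) the packet data into an AVFrame,
 * then fix up palette, plane order, stride and orientation as required by
 * the container's flavour of raw video. */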
static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    RawVideoContext *context = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int need_copy = !avpkt->buf || context->is_2_4_bpp || context->is_yuv2;
    int res;
    AVFrame *frame = data;

    frame->pict_type = AV_PICTURE_TYPE_I;
    frame->key_frame = 1;

    res = ff_decode_frame_props(avctx, frame);
    if (res < 0)
        return res;

    if (buf_size < context->frame_size - (avctx->pix_fmt == AV_PIX_FMT_PAL8 ?
                                          AVPALETTE_SIZE : 0))
        return -1;
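
    /* Reference the packet buffer directly when the data can be used as-is;
     * otherwise allocate a frame-sized buffer and rewrite into it. */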
    if (need_copy)
        frame->buf[0] = av_buffer_alloc(context->frame_size);
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    // 2 bpp and 4 bpp raw in avi and mov (yes this is ugly ...)
    if (context->is_2_4_bpp) {
        int i;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - AVPALETTE_SIZE;
        if (avctx->bits_per_coded_sample == 4) {
            for (i = 0; 2 * i + 1 < buf_size; i++) {
                dst[2 * i + 0] = buf[i] >> 4;
                dst[2 * i + 1] = buf[i] & 15;
            }
        } else {
            for (i = 0; 4 * i + 3 < buf_size; i++) {
                dst[4 * i + 0] = buf[i] >> 6;
                dst[4 * i + 1] = buf[i] >> 4 & 3;
                dst[4 * i + 2] = buf[i] >> 2 & 3;
                dst[4 * i + 3] = buf[i] & 3;
            }
        }
        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, FFMIN(buf_size, context->frame_size));
        buf = frame->buf[0]->data;
    }
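
    /* 'AV1x'/'AVup' packets can be larger than one frame; the image data is
     * the last frame_size bytes, so skip any leading padding. */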
    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    if ((res = av_image_fill_arrays(frame->data, frame->linesize,
                                    buf, avctx->pix_fmt,
                                    avctx->width, avctx->height, 1)) < 0)
        return res;
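
    /* A palette delivered as packet side data replaces the stored palette
     * and marks the frame accordingly. */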
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                     NULL);
        if (pal) {
            av_buffer_unref(&context->palette);
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
            if (!context->palette)
                return AVERROR(ENOMEM);
            memcpy(context->palette->data, pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        }
    }
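
    /* If the packet carries no in-band palette (or the format is
     * pseudo-paletted), expose the stored palette as the second data plane. */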
    if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
        (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)) {
        frame->buf[1] = av_buffer_ref(context->palette);
        if (!frame->buf[1])
            return AVERROR(ENOMEM);
        frame->data[1] = frame->buf[1]->data;
    }
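
    /* Some muxers pad BGR24 lines to a multiple of 4 bytes; if the packet is
     * large enough for that, widen the stride to match. */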
    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, frame);
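
    /* The YV12/YV16/YV24/YVU9 fourccs store the chroma planes in V, U order,
     * so swap them into the U, V order the pixel format expects. */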
    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, frame->data[1], frame->data[2]);
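
    /* QuickTime 'yuv2' stores chroma as signed values; toggling the sign bit
     * of every U/V byte converts it to the unsigned range of YUYV422. */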
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = frame->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += frame->linesize[0];
        }
    }

    *got_frame = 1;
    return buf_size;
}

static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    return 0;
}

AVCodec ff_rawvideo_decoder = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    .decode         = raw_decode,
};