/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "internal.h" /* avpriv_set_systematic_pal2() */
#include "raw.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"

typedef struct RawVideoContext {
    AVBufferRef *palette;   // current palette, from side data or a default
    int frame_size;         /* size of the frame in bytes */
    int flip;               // picture is stored bottom-up
    int is_2_4_bpp;         // 2 or 4 bpp raw in avi/mov
    int is_yuv2;            // QuickTime 'yuv2' needing chroma sign conversion
} RawVideoContext;
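
/* Bit depth -> pixel format mappings used when the container only signals
 * bits_per_coded_sample: one table for AVI-style raw, one for MOV 'raw '. */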
static const PixelFormatTag pix_fmt_bps_avi[] = {
    { AV_PIX_FMT_PAL8,    4 },
    { AV_PIX_FMT_PAL8,    8 },
    { AV_PIX_FMT_RGB444, 12 },
    { AV_PIX_FMT_RGB555, 15 },
    { AV_PIX_FMT_RGB555, 16 },
    { AV_PIX_FMT_BGR24,  24 },
    { AV_PIX_FMT_RGB32,  32 },
    { AV_PIX_FMT_NONE,    0 },
};

static const PixelFormatTag pix_fmt_bps_mov[] = {
    { AV_PIX_FMT_MONOWHITE,  1 },
    { AV_PIX_FMT_PAL8,       2 },
    { AV_PIX_FMT_PAL8,       4 },
    { AV_PIX_FMT_PAL8,       8 },
    // FIXME swscale does not support 16 bit in .mov, sample 16bit.mov
    // http://developer.apple.com/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
    { AV_PIX_FMT_RGB555BE,  16 },
    { AV_PIX_FMT_RGB24,     24 },
    { AV_PIX_FMT_ARGB,      32 },
    { AV_PIX_FMT_MONOWHITE, 33 },
    { AV_PIX_FMT_NONE,       0 },
};
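
/* Scan a PixelFormatTag list for a matching fourcc (or bit depth, for the
 * tables above) and fall back to YUV420P when nothing matches. */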
static enum AVPixelFormat find_pix_fmt(const PixelFormatTag *tags,
                                       unsigned int fourcc)
{
    while (tags->pix_fmt >= 0) {
        if (tags->fourcc == fourcc)
            return tags->pix_fmt;
        tags++;
    }
    return AV_PIX_FMT_YUV420P;
}
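
/* Pick the output pixel format from the codec tag or coded bit depth,
 * allocate a default palette for (pseudo-)paletted formats, and note
 * whether the stream needs 2/4 bpp expansion, vertical flipping or
 * 'yuv2' chroma conversion at decode time. */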
static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    if (avctx->codec_tag == MKTAG('r', 'a', 'w', ' '))
        avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_mov,
                                      avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_avi,
                                      avctx->bits_per_coded_sample);
    else if (avctx->codec_tag)
        avctx->pix_fmt = find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = find_pix_fmt(pix_fmt_bps_avi,
                                      avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

    if (desc->flags & (AV_PIX_FMT_FLAG_PAL | AV_PIX_FMT_FLAG_PSEUDOPAL)) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
        if (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)
            avpriv_set_systematic_pal2((uint32_t *)context->palette->data,
                                       avctx->pix_fmt);
        else
            memset(context->palette->data, 0, AVPALETTE_SIZE);
    }

    context->frame_size = avpicture_get_size(avctx->pix_fmt, avctx->width,
                                             avctx->height);

    if ((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
        avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
        (!avctx->codec_tag || avctx->codec_tag == MKTAG('r', 'a', 'w', ' ')))
        context->is_2_4_bpp = 1;

    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        context->flip = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    return 0;
}
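
/* Present a bottom-up picture right side up: point data[0] at the last
 * row and negate the stride. */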
static void flip(AVCodecContext *avctx, AVPicture *picture)
{
    picture->data[0]     += picture->linesize[0] * (avctx->height - 1);
    picture->linesize[0] *= -1;
}
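
/* Turn one packet into one frame: reference the packet buffer directly when
 * possible (or copy it when repacking is needed), then apply the per-format
 * fixups: 2/4 bpp expansion, palette side data, BGR24 line alignment,
 * flipping, chroma plane swapping and 'yuv2' chroma conversion. */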
static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    RawVideoContext *context = avctx->priv_data;
    const uint8_t *buf        = avpkt->data;
    int buf_size              = avpkt->size;
    int need_copy             = !avpkt->buf || context->is_2_4_bpp ||
                                context->is_yuv2;
    int res;

    AVFrame   *frame   = data;
    AVPicture *picture = data;

    frame->pict_type        = AV_PICTURE_TYPE_I;
    frame->key_frame        = 1;
    frame->reordered_opaque = avctx->reordered_opaque;
    frame->pkt_pts          = avctx->pkt->pts;

    if (buf_size < context->frame_size - (avctx->pix_fmt == AV_PIX_FMT_PAL8 ?
                                          AVPALETTE_SIZE : 0))
        return -1;

    // reference the packet buffer when the data can be used as is,
    // otherwise allocate a frame-sized buffer to repack into
    if (need_copy)
        frame->buf[0] = av_buffer_alloc(context->frame_size);
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

    // 2 bpp and 4 bpp raw in avi and mov (yes this is ugly ...)
    if (context->is_2_4_bpp) {
        int i;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - AVPALETTE_SIZE;
        if (avctx->bits_per_coded_sample == 4) {
            // expand two 4 bpp pixels per byte into one byte each
            for (i = 0; 2 * i + 1 < buf_size; i++) {
                dst[2 * i + 0] = buf[i] >> 4;
                dst[2 * i + 1] = buf[i] & 15;
            }
        } else {
            // expand four 2 bpp pixels per byte into one byte each
            for (i = 0; 4 * i + 3 < buf_size; i++) {
                dst[4 * i + 0] = buf[i] >> 6;
                dst[4 * i + 1] = buf[i] >> 4 & 3;
                dst[4 * i + 2] = buf[i] >> 2 & 3;
                dst[4 * i + 3] = buf[i]      & 3;
            }
        }
        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, FFMIN(buf_size, context->frame_size));
        buf = frame->buf[0]->data;
    }

    // for these tags the frame data sits at the end of the packet
    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    if ((res = avpicture_fill(picture, buf, avctx->pix_fmt,
                              avctx->width, avctx->height)) < 0)
        return res;

    // take an updated palette from packet side data, if present
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                     NULL);
        if (pal) {
            av_buffer_unref(&context->palette);
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
            if (!context->palette)
                return AVERROR(ENOMEM);
            memcpy(context->palette->data, pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        }
    }

    if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
        (desc->flags & AV_PIX_FMT_FLAG_PSEUDOPAL)) {
        frame->buf[1] = av_buffer_ref(context->palette);
        if (!frame->buf[1])
            return AVERROR(ENOMEM);
        frame->data[1] = frame->buf[1]->data;
    }

    // use 4-byte-aligned lines if the packet is large enough (padded BGR24)
    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, picture);

    // these fourccs store the chroma planes in the opposite (V, U) order
    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, picture->data[1], picture->data[2]);

    // 'yuv2' uses signed chroma; flip the sign bit to get unsigned YUYV422
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = picture->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += picture->linesize[0];
        }
    }

    *got_frame = 1;
    return buf_size;
}
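
/* Release the cached palette on close. */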
static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    return 0;
}

AVCodec ff_rawvideo_decoder = {
    .name           = "rawvideo",
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    .decode         = raw_decode,
};