You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

360 lines
11KB

  1. /*
  2. * Linux video grab interface
  3. * Copyright (c) 2000,2001 Fabrice Bellard.
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #undef __STRICT_ANSI__ //workaround due to broken kernel headers
  22. #include "config.h"
  23. #include "libavformat/avformat.h"
  24. #include "libavcodec/dsputil.h"
  25. #include <unistd.h>
  26. #include <fcntl.h>
  27. #include <sys/ioctl.h>
  28. #include <sys/mman.h>
  29. #include <sys/time.h>
  30. #define _LINUX_TIME_H 1
  31. #include <linux/videodev.h>
  32. #include <time.h>
  33. #include <strings.h>
/* Per-stream private state for the V4L1 grab device. */
typedef struct {
    int fd;                            /* file descriptor of the opened video device */
    int frame_format;                  /* see VIDEO_PALETTE_xxx */
    int use_mmap;                      /* nonzero: frames come from mmap'ed driver buffers, else read() */
    int width, height;                 /* capture size in pixels */
    int frame_rate;                    /* frames per frame_rate_base seconds (from ap->time_base.den) */
    int frame_rate_base;               /* time base numerator (from ap->time_base.num) */
    int64_t time_frame;                /* time of the next frame, in us scaled by frame_rate/frame_rate_base
                                          (adding 1000000 advances it by one frame period) */
    int frame_size;                    /* size in bytes of one raw frame */
    struct video_capability video_cap; /* device capabilities from VIDIOCGCAP */
    struct video_audio audio_saved;    /* audio state saved at open; restored (muted) on close */
    uint8_t *video_buf;                /* start of the mmap'ed capture area */
    struct video_mbuf gb_buffers;      /* mmap buffer layout from VIDIOCGMBUF */
    struct video_mmap gb_buf;          /* capture request passed to VIDIOCMCAPTURE */
    int gb_frame;                      /* index of the buffer currently being grabbed */
} VideoData;
/* Mapping between V4L palettes and FFmpeg pixel formats.
 * NOTE: table order matters — when the requested pix_fmt is rejected by the
 * driver, grab_read_header() tries these entries from top to bottom. */
static const struct {
    int palette;            /* VIDEO_PALETTE_xxx value understood by the driver */
    int depth;              /* bits per pixel, used to compute the raw frame size */
    enum PixelFormat pix_fmt;
} video_formats [] = {
    {.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = PIX_FMT_YUV420P },
    {.palette = VIDEO_PALETTE_YUV422,  .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
    {.palette = VIDEO_PALETTE_UYVY,    .depth = 16, .pix_fmt = PIX_FMT_UYVY422 },
    {.palette = VIDEO_PALETTE_YUYV,    .depth = 16, .pix_fmt = PIX_FMT_YUYV422 },
    /* NOTE: v4l uses BGR24, not RGB24 */
    {.palette = VIDEO_PALETTE_RGB24,   .depth = 24, .pix_fmt = PIX_FMT_BGR24   },
    {.palette = VIDEO_PALETTE_RGB565,  .depth = 16, .pix_fmt = PIX_FMT_BGR565  },
    {.palette = VIDEO_PALETTE_GREY,    .depth =  8, .pix_fmt = PIX_FMT_GRAY8   },
};
  64. static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
  65. {
  66. VideoData *s = s1->priv_data;
  67. AVStream *st;
  68. int width, height;
  69. int video_fd, frame_size;
  70. int ret, frame_rate, frame_rate_base;
  71. int desired_palette, desired_depth;
  72. struct video_tuner tuner;
  73. struct video_audio audio;
  74. struct video_picture pict;
  75. int j;
  76. int vformat_num = sizeof(video_formats) / sizeof(video_formats[0]);
  77. if (ap->width <= 0 || ap->height <= 0) {
  78. av_log(s1, AV_LOG_ERROR, "Wrong size (%dx%d)\n", ap->width, ap->height);
  79. return -1;
  80. }
  81. if (ap->time_base.den <= 0) {
  82. av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
  83. return -1;
  84. }
  85. width = ap->width;
  86. height = ap->height;
  87. frame_rate = ap->time_base.den;
  88. frame_rate_base = ap->time_base.num;
  89. if((unsigned)width > 32767 || (unsigned)height > 32767) {
  90. av_log(s1, AV_LOG_ERROR, "Capture size is out of range: %dx%d\n",
  91. width, height);
  92. return -1;
  93. }
  94. st = av_new_stream(s1, 0);
  95. if (!st)
  96. return AVERROR(ENOMEM);
  97. av_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
  98. s->width = width;
  99. s->height = height;
  100. s->frame_rate = frame_rate;
  101. s->frame_rate_base = frame_rate_base;
  102. video_fd = open(s1->filename, O_RDWR);
  103. if (video_fd < 0) {
  104. av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
  105. goto fail;
  106. }
  107. if (ioctl(video_fd,VIDIOCGCAP, &s->video_cap) < 0) {
  108. av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
  109. goto fail;
  110. }
  111. if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
  112. av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
  113. goto fail;
  114. }
  115. desired_palette = -1;
  116. desired_depth = -1;
  117. for (j = 0; j < vformat_num; j++) {
  118. if (ap->pix_fmt == video_formats[j].pix_fmt) {
  119. desired_palette = video_formats[j].palette;
  120. desired_depth = video_formats[j].depth;
  121. break;
  122. }
  123. }
  124. /* set tv standard */
  125. if (ap->standard && !ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
  126. if (!strcasecmp(ap->standard, "pal"))
  127. tuner.mode = VIDEO_MODE_PAL;
  128. else if (!strcasecmp(ap->standard, "secam"))
  129. tuner.mode = VIDEO_MODE_SECAM;
  130. else
  131. tuner.mode = VIDEO_MODE_NTSC;
  132. ioctl(video_fd, VIDIOCSTUNER, &tuner);
  133. }
  134. /* unmute audio */
  135. audio.audio = 0;
  136. ioctl(video_fd, VIDIOCGAUDIO, &audio);
  137. memcpy(&s->audio_saved, &audio, sizeof(audio));
  138. audio.flags &= ~VIDEO_AUDIO_MUTE;
  139. ioctl(video_fd, VIDIOCSAUDIO, &audio);
  140. ioctl(video_fd, VIDIOCGPICT, &pict);
  141. #if 0
  142. printf("v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
  143. pict.colour,
  144. pict.hue,
  145. pict.brightness,
  146. pict.contrast,
  147. pict.whiteness);
  148. #endif
  149. /* try to choose a suitable video format */
  150. pict.palette = desired_palette;
  151. pict.depth= desired_depth;
  152. if (desired_palette == -1 || (ret = ioctl(video_fd, VIDIOCSPICT, &pict)) < 0) {
  153. for (j = 0; j < vformat_num; j++) {
  154. pict.palette = video_formats[j].palette;
  155. pict.depth = video_formats[j].depth;
  156. if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
  157. break;
  158. }
  159. if (j >= vformat_num)
  160. goto fail1;
  161. }
  162. ret = ioctl(video_fd,VIDIOCGMBUF,&s->gb_buffers);
  163. if (ret < 0) {
  164. /* try to use read based access */
  165. struct video_window win;
  166. int val;
  167. win.x = 0;
  168. win.y = 0;
  169. win.width = width;
  170. win.height = height;
  171. win.chromakey = -1;
  172. win.flags = 0;
  173. ioctl(video_fd, VIDIOCSWIN, &win);
  174. s->frame_format = pict.palette;
  175. val = 1;
  176. ioctl(video_fd, VIDIOCCAPTURE, &val);
  177. s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
  178. s->use_mmap = 0;
  179. } else {
  180. s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_SHARED,video_fd,0);
  181. if ((unsigned char*)-1 == s->video_buf) {
  182. s->video_buf = mmap(0,s->gb_buffers.size,PROT_READ|PROT_WRITE,MAP_PRIVATE,video_fd,0);
  183. if ((unsigned char*)-1 == s->video_buf) {
  184. av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
  185. goto fail;
  186. }
  187. }
  188. s->gb_frame = 0;
  189. s->time_frame = av_gettime() * s->frame_rate / s->frame_rate_base;
  190. /* start to grab the first frame */
  191. s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
  192. s->gb_buf.height = height;
  193. s->gb_buf.width = width;
  194. s->gb_buf.format = pict.palette;
  195. ret = ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
  196. if (ret < 0) {
  197. if (errno != EAGAIN) {
  198. fail1:
  199. av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not support suitable format\n");
  200. } else {
  201. av_log(s1, AV_LOG_ERROR,"Fatal: grab device does not receive any video signal\n");
  202. }
  203. goto fail;
  204. }
  205. for (j = 1; j < s->gb_buffers.frames; j++) {
  206. s->gb_buf.frame = j;
  207. ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
  208. }
  209. s->frame_format = s->gb_buf.format;
  210. s->use_mmap = 1;
  211. }
  212. for (j = 0; j < vformat_num; j++) {
  213. if (s->frame_format == video_formats[j].palette) {
  214. frame_size = width * height * video_formats[j].depth / 8;
  215. st->codec->pix_fmt = video_formats[j].pix_fmt;
  216. break;
  217. }
  218. }
  219. if (j >= vformat_num)
  220. goto fail;
  221. s->fd = video_fd;
  222. s->frame_size = frame_size;
  223. st->codec->codec_type = CODEC_TYPE_VIDEO;
  224. st->codec->codec_id = CODEC_ID_RAWVIDEO;
  225. st->codec->width = width;
  226. st->codec->height = height;
  227. st->codec->time_base.den = frame_rate;
  228. st->codec->time_base.num = frame_rate_base;
  229. st->codec->bit_rate = frame_size * 1/av_q2d(st->codec->time_base) * 8;
  230. return 0;
  231. fail:
  232. if (video_fd >= 0)
  233. close(video_fd);
  234. return AVERROR(EIO);
  235. }
  236. static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
  237. {
  238. uint8_t *ptr;
  239. while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
  240. (errno == EAGAIN || errno == EINTR));
  241. ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
  242. memcpy(buf, ptr, s->frame_size);
  243. /* Setup to capture the next frame */
  244. s->gb_buf.frame = s->gb_frame;
  245. if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
  246. if (errno == EAGAIN)
  247. av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
  248. else
  249. av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
  250. return AVERROR(EIO);
  251. }
  252. /* This is now the grabbing frame */
  253. s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
  254. return s->frame_size;
  255. }
  256. static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
  257. {
  258. VideoData *s = s1->priv_data;
  259. int64_t curtime, delay;
  260. struct timespec ts;
  261. /* Calculate the time of the next frame */
  262. s->time_frame += INT64_C(1000000);
  263. /* wait based on the frame rate */
  264. for(;;) {
  265. curtime = av_gettime();
  266. delay = s->time_frame * s->frame_rate_base / s->frame_rate - curtime;
  267. if (delay <= 0) {
  268. if (delay < INT64_C(-1000000) * s->frame_rate_base / s->frame_rate) {
  269. /* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
  270. s->time_frame += INT64_C(1000000);
  271. }
  272. break;
  273. }
  274. ts.tv_sec = delay / 1000000;
  275. ts.tv_nsec = (delay % 1000000) * 1000;
  276. nanosleep(&ts, NULL);
  277. }
  278. if (av_new_packet(pkt, s->frame_size) < 0)
  279. return AVERROR(EIO);
  280. pkt->pts = curtime;
  281. /* read one frame */
  282. if (s->use_mmap) {
  283. return v4l_mm_read_picture(s, pkt->data);
  284. } else {
  285. if (read(s->fd, pkt->data, pkt->size) != pkt->size)
  286. return AVERROR(EIO);
  287. return s->frame_size;
  288. }
  289. }
  290. static int grab_read_close(AVFormatContext *s1)
  291. {
  292. VideoData *s = s1->priv_data;
  293. if (s->use_mmap)
  294. munmap(s->video_buf, s->gb_buffers.size);
  295. /* mute audio. we must force it because the BTTV driver does not
  296. return its state correctly */
  297. s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
  298. ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
  299. close(s->fd);
  300. return 0;
  301. }
  302. AVInputFormat v4l_demuxer = {
  303. "video4linux",
  304. NULL_IF_CONFIG_SMALL("video grab"),
  305. sizeof(VideoData),
  306. NULL,
  307. grab_read_header,
  308. grab_read_packet,
  309. grab_read_close,
  310. .flags = AVFMT_NOFILE,
  311. };