You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

731 lines
20KB

  1. /*
  2. * V4L2 context helper functions.
  3. *
  4. * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
  5. * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. #include <linux/videodev2.h>
  24. #include <sys/ioctl.h>
  25. #include <sys/mman.h>
  26. #include <unistd.h>
  27. #include <fcntl.h>
  28. #include <poll.h>
  29. #include "libavcodec/avcodec.h"
  30. #include "libavcodec/internal.h"
  31. #include "v4l2_buffers.h"
  32. #include "v4l2_fmt.h"
  33. #include "v4l2_m2m.h"
/* Result of format negotiation: carries the value(s) to write back into the
 * context, with a flag per field saying whether it is valid. */
struct v4l2_format_update {
    uint32_t v4l2_fmt;         /* V4L2 fourcc; applied only when update_v4l2 is set */
    int update_v4l2;

    enum AVPixelFormat av_fmt; /* applied to ctx->av_pix_fmt only when update_avfmt is set */
    int update_avfmt;
};
/**
 * Map a V4L2Context back to the V4L2m2mContext that embeds it.
 *
 * The m2m context contains one output and one capture context as direct
 * members; the buffer type tells us which member this ctx is.
 */
static inline V4L2m2mContext *ctx_to_m2mctx(V4L2Context *ctx)
{
    return V4L2_TYPE_IS_OUTPUT(ctx->type) ?
        container_of(ctx, V4L2m2mContext, output) :
        container_of(ctx, V4L2m2mContext, capture);
}
/* av_log context for this V4L2Context: the owning m2m context's priv field. */
static inline AVClass *logger(V4L2Context *ctx)
{
    return ctx_to_m2mctx(ctx)->priv;
}
  50. static inline unsigned int v4l2_get_width(struct v4l2_format *fmt)
  51. {
  52. return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width;
  53. }
  54. static inline unsigned int v4l2_get_height(struct v4l2_format *fmt)
  55. {
  56. return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height;
  57. }
  58. static inline unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2_format *fmt2)
  59. {
  60. struct v4l2_format *fmt1 = &ctx->format;
  61. int ret = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
  62. fmt1->fmt.pix_mp.width != fmt2->fmt.pix_mp.width ||
  63. fmt1->fmt.pix_mp.height != fmt2->fmt.pix_mp.height
  64. :
  65. fmt1->fmt.pix.width != fmt2->fmt.pix.width ||
  66. fmt1->fmt.pix.height != fmt2->fmt.pix.height;
  67. if (ret)
  68. av_log(logger(ctx), AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n",
  69. ctx->name,
  70. v4l2_get_width(fmt1), v4l2_get_height(fmt1),
  71. v4l2_get_width(fmt2), v4l2_get_height(fmt2));
  72. return ret;
  73. }
  74. static inline int v4l2_type_supported(V4L2Context *ctx)
  75. {
  76. return ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE ||
  77. ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE ||
  78. ctx->type == V4L2_BUF_TYPE_VIDEO_CAPTURE ||
  79. ctx->type == V4L2_BUF_TYPE_VIDEO_OUTPUT;
  80. }
  81. static inline int v4l2_get_framesize_compressed(V4L2Context* ctx, int width, int height)
  82. {
  83. V4L2m2mContext *s = ctx_to_m2mctx(ctx);
  84. const int SZ_4K = 0x1000;
  85. int size;
  86. if (s->avctx && av_codec_is_decoder(s->avctx->codec))
  87. return ((width * height * 3 / 2) / 2) + 128;
  88. /* encoder */
  89. size = FFALIGN(height, 32) * FFALIGN(width, 32) * 3 / 2 / 2;
  90. return FFALIGN(size, SZ_4K);
  91. }
  92. static inline void v4l2_save_to_context(V4L2Context* ctx, struct v4l2_format_update *fmt)
  93. {
  94. ctx->format.type = ctx->type;
  95. if (fmt->update_avfmt)
  96. ctx->av_pix_fmt = fmt->av_fmt;
  97. if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
  98. /* update the sizes to handle the reconfiguration of the capture stream at runtime */
  99. ctx->format.fmt.pix_mp.height = ctx->height;
  100. ctx->format.fmt.pix_mp.width = ctx->width;
  101. if (fmt->update_v4l2) {
  102. ctx->format.fmt.pix_mp.pixelformat = fmt->v4l2_fmt;
  103. /* s5p-mfc requires the user to specify a buffer size */
  104. ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage =
  105. v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
  106. }
  107. } else {
  108. ctx->format.fmt.pix.height = ctx->height;
  109. ctx->format.fmt.pix.width = ctx->width;
  110. if (fmt->update_v4l2) {
  111. ctx->format.fmt.pix.pixelformat = fmt->v4l2_fmt;
  112. /* s5p-mfc requires the user to specify a buffer size */
  113. ctx->format.fmt.pix.sizeimage =
  114. v4l2_get_framesize_compressed(ctx, ctx->width, ctx->height);
  115. }
  116. }
  117. }
  118. /**
  119. * returns 1 if reinit was successful, negative if it failed
  120. * returns 0 if reinit was not executed
  121. */
  122. static int v4l2_handle_event(V4L2Context *ctx)
  123. {
  124. V4L2m2mContext *s = ctx_to_m2mctx(ctx);
  125. struct v4l2_format cap_fmt = s->capture.format;
  126. struct v4l2_format out_fmt = s->output.format;
  127. struct v4l2_event evt = { 0 };
  128. int full_reinit, reinit, ret;
  129. ret = ioctl(s->fd, VIDIOC_DQEVENT, &evt);
  130. if (ret < 0) {
  131. av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name);
  132. return 0;
  133. }
  134. if (evt.type != V4L2_EVENT_SOURCE_CHANGE)
  135. return 0;
  136. ret = ioctl(s->fd, VIDIOC_G_FMT, &out_fmt);
  137. if (ret) {
  138. av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->output.name);
  139. return 0;
  140. }
  141. ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt);
  142. if (ret) {
  143. av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name);
  144. return 0;
  145. }
  146. full_reinit = v4l2_resolution_changed(&s->output, &out_fmt);
  147. if (full_reinit) {
  148. s->output.height = v4l2_get_height(&out_fmt);
  149. s->output.width = v4l2_get_width(&out_fmt);
  150. }
  151. reinit = v4l2_resolution_changed(&s->capture, &cap_fmt);
  152. if (reinit) {
  153. s->capture.height = v4l2_get_height(&cap_fmt);
  154. s->capture.width = v4l2_get_width(&cap_fmt);
  155. }
  156. if (full_reinit || reinit)
  157. s->reinit = 1;
  158. if (full_reinit) {
  159. ret = ff_v4l2_m2m_codec_full_reinit(s);
  160. if (ret) {
  161. av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_full_reinit\n");
  162. return -EINVAL;
  163. }
  164. goto reinit_run;
  165. }
  166. if (reinit) {
  167. if (s->avctx)
  168. ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height);
  169. if (ret < 0)
  170. av_log(logger(ctx), AV_LOG_WARNING, "update avcodec height and width\n");
  171. ret = ff_v4l2_m2m_codec_reinit(s);
  172. if (ret) {
  173. av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_reinit\n");
  174. return -EINVAL;
  175. }
  176. goto reinit_run;
  177. }
  178. /* dummy event received */
  179. return 0;
  180. /* reinit executed */
  181. reinit_run:
  182. return 1;
  183. }
  184. static int v4l2_stop_decode(V4L2Context *ctx)
  185. {
  186. struct v4l2_decoder_cmd cmd = {
  187. .cmd = V4L2_DEC_CMD_STOP,
  188. .flags = 0,
  189. };
  190. int ret;
  191. ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DECODER_CMD, &cmd);
  192. if (ret) {
  193. /* DECODER_CMD is optional */
  194. if (errno == ENOTTY)
  195. return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
  196. else
  197. return AVERROR(errno);
  198. }
  199. return 0;
  200. }
  201. static int v4l2_stop_encode(V4L2Context *ctx)
  202. {
  203. struct v4l2_encoder_cmd cmd = {
  204. .cmd = V4L2_ENC_CMD_STOP,
  205. .flags = 0,
  206. };
  207. int ret;
  208. ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENCODER_CMD, &cmd);
  209. if (ret) {
  210. /* ENCODER_CMD is optional */
  211. if (errno == ENOTTY)
  212. return ff_v4l2_context_set_status(ctx, VIDIOC_STREAMOFF);
  213. else
  214. return AVERROR(errno);
  215. }
  216. return 0;
  217. }
/**
 * Poll the m2m device and dequeue the next completed buffer for this context.
 *
 * @param ctx     the context (capture or output side) to dequeue from
 * @param timeout poll() timeout in ms: -1 blocks, 0 returns immediately
 * @return the dequeued V4L2Buffer (marked AVAILABLE), or NULL when nothing
 *         could be dequeued; fatal conditions additionally set ctx->done
 */
static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout)
{
    struct v4l2_plane planes[VIDEO_MAX_PLANES];
    struct v4l2_buffer buf = { 0 };
    V4L2Buffer* avbuf = NULL;
    struct pollfd pfd = {
        .events = POLLIN | POLLRDNORM | POLLPRI | POLLOUT | POLLWRNORM, /* default blocking capture */
        .fd = ctx_to_m2mctx(ctx)->fd,
    };
    int i, ret;

    /* if we are draining and there are no more capture buffers queued in the driver we are done */
    if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) {
        for (i = 0; i < ctx->num_buffers; i++) {
            /* capture buffer initialization happens during decode hence
             * detection happens at runtime
             */
            if (!ctx->buffers)
                break;
            if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER)
                goto start;
        }
        /* nothing left in flight: the drain is complete */
        ctx->done = 1;
        return NULL;
    }

start:
    /* narrow the poll events to what this queue direction can produce */
    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        pfd.events = POLLOUT | POLLWRNORM;
    else {
        /* no need to listen to requests for more input while draining */
        if (ctx_to_m2mctx(ctx)->draining)
            pfd.events = POLLIN | POLLRDNORM | POLLPRI;
    }

    /* wait for activity, retrying on signal interruption */
    for (;;) {
        ret = poll(&pfd, 1, timeout);
        if (ret > 0)
            break;
        if (errno == EINTR)
            continue;
        /* timeout (ret == 0) or poll error: nothing to hand out */
        return NULL;
    }

    /* 0. handle errors */
    if (pfd.revents & POLLERR) {
        /* if we are trying to get free buffers but none have been queued yet
           no need to raise a warning */
        if (timeout == 0) {
            for (i = 0; i < ctx->num_buffers; i++) {
                if (ctx->buffers[i].status != V4L2BUF_AVAILABLE)
                    av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);
            }
        }
        else
            av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name);

        return NULL;
    }

    /* 1. handle resolution changes */
    if (pfd.revents & POLLPRI) {
        ret = v4l2_handle_event(ctx);
        if (ret < 0) {
            /* if re-init failed, abort */
            ctx->done = 1;
            return NULL;
        }
        if (ret) {
            /* if re-init was successful drop the buffer (if there was one)
             * since we had to reconfigure capture (unmap all buffers)
             */
            return NULL;
        }
    }

    /* 2. dequeue the buffer */
    if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) {
        if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) {
            /* there is a capture buffer ready */
            if (pfd.revents & (POLLIN | POLLRDNORM))
                goto dequeue;

            /* the driver is ready to accept more input; instead of waiting for the capture
             * buffer to complete we return NULL so input can proceed (we are single threaded)
             */
            if (pfd.revents & (POLLOUT | POLLWRNORM))
                return NULL;
        }

dequeue:
        memset(&buf, 0, sizeof(buf));
        buf.memory = V4L2_MEMORY_MMAP;
        buf.type = ctx->type;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            /* multi-planar DQBUF needs a caller-supplied plane array */
            memset(planes, 0, sizeof(planes));
            buf.length = VIDEO_MAX_PLANES;
            buf.m.planes = planes;
        }

        ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf);
        if (ret) {
            if (errno != EAGAIN) {
                ctx->done = 1;
                /* EPIPE means the drain completed; anything else is logged */
                if (errno != EPIPE)
                    av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n",
                           ctx->name, av_err2str(AVERROR(errno)));
            }
            return NULL;
        }

        avbuf = &ctx->buffers[buf.index];
        avbuf->status = V4L2BUF_AVAILABLE;
        avbuf->buf = buf;
        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            /* the kernel filled our stack-local plane array; keep a private copy */
            memcpy(avbuf->planes, planes, sizeof(planes));
            avbuf->buf.m.planes = avbuf->planes;
        }

        return avbuf;
    }

    return NULL;
}
  329. static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx)
  330. {
  331. int timeout = 0; /* return when no more buffers to dequeue */
  332. int i;
  333. /* get back as many output buffers as possible */
  334. if (V4L2_TYPE_IS_OUTPUT(ctx->type)) {
  335. do {
  336. } while (v4l2_dequeue_v4l2buf(ctx, timeout));
  337. }
  338. for (i = 0; i < ctx->num_buffers; i++) {
  339. if (ctx->buffers[i].status == V4L2BUF_AVAILABLE)
  340. return &ctx->buffers[i];
  341. }
  342. return NULL;
  343. }
  344. static int v4l2_release_buffers(V4L2Context* ctx)
  345. {
  346. struct v4l2_requestbuffers req = {
  347. .memory = V4L2_MEMORY_MMAP,
  348. .type = ctx->type,
  349. .count = 0, /* 0 -> unmaps buffers from the driver */
  350. };
  351. int i, j;
  352. for (i = 0; i < ctx->num_buffers; i++) {
  353. V4L2Buffer *buffer = &ctx->buffers[i];
  354. for (j = 0; j < buffer->num_planes; j++) {
  355. struct V4L2Plane_info *p = &buffer->plane_info[j];
  356. if (p->mm_addr && p->length)
  357. if (munmap(p->mm_addr, p->length) < 0)
  358. av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s))\n", ctx->name, av_err2str(AVERROR(errno)));
  359. }
  360. }
  361. return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req);
  362. }
  363. static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt)
  364. {
  365. struct v4l2_format *fmt = &ctx->format;
  366. uint32_t v4l2_fmt;
  367. int ret;
  368. v4l2_fmt = ff_v4l2_format_avfmt_to_v4l2(pixfmt);
  369. if (!v4l2_fmt)
  370. return AVERROR(EINVAL);
  371. if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type))
  372. fmt->fmt.pix_mp.pixelformat = v4l2_fmt;
  373. else
  374. fmt->fmt.pix.pixelformat = v4l2_fmt;
  375. fmt->type = ctx->type;
  376. ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, fmt);
  377. if (ret)
  378. return AVERROR(EINVAL);
  379. return 0;
  380. }
  381. static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p)
  382. {
  383. enum AVPixelFormat pixfmt = ctx->av_pix_fmt;
  384. struct v4l2_fmtdesc fdesc;
  385. int ret;
  386. memset(&fdesc, 0, sizeof(fdesc));
  387. fdesc.type = ctx->type;
  388. if (pixfmt != AV_PIX_FMT_NONE) {
  389. ret = v4l2_try_raw_format(ctx, pixfmt);
  390. if (!ret)
  391. return 0;
  392. }
  393. for (;;) {
  394. ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
  395. if (ret)
  396. return AVERROR(EINVAL);
  397. pixfmt = ff_v4l2_format_v4l2_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO);
  398. ret = v4l2_try_raw_format(ctx, pixfmt);
  399. if (ret){
  400. fdesc.index++;
  401. continue;
  402. }
  403. *p = pixfmt;
  404. return 0;
  405. }
  406. return AVERROR(EINVAL);
  407. }
  408. static int v4l2_get_coded_format(V4L2Context* ctx, uint32_t *p)
  409. {
  410. struct v4l2_fmtdesc fdesc;
  411. uint32_t v4l2_fmt;
  412. int ret;
  413. /* translate to a valid v4l2 format */
  414. v4l2_fmt = ff_v4l2_format_avcodec_to_v4l2(ctx->av_codec_id);
  415. if (!v4l2_fmt)
  416. return AVERROR(EINVAL);
  417. /* check if the driver supports this format */
  418. memset(&fdesc, 0, sizeof(fdesc));
  419. fdesc.type = ctx->type;
  420. for (;;) {
  421. ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_ENUM_FMT, &fdesc);
  422. if (ret)
  423. return AVERROR(EINVAL);
  424. if (fdesc.pixelformat == v4l2_fmt)
  425. break;
  426. fdesc.index++;
  427. }
  428. *p = v4l2_fmt;
  429. return 0;
  430. }
  431. /*****************************************************************************
  432. *
  433. * V4L2 Context Interface
  434. *
  435. *****************************************************************************/
  436. int ff_v4l2_context_set_status(V4L2Context* ctx, uint32_t cmd)
  437. {
  438. int type = ctx->type;
  439. int ret;
  440. ret = ioctl(ctx_to_m2mctx(ctx)->fd, cmd, &type);
  441. if (ret < 0)
  442. return AVERROR(errno);
  443. ctx->streamon = (cmd == VIDIOC_STREAMON);
  444. return 0;
  445. }
  446. int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame)
  447. {
  448. V4L2m2mContext *s = ctx_to_m2mctx(ctx);
  449. V4L2Buffer* avbuf;
  450. int ret;
  451. if (!frame) {
  452. ret = v4l2_stop_encode(ctx);
  453. if (ret)
  454. av_log(logger(ctx), AV_LOG_ERROR, "%s stop_encode\n", ctx->name);
  455. s->draining= 1;
  456. return 0;
  457. }
  458. avbuf = v4l2_getfree_v4l2buf(ctx);
  459. if (!avbuf)
  460. return AVERROR(ENOMEM);
  461. ret = ff_v4l2_buffer_avframe_to_buf(frame, avbuf);
  462. if (ret)
  463. return ret;
  464. return ff_v4l2_buffer_enqueue(avbuf);
  465. }
  466. int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt)
  467. {
  468. V4L2m2mContext *s = ctx_to_m2mctx(ctx);
  469. V4L2Buffer* avbuf;
  470. int ret;
  471. if (!pkt->size) {
  472. ret = v4l2_stop_decode(ctx);
  473. if (ret)
  474. av_log(logger(ctx), AV_LOG_ERROR, "%s stop_decode\n", ctx->name);
  475. s->draining = 1;
  476. return 0;
  477. }
  478. avbuf = v4l2_getfree_v4l2buf(ctx);
  479. if (!avbuf)
  480. return AVERROR(EAGAIN);
  481. ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf);
  482. if (ret)
  483. return ret;
  484. return ff_v4l2_buffer_enqueue(avbuf);
  485. }
  486. int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame)
  487. {
  488. V4L2Buffer* avbuf = NULL;
  489. /*
  490. * blocks until:
  491. * 1. decoded frame available
  492. * 2. an input buffer is ready to be dequeued
  493. */
  494. avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
  495. if (!avbuf) {
  496. if (ctx->done)
  497. return AVERROR_EOF;
  498. return AVERROR(EAGAIN);
  499. }
  500. return ff_v4l2_buffer_buf_to_avframe(frame, avbuf);
  501. }
  502. int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt)
  503. {
  504. V4L2Buffer* avbuf = NULL;
  505. /*
  506. * blocks until:
  507. * 1. encoded packet available
  508. * 2. an input buffer ready to be dequeued
  509. */
  510. avbuf = v4l2_dequeue_v4l2buf(ctx, -1);
  511. if (!avbuf) {
  512. if (ctx->done)
  513. return AVERROR_EOF;
  514. return AVERROR(EAGAIN);
  515. }
  516. return ff_v4l2_buffer_buf_to_avpkt(pkt, avbuf);
  517. }
  518. int ff_v4l2_context_get_format(V4L2Context* ctx, int probe)
  519. {
  520. struct v4l2_format_update fmt = { 0 };
  521. int ret;
  522. if (ctx->av_codec_id == AV_CODEC_ID_RAWVIDEO) {
  523. ret = v4l2_get_raw_format(ctx, &fmt.av_fmt);
  524. if (ret)
  525. return ret;
  526. fmt.update_avfmt = !probe;
  527. v4l2_save_to_context(ctx, &fmt);
  528. /* format has been tried already */
  529. return ret;
  530. }
  531. ret = v4l2_get_coded_format(ctx, &fmt.v4l2_fmt);
  532. if (ret)
  533. return ret;
  534. fmt.update_v4l2 = 1;
  535. v4l2_save_to_context(ctx, &fmt);
  536. return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_TRY_FMT, &ctx->format);
  537. }
  538. int ff_v4l2_context_set_format(V4L2Context* ctx)
  539. {
  540. return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format);
  541. }
  542. void ff_v4l2_context_release(V4L2Context* ctx)
  543. {
  544. int ret;
  545. if (!ctx->buffers)
  546. return;
  547. ret = v4l2_release_buffers(ctx);
  548. if (ret)
  549. av_log(logger(ctx), AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name);
  550. av_free(ctx->buffers);
  551. ctx->buffers = NULL;
  552. }
  553. int ff_v4l2_context_init(V4L2Context* ctx)
  554. {
  555. V4L2m2mContext *s = ctx_to_m2mctx(ctx);
  556. struct v4l2_requestbuffers req;
  557. int ret, i;
  558. if (!v4l2_type_supported(ctx)) {
  559. av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type);
  560. return AVERROR_PATCHWELCOME;
  561. }
  562. ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format);
  563. if (ret)
  564. av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name);
  565. memset(&req, 0, sizeof(req));
  566. req.count = ctx->num_buffers;
  567. req.memory = V4L2_MEMORY_MMAP;
  568. req.type = ctx->type;
  569. ret = ioctl(s->fd, VIDIOC_REQBUFS, &req);
  570. if (ret < 0) {
  571. av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, strerror(errno));
  572. return AVERROR(errno);
  573. }
  574. ctx->num_buffers = req.count;
  575. ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer));
  576. if (!ctx->buffers) {
  577. av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name);
  578. return AVERROR(ENOMEM);
  579. }
  580. for (i = 0; i < req.count; i++) {
  581. ctx->buffers[i].context = ctx;
  582. ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i);
  583. if (ret < 0) {
  584. av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret));
  585. goto error;
  586. }
  587. }
  588. av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name,
  589. V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat),
  590. req.count,
  591. v4l2_get_width(&ctx->format),
  592. v4l2_get_height(&ctx->format),
  593. V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage,
  594. V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline);
  595. return 0;
  596. error:
  597. v4l2_release_buffers(ctx);
  598. av_free(ctx->buffers);
  599. ctx->buffers = NULL;
  600. return ret;
  601. }