/*
 * V4L2 buffer helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <stdatomic.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "libavutil/pixdesc.h"
#include "v4l2_context.h"
#include "v4l2_buffers.h"
#include "v4l2_m2m.h"

#define USEC_PER_SEC 1000000

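/* Map a buffer back to its owning V4L2m2mContext: an output buffer belongs to
 * the m2m context's output queue, a capture buffer to its capture queue. */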
static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
{
    return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
        container_of(buf->context, V4L2m2mContext, output) :
        container_of(buf->context, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Buffer *buf)
{
    return buf_to_m2mctx(buf)->avctx;
}

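/* Store a codec-timebase pts on the V4L2 buffer, rescaled to the
 * microsecond struct timeval used by the V4L2 API. */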
static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
{
    V4L2m2mContext *s = buf_to_m2mctx(out);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    if (pts == AV_NOPTS_VALUE)
        pts = 0;

    /* convert pts to v4l2 timebase */
    v4l2_pts = av_rescale_q(pts, s->avctx->time_base, v4l2_timebase);
    out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
    out->buf.timestamp.tv_sec  = v4l2_pts / USEC_PER_SEC;
}

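/* Inverse of v4l2_set_pts(): rebuild the pts from the buffer's timeval and
 * rescale it back to the codec timebase. */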
static inline int64_t v4l2_get_pts(V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    /* convert pts back to encoder timebase */
    v4l2_pts = (int64_t)avbuf->buf.timestamp.tv_sec * USEC_PER_SEC +
                        avbuf->buf.timestamp.tv_usec;

    return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
}

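/* The four helpers below translate the colorimetry reported by the driver in
 * the negotiated v4l2_format into the corresponding libavutil enums, falling
 * back to UNSPECIFIED when V4L2 gives no usable hint. */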
static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
    case V4L2_YCBCR_ENC_XV601:
    case V4L2_YCBCR_ENC_601: return AVCOL_PRI_BT470M;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
    case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
    default:
        break;
    }

    return AVCOL_PRI_UNSPECIFIED;
}

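/* Quantization maps directly: limited range to MPEG, full range to JPEG. */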
static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
{
    enum v4l2_quantization qt;

    qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.quantization :
        buf->context->format.fmt.pix.quantization;

    switch (qt) {
    case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
    default:
        break;
    }

    return AVCOL_RANGE_UNSPECIFIED;
}

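/* BT.2020 needs the ycbcr_enc to disambiguate constant from non-constant
 * luminance; everything else maps straight from the colorspace field. */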
static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (cs) {
    case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
    case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
    case V4L2_COLORSPACE_BT2020:
        if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
            return AVCOL_SPC_BT2020_CL;
        else
            return AVCOL_SPC_BT2020_NCL;
    default:
        break;
    }

    return AVCOL_SPC_UNSPECIFIED;
}

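/* The transfer characteristic is taken from xfer_func when set, then guessed
 * from the colorspace, and finally from the extended-gamut ycbcr encodings. */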
static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_xfer_func xfer;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.xfer_func :
        buf->context->format.fmt.pix.xfer_func;

    switch (xfer) {
    case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
    case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
    default:
        break;
    }

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
    default:
        break;
    }

    return AVCOL_TRC_UNSPECIFIED;
}

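/* AVBufferRef free callback. When the last wrapper around a plane of this
 * V4L2 buffer is released, drop the global refcount and either recycle the
 * buffer back to the driver, park it while draining, or signal a pending
 * re-initialization waiting on refsync. */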
static void v4l2_free_buffer(void *opaque, uint8_t *unused)
{
    V4L2Buffer* avbuf = opaque;
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);

    if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
        atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);

        if (s->reinit) {
            if (!atomic_load(&s->refcount))
                sem_post(&s->refsync);
        } else {
            if (s->draining) {
                /* no need to queue more buffers to the driver */
                avbuf->status = V4L2BUF_AVAILABLE;
            } else if (avbuf->context->streamon)
                ff_v4l2_buffer_enqueue(avbuf);
        }

        av_buffer_unref(&avbuf->context_ref);
    }
}

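/* Wrap one mmap'ed plane of a V4L2 buffer in an AVBufferRef without copying.
 * The first reference also takes a reference on the m2m context itself, so it
 * cannot be torn down while buffers are still in flight. */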
static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
{
    V4L2m2mContext *s = buf_to_m2mctx(in);

    if (plane >= in->num_planes)
        return AVERROR(EINVAL);

    /* even though most encoders return 0 in data_offset, encoding vp8 does require this value */
    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
    if (!*buf)
        return AVERROR(ENOMEM);

    if (in->context_ref)
        atomic_fetch_add(&in->context_refcount, 1);
    else {
        in->context_ref = av_buffer_ref(s->self_ref);
        if (!in->context_ref) {
            av_buffer_unref(buf);
            return AVERROR(ENOMEM);
        }
        in->context_refcount = 1;
    }

    in->status = V4L2BUF_RET_USER;
    atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);

    return 0;
}

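/* Copy size bytes into the mmap'ed V4L2 plane at the given offset, clamping
 * to the plane length, and update the bytesused/length bookkeeping for both
 * the multi-planar and single-planar buffer layouts. */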
static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, int offset, AVBufferRef* bref)
{
    unsigned int bytesused, length;

    if (plane >= out->num_planes)
        return AVERROR(EINVAL);

    length = out->plane_info[plane].length;
    bytesused = FFMIN(size + offset, length);

    memcpy((uint8_t*)out->plane_info[plane].mm_addr + offset, data, FFMIN(size, length - offset));

    if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
        out->planes[plane].bytesused = bytesused;
        out->planes[plane].length = length;
    } else {
        out->buf.bytesused = bytesused;
        out->buf.length = length;
    }

    return 0;
}

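/* Export a dequeued capture buffer as the planes of an AVFrame (zero-copy).
 * Drivers that return semi-planar or planar data in a single V4L2 plane need
 * the chroma pointers and linesizes derived by hand below. */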
static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    int i, ret;

    frame->format = avbuf->context->av_pix_fmt;

    for (i = 0; i < avbuf->num_planes; i++) {
        ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
        if (ret)
            return ret;

        frame->linesize[i] = avbuf->plane_info[i].bytesperline;
        frame->data[i] = frame->buf[i]->data;
    }

    /* fixup special cases */
    switch (avbuf->context->av_pix_fmt) {
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        break;
    case AV_PIX_FMT_YUV420P:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline >> 1;
        frame->linesize[2] = avbuf->plane_info[0].bytesperline >> 1;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        frame->data[2] = frame->data[1] + ((avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height) >> 2);
        break;
    default:
        break;
    }

    return 0;
}

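/* Fill an output V4L2 buffer from an AVFrame. Multi-planar V4L2 pixel formats
 * map one frame plane per V4L2 plane; single-plane formats get all the frame
 * planes copied back to back into plane 0 at increasing offsets. */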
static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    int i, ret;
    struct v4l2_format fmt = out->context->format;
    int pixel_format = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                       fmt.fmt.pix_mp.pixelformat : fmt.fmt.pix.pixelformat;
    int height = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                 fmt.fmt.pix_mp.height : fmt.fmt.pix.height;
    int is_planar_format = 0;

    switch (pixel_format) {
    case V4L2_PIX_FMT_YUV420M:
    case V4L2_PIX_FMT_YVU420M:
    case V4L2_PIX_FMT_YUV422M:
    case V4L2_PIX_FMT_YVU422M:
    case V4L2_PIX_FMT_YUV444M:
    case V4L2_PIX_FMT_YVU444M:
    case V4L2_PIX_FMT_NV12M:
    case V4L2_PIX_FMT_NV21M:
    case V4L2_PIX_FMT_NV12MT_16X16:
    case V4L2_PIX_FMT_NV12MT:
    case V4L2_PIX_FMT_NV16M:
    case V4L2_PIX_FMT_NV61M:
        is_planar_format = 1;
    }

    if (!is_planar_format) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
        int planes_nb = 0;
        int offset = 0;

        for (i = 0; i < desc->nb_components; i++)
            planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

        for (i = 0; i < planes_nb; i++) {
            int size, h = height;
            if (i == 1 || i == 2) {
                h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
            }
            size = frame->linesize[i] * h;
            ret = v4l2_bufref_to_buf(out, 0, frame->data[i], size, offset, frame->buf[i]);
            if (ret)
                return ret;
            offset += size;
        }
        return 0;
    }

    for (i = 0; i < out->num_planes; i++) {
        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, 0, frame->buf[i]);
        if (ret)
            return ret;
    }

    return 0;
}

/******************************************************************************
 *
 *              V4L2Buffer interface
 *
 ******************************************************************************/

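/* Write an AVFrame into an output buffer and stamp it with the frame pts. */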
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    v4l2_set_pts(out, frame->pts);

    return v4l2_buffer_swframe_to_buf(frame, out);
}

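/* Turn a dequeued capture buffer into a decoded AVFrame: wrap the planes,
 * then recover pts, colorimetry, keyframe and error flags from the buffer. */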
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    int ret;

    av_frame_unref(frame);

    /* 1. get references to the actual data */
    ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
    if (ret)
        return ret;

    /* 2. get frame information */
    frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
    frame->color_primaries = v4l2_get_color_primaries(avbuf);
    frame->colorspace = v4l2_get_color_space(avbuf);
    frame->color_range = v4l2_get_color_range(avbuf);
    frame->color_trc = v4l2_get_color_trc(avbuf);
    frame->pts = v4l2_get_pts(avbuf);

    /* these two values are updated also during re-init in v4l2_process_driver_event */
    frame->height = s->output.height;
    frame->width = s->output.width;

    /* 3. report errors upstream */
    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
        frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
    }

    return 0;
}

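/* Turn a dequeued capture buffer into an encoded AVPacket, again zero-copy:
 * the packet data points straight into the mmap'ed plane. */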
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
{
    int ret;

    av_packet_unref(pkt);

    ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
    if (ret)
        return ret;

    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
    pkt->data = pkt->buf->data;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver encode error\n", avbuf->context->name);
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
    }

    pkt->dts = pkt->pts = v4l2_get_pts(avbuf);

    return 0;
}

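/* Write an encoded AVPacket into an output buffer, carrying over the pts and
 * the keyframe flag. */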
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
{
    int ret;

    ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, 0, pkt->buf);
    if (ret)
        return ret;

    v4l2_set_pts(out, pkt->pts);

    if (pkt->flags & AV_PKT_FLAG_KEY)
        out->flags = V4L2_BUF_FLAG_KEYFRAME;

    return 0;
}

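/* Query the driver for buffer 'index', mmap every plane into userspace and
 * record the plane geometry. Capture buffers are queued to the driver right
 * away; output buffers just become available for the codec to fill. */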
int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index)
{
    V4L2Context *ctx = avbuf->context;
    int ret, i;

    avbuf->buf.memory = V4L2_MEMORY_MMAP;
    avbuf->buf.type = ctx->type;
    avbuf->buf.index = index;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.length = VIDEO_MAX_PLANES;
        avbuf->buf.m.planes = avbuf->planes;
    }

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        /* in MP, the V4L2 API states that buf.length means num_planes;
         * count the leading planes with a non-zero length (the original
         * open-coded loop could spin forever on a zero-length plane) */
        avbuf->num_planes = 0;
        while (avbuf->num_planes < avbuf->buf.length &&
               avbuf->buf.m.planes[avbuf->num_planes].length)
            avbuf->num_planes++;
    } else
        avbuf->num_planes = 1;

    for (i = 0; i < avbuf->num_planes; i++) {
        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
        } else {
            avbuf->plane_info[i].length = avbuf->buf.length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
        }

        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
            return AVERROR(ENOMEM);
    }

    avbuf->status = V4L2BUF_AVAILABLE;

    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        return 0;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.m.planes = avbuf->planes;
        avbuf->buf.length = avbuf->num_planes;
    } else {
        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
        avbuf->buf.length = avbuf->planes[0].length;
    }

    return ff_v4l2_buffer_enqueue(avbuf);
}

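/* Queue the buffer to the driver with VIDIOC_QBUF and mark it as owned by
 * the driver until it is dequeued again. */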
int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf)
{
    int ret;

    avbuf->buf.flags = avbuf->flags;

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    avbuf->status = V4L2BUF_IN_DRIVER;

    return 0;
}