/*
 * V4L2 buffer helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "libavutil/pixdesc.h"
#include "v4l2_context.h"
#include "v4l2_buffers.h"
#include "v4l2_m2m.h"

#define USEC_PER_SEC 1000000

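/* A V4L2Context is always embedded in a V4L2m2mContext, either as the
 * .output or the .capture member; container_of() walks back from the
 * embedded context to its owning V4L2m2mContext. */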
static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
{
    return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
        container_of(buf->context, V4L2m2mContext, output) :
        container_of(buf->context, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Buffer *buf)
{
    return buf_to_m2mctx(buf)->avctx;
}

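/* V4L2 carries timestamps as a struct timeval, i.e. a 1/USEC_PER_SEC
 * timebase, so presentation timestamps are rescaled between the codec
 * timebase and microseconds on the way in and out. As an illustration,
 * with a codec time_base of 1/25, a pts of 50 becomes 2000000 us, i.e.
 * tv_sec = 2, tv_usec = 0. */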
static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
{
    V4L2m2mContext *s = buf_to_m2mctx(out);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    if (pts == AV_NOPTS_VALUE)
        pts = 0;

    /* convert pts to v4l2 timebase */
    v4l2_pts = av_rescale_q(pts, s->avctx->time_base, v4l2_timebase);
    out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
    out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
}

static inline int64_t v4l2_get_pts(V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    /* convert pts back to encoder timebase */
    v4l2_pts = (int64_t)avbuf->buf.timestamp.tv_sec * USEC_PER_SEC +
                        avbuf->buf.timestamp.tv_usec;

    return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
}

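/* The following helpers translate the V4L2 colorimetry description of the
 * negotiated format (colorspace, ycbcr_enc, xfer_func and quantization)
 * into the corresponding AVColor* enums. Values with no direct match fall
 * through to the *_UNSPECIFIED defaults. */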
static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
    case V4L2_YCBCR_ENC_XV601:
    case V4L2_YCBCR_ENC_601: return AVCOL_PRI_BT470M;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
    case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
    default:
        break;
    }

    return AVCOL_PRI_UNSPECIFIED;
}

static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
{
    enum v4l2_quantization qt;

    qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.quantization :
        buf->context->format.fmt.pix.quantization;

    switch (qt) {
    case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
    default:
        break;
    }

    return AVCOL_RANGE_UNSPECIFIED;
}

static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (cs) {
    case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
    case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
    case V4L2_COLORSPACE_BT2020:
        if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
            return AVCOL_SPC_BT2020_CL;
        else
            return AVCOL_SPC_BT2020_NCL;
    default:
        break;
    }

    return AVCOL_SPC_UNSPECIFIED;
}

static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_xfer_func xfer;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.xfer_func :
        buf->context->format.fmt.pix.xfer_func;

    switch (xfer) {
    case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
    case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
    default:
        break;
    }

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
    default:
        break;
    }

    return AVCOL_TRC_UNSPECIFIED;
}

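/* AVBufferRef free callback for the buffers handed to the user via
 * v4l2_buf_to_bufref(). When the last plane reference is released, the
 * buffer is re-queued to the driver if streaming is still on, or simply
 * marked available while draining or re-initializing. */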
static void v4l2_free_buffer(void *opaque, uint8_t *unused)
{
    V4L2Buffer *avbuf = opaque;
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);

    if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
        atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);

        if (s->reinit) {
            if (!atomic_load(&s->refcount))
                sem_post(&s->refsync);
        } else {
            if (s->draining) {
                /* no need to queue more buffers to the driver */
                avbuf->status = V4L2BUF_AVAILABLE;
            } else if (avbuf->context->streamon)
                ff_v4l2_buffer_enqueue(avbuf);
        }

        av_buffer_unref(&avbuf->context_ref);
    }
}

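/* Takes a reference on the owning V4L2m2mContext (via its self_ref) the
 * first time a buffer leaves for the user, and bumps the per-buffer plane
 * refcount on every later call; this keeps the context alive for as long
 * as any of its mmap'ed planes is still reachable from an AVFrame or
 * AVPacket. */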
static int v4l2_buf_increase_ref(V4L2Buffer *in)
{
    V4L2m2mContext *s = buf_to_m2mctx(in);

    if (in->context_ref)
        atomic_fetch_add(&in->context_refcount, 1);
    else {
        in->context_ref = av_buffer_ref(s->self_ref);
        if (!in->context_ref)
            return AVERROR(ENOMEM);

        in->context_refcount = 1;
    }

    in->status = V4L2BUF_RET_USER;
    atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);

    return 0;
}

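/* Wraps one mmap'ed plane of a driver buffer in an AVBufferRef without
 * copying; v4l2_free_buffer() runs when the reference count drops to
 * zero, which is what eventually returns the buffer to the driver. */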
static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
{
    int ret;

    if (plane >= in->num_planes)
        return AVERROR(EINVAL);

    /* even though most encoders return 0 in data_offset, encoding VP8 does require this value */
    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
    if (!*buf)
        return AVERROR(ENOMEM);

    ret = v4l2_buf_increase_ref(in);
    if (ret)
        av_buffer_unref(buf);

    return ret;
}

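/* Copies size bytes of user data into a plane's mmap'ed memory at the
 * given offset, clamping to the plane length, and records the resulting
 * bytesused/length in the v4l2_buffer so the driver knows how much of
 * the plane holds valid data. The bref argument is unused here. */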
static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t *data, int size, int offset, AVBufferRef *bref)
{
    unsigned int bytesused, length;

    if (plane >= out->num_planes)
        return AVERROR(EINVAL);

    length = out->plane_info[plane].length;
    bytesused = FFMIN(size + offset, length);

    memcpy((uint8_t *)out->plane_info[plane].mm_addr + offset, data, FFMIN(size, length - offset));

    if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
        out->planes[plane].bytesused = bytesused;
        out->planes[plane].length = length;
    } else {
        out->buf.bytesused = bytesused;
        out->buf.length = length;
    }

    return 0;
}

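/* Builds a zero-copy AVFrame on top of a capture buffer, one AVBufferRef
 * per V4L2 plane. Some drivers expose NV12/NV21 or YUV420 as a single
 * contiguous plane; in that case the chroma pointers and linesizes are
 * derived manually from the luma stride and the coded height. */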
static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    int i, ret;

    frame->format = avbuf->context->av_pix_fmt;

    for (i = 0; i < avbuf->num_planes; i++) {
        ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
        if (ret)
            return ret;

        frame->linesize[i] = avbuf->plane_info[i].bytesperline;
        frame->data[i] = frame->buf[i]->data;
    }

    /* fixup special cases */
    switch (avbuf->context->av_pix_fmt) {
    case AV_PIX_FMT_NV12:
    case AV_PIX_FMT_NV21:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        break;
    case AV_PIX_FMT_YUV420P:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline >> 1;
        frame->linesize[2] = avbuf->plane_info[0].bytesperline >> 1;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        frame->data[2] = frame->data[1] + ((avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height) >> 2);
        break;
    default:
        break;
    }

    return 0;
}

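/* Copies an AVFrame into an output buffer. For the non-contiguous V4L2
 * formats (the *M fourccs) each AVFrame plane maps onto its own V4L2
 * plane; for contiguous formats all planes are packed one after another
 * into plane 0 at increasing offsets, with the chroma height reduced
 * according to the pixel format's subsampling. */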
static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    int i, ret;
    struct v4l2_format fmt = out->context->format;
    int pixel_format = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                       fmt.fmt.pix_mp.pixelformat : fmt.fmt.pix.pixelformat;
    int height = V4L2_TYPE_IS_MULTIPLANAR(fmt.type) ?
                 fmt.fmt.pix_mp.height : fmt.fmt.pix.height;
    int is_planar_format = 0;

    switch (pixel_format) {
    case V4L2_PIX_FMT_YUV420M:
    case V4L2_PIX_FMT_YVU420M:
    case V4L2_PIX_FMT_YUV422M:
    case V4L2_PIX_FMT_YVU422M:
    case V4L2_PIX_FMT_YUV444M:
    case V4L2_PIX_FMT_YVU444M:
    case V4L2_PIX_FMT_NV12M:
    case V4L2_PIX_FMT_NV21M:
    case V4L2_PIX_FMT_NV12MT_16X16:
    case V4L2_PIX_FMT_NV12MT:
    case V4L2_PIX_FMT_NV16M:
    case V4L2_PIX_FMT_NV61M:
        is_planar_format = 1;
    }

    if (!is_planar_format) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
        int planes_nb = 0;
        int offset = 0;

        for (i = 0; i < desc->nb_components; i++)
            planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1);

        for (i = 0; i < planes_nb; i++) {
            int size, h = height;
            if (i == 1 || i == 2) {
                h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
            }
            size = frame->linesize[i] * h;
            ret = v4l2_bufref_to_buf(out, 0, frame->data[i], size, offset, frame->buf[i]);
            if (ret)
                return ret;
            offset += size;
        }
        return 0;
    }

    for (i = 0; i < out->num_planes; i++) {
        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, 0, frame->buf[i]);
        if (ret)
            return ret;
    }

    return 0;
}

/******************************************************************************
 *
 *              V4L2Buffer interface
 *
 ******************************************************************************/

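/* Encoder input path: stamps the V4L2 timestamp from frame->pts and
 * copies the frame data into the output buffer. */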
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    v4l2_set_pts(out, frame->pts);

    return v4l2_buffer_swframe_to_buf(frame, out);
}

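/* Decoder output path: wraps a dequeued capture buffer in an AVFrame,
 * fills in keyframe flag, colorimetry, pts and dimensions, and maps
 * V4L2_BUF_FLAG_ERROR to a decode error flag. */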
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    int ret;

    av_frame_unref(frame);

    /* 1. get references to the actual data */
    ret = v4l2_buffer_buf_to_swframe(frame, avbuf);
    if (ret)
        return ret;

    /* 2. get frame information */
    frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
    frame->color_primaries = v4l2_get_color_primaries(avbuf);
    frame->colorspace = v4l2_get_color_space(avbuf);
    frame->color_range = v4l2_get_color_range(avbuf);
    frame->color_trc = v4l2_get_color_trc(avbuf);
    frame->pts = v4l2_get_pts(avbuf);

    /* these two values are also updated during re-init in v4l2_process_driver_event */
    frame->height = s->output.height;
    frame->width = s->output.width;

    /* 3. report errors upstream */
    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
        frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
    }

    return 0;
}

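/* Encoder output path: wraps a dequeued capture buffer in an AVPacket.
 * bytesused (not the full plane length) gives the packet size, and
 * V4L2_BUF_FLAG_ERROR is reported as a corrupt packet. */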
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
{
    int ret;

    av_packet_unref(pkt);
    ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
    if (ret)
        return ret;

    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
    pkt->data = pkt->buf->data;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver encode error\n", avbuf->context->name);
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
    }

    pkt->dts = pkt->pts = v4l2_get_pts(avbuf);

    return 0;
}

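/* Decoder input path: copies the compressed packet into an output
 * buffer, stamps its timestamp and propagates the keyframe flag. */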
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
{
    int ret;

    ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, 0, pkt->buf);
    if (ret)
        return ret;

    v4l2_set_pts(out, pkt->pts);

    if (pkt->flags & AV_PKT_FLAG_KEY)
        out->flags = V4L2_BUF_FLAG_KEYFRAME;

    return 0;
}

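/* Queries the driver for the buffer's plane layout (VIDIOC_QUERYBUF) and
 * mmaps every plane into the process. Output buffers are left available
 * for the codec to fill; capture buffers are queued to the driver right
 * away so it has somewhere to write its results. */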
int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
{
    V4L2Context *ctx = avbuf->context;
    int ret, i;

    avbuf->buf.memory = V4L2_MEMORY_MMAP;
    avbuf->buf.type = ctx->type;
    avbuf->buf.index = index;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.length = VIDEO_MAX_PLANES;
        avbuf->buf.m.planes = avbuf->planes;
    }

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->num_planes = 0;
        /* in MP, the V4L2 API states that buf.length means num_planes;
         * stop at the first zero-length plane so the loop cannot spin
         * forever on a sparse plane list */
        while (avbuf->num_planes < avbuf->buf.length &&
               avbuf->buf.m.planes[avbuf->num_planes].length)
            avbuf->num_planes++;
    } else
        avbuf->num_planes = 1;

    for (i = 0; i < avbuf->num_planes; i++) {
        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
        } else {
            avbuf->plane_info[i].length = avbuf->buf.length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
        }

        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
            return AVERROR(ENOMEM);
    }

    avbuf->status = V4L2BUF_AVAILABLE;

    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        return 0;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.m.planes = avbuf->planes;
        avbuf->buf.length = avbuf->num_planes;
    } else {
        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
        avbuf->buf.length = avbuf->planes[0].length;
    }

    return ff_v4l2_buffer_enqueue(avbuf);
}

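/* Queues a buffer to the driver with VIDIOC_QBUF and marks it as owned
 * by the driver until it is dequeued again. */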
int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
{
    int ret;

    avbuf->buf.flags = avbuf->flags;

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    avbuf->status = V4L2BUF_IN_DRIVER;

    return 0;
}