/*
 * V4L2 buffer helper functions.
 *
 * Copyright (C) 2017 Alexis Ballier <aballier@gentoo.org>
 * Copyright (C) 2017 Jorge Ramirez <jorge.ramirez-ortiz@linaro.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <linux/videodev2.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <fcntl.h>
#include <poll.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/internal.h"
#include "v4l2_context.h"
#include "v4l2_buffers.h"
#include "v4l2_m2m.h"

#define USEC_PER_SEC 1000000
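
/* Every V4L2Buffer belongs to either the output or the capture V4L2Context
 * embedded in a V4L2m2mContext, so the owning m2m context can be recovered
 * with container_of() on whichever context the buffer points to. logger()
 * below is shorthand for reaching the AVCodecContext used with av_log(). */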
static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf)
{
    return V4L2_TYPE_IS_OUTPUT(buf->context->type) ?
        container_of(buf->context, V4L2m2mContext, output) :
        container_of(buf->context, V4L2m2mContext, capture);
}

static inline AVCodecContext *logger(V4L2Buffer *buf)
{
    return buf_to_m2mctx(buf)->avctx;
}
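
/* V4L2 timestamps are a struct timeval in microseconds, so the pts must be
 * rescaled from the codec time base to 1/USEC_PER_SEC and split into seconds
 * and microseconds. For example, with a 1/25 time base, pts = 40 rescales to
 * 1600000 us, i.e. tv_sec = 1, tv_usec = 600000. */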
static inline void v4l2_set_pts(V4L2Buffer *out, int64_t pts)
{
    V4L2m2mContext *s = buf_to_m2mctx(out);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    if (pts == AV_NOPTS_VALUE)
        pts = 0;

    /* convert pts to v4l2 timebase */
    v4l2_pts = av_rescale_q(pts, s->avctx->time_base, v4l2_timebase);
    out->buf.timestamp.tv_usec = v4l2_pts % USEC_PER_SEC;
    out->buf.timestamp.tv_sec = v4l2_pts / USEC_PER_SEC;
}
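
/* Inverse of v4l2_set_pts(): rebuild the microsecond count from the timeval
 * and rescale it back to the codec time base. */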
static inline uint64_t v4l2_get_pts(V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    AVRational v4l2_timebase = { 1, USEC_PER_SEC };
    int64_t v4l2_pts;

    /* convert pts back to encoder timebase */
    v4l2_pts = avbuf->buf.timestamp.tv_sec * USEC_PER_SEC + avbuf->buf.timestamp.tv_usec;

    return av_rescale_q(v4l2_pts, v4l2_timebase, s->avctx->time_base);
}
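
/* The four helpers below translate the colorimetry fields the driver reports
 * in the negotiated format (colorspace, ycbcr_enc, quantization, xfer_func;
 * stored in fmt.pix or fmt.pix_mp depending on the buffer type) into the
 * closest libavutil equivalents, falling back to UNSPECIFIED when there is
 * no clear match. */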
static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_709: return AVCOL_PRI_BT709;
    case V4L2_YCBCR_ENC_XV601:
    case V4L2_YCBCR_ENC_601: return AVCOL_PRI_BT470M;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_PRI_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_PRI_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_PRI_SMPTE240M;
    case V4L2_COLORSPACE_BT2020: return AVCOL_PRI_BT2020;
    default:
        break;
    }

    return AVCOL_PRI_UNSPECIFIED;
}

static enum AVColorRange v4l2_get_color_range(V4L2Buffer *buf)
{
    enum v4l2_quantization qt;

    qt = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.quantization :
        buf->context->format.fmt.pix.quantization;

    switch (qt) {
    case V4L2_QUANTIZATION_LIM_RANGE: return AVCOL_RANGE_MPEG;
    case V4L2_QUANTIZATION_FULL_RANGE: return AVCOL_RANGE_JPEG;
    default:
        break;
    }

    return AVCOL_RANGE_UNSPECIFIED;
}

static enum AVColorSpace v4l2_get_color_space(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    switch (cs) {
    case V4L2_COLORSPACE_SRGB: return AVCOL_SPC_RGB;
    case V4L2_COLORSPACE_REC709: return AVCOL_SPC_BT709;
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_SPC_FCC;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_SPC_BT470BG;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_SPC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_SPC_SMPTE240M;
    case V4L2_COLORSPACE_BT2020:
        if (ycbcr == V4L2_YCBCR_ENC_BT2020_CONST_LUM)
            return AVCOL_SPC_BT2020_CL;
        else
            return AVCOL_SPC_BT2020_NCL;
    default:
        break;
    }

    return AVCOL_SPC_UNSPECIFIED;
}

static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf)
{
    enum v4l2_ycbcr_encoding ycbcr;
    enum v4l2_xfer_func xfer;
    enum v4l2_colorspace cs;

    cs = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.colorspace :
        buf->context->format.fmt.pix.colorspace;

    ycbcr = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.ycbcr_enc :
        buf->context->format.fmt.pix.ycbcr_enc;

    xfer = V4L2_TYPE_IS_MULTIPLANAR(buf->buf.type) ?
        buf->context->format.fmt.pix_mp.xfer_func :
        buf->context->format.fmt.pix.xfer_func;

    switch (xfer) {
    case V4L2_XFER_FUNC_709: return AVCOL_TRC_BT709;
    case V4L2_XFER_FUNC_SRGB: return AVCOL_TRC_IEC61966_2_1;
    default:
        break;
    }

    switch (cs) {
    case V4L2_COLORSPACE_470_SYSTEM_M: return AVCOL_TRC_GAMMA22;
    case V4L2_COLORSPACE_470_SYSTEM_BG: return AVCOL_TRC_GAMMA28;
    case V4L2_COLORSPACE_SMPTE170M: return AVCOL_TRC_SMPTE170M;
    case V4L2_COLORSPACE_SMPTE240M: return AVCOL_TRC_SMPTE240M;
    default:
        break;
    }

    switch (ycbcr) {
    case V4L2_YCBCR_ENC_XV709:
    case V4L2_YCBCR_ENC_XV601: return AVCOL_TRC_BT1361_ECG;
    default:
        break;
    }

    return AVCOL_TRC_UNSPECIFIED;
}
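
/* AVBufferRef free callback for buffers handed to the user. When the last
 * plane reference is dropped, it either wakes up a pending reinit waiting on
 * refsync, or, if the stream is still on, recycles the buffer straight back
 * to the driver. */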
static void v4l2_free_buffer(void *opaque, uint8_t *unused)
{
    V4L2Buffer *avbuf = opaque;
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);

    if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) {
        atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel);

        if (s->reinit) {
            if (!atomic_load(&s->refcount))
                sem_post(&s->refsync);
        } else if (avbuf->context->streamon)
            ff_v4l2_buffer_enqueue(avbuf);

        av_buffer_unref(&avbuf->context_ref);
    }
}
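
/* Wrap one mmap'ed plane of a dequeued buffer in an AVBufferRef without
 * copying; the free callback above returns the buffer to the driver. The
 * buffer also takes a reference on the m2m context (self_ref) so the context
 * cannot disappear while the user still holds data pointing into it. */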
static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf)
{
    V4L2m2mContext *s = buf_to_m2mctx(in);

    if (plane >= in->num_planes)
        return AVERROR(EINVAL);

    /* even though most encoders return 0 in data_offset, encoding VP8 does require this value */
    *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset,
                            in->plane_info[plane].length, v4l2_free_buffer, in, 0);
    if (!*buf)
        return AVERROR(ENOMEM);

    if (in->context_ref)
        atomic_fetch_add(&in->context_refcount, 1);
    else {
        in->context_ref = av_buffer_ref(s->self_ref);
        if (!in->context_ref) {
            av_buffer_unref(buf);
            return AVERROR(ENOMEM);
        }
        in->context_refcount = 1;
    }

    in->status = V4L2BUF_RET_USER;
    atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed);

    return 0;
}
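
/* Copy the contents of an AVBufferRef into an mmap'ed output plane. If the
 * source is larger than the plane, the copy is silently truncated to the
 * plane length. */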
static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t *data, int size, AVBufferRef *bref)
{
    unsigned int bytesused, length;

    if (plane >= out->num_planes)
        return AVERROR(EINVAL);

    bytesused = FFMIN(size, out->plane_info[plane].length);
    length = out->plane_info[plane].length;

    memcpy(out->plane_info[plane].mm_addr, data, bytesused);

    if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) {
        out->planes[plane].bytesused = bytesused;
        out->planes[plane].length = length;
    } else {
        out->buf.bytesused = bytesused;
        out->buf.length = length;
    }

    return 0;
}

/******************************************************************************
 *
 *              V4L2Buffer interface
 *
 ******************************************************************************/
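
/* Copy an AVFrame into the mmap'ed planes of an output buffer, plane by
 * plane, and transfer its pts into the V4L2 timestamp. */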
int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out)
{
    int i, ret;

    for (i = 0; i < out->num_planes; i++) {
        ret = v4l2_bufref_to_buf(out, i, frame->buf[i]->data, frame->buf[i]->size, frame->buf[i]);
        if (ret)
            return ret;
    }

    v4l2_set_pts(out, frame->pts);

    return 0;
}
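
/* Export a dequeued capture buffer as an AVFrame: wrap each plane zero-copy,
 * patch up semi-planar layouts that the driver reports as a single plane,
 * then fill in the frame metadata (colorimetry, pts, dimensions) and flag
 * driver-side decode errors. */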
int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf)
{
    V4L2m2mContext *s = buf_to_m2mctx(avbuf);
    int i, ret;

    av_frame_unref(frame);

    /* 1. get references to the actual data */
    for (i = 0; i < avbuf->num_planes; i++) {
        ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]);
        if (ret)
            return ret;

        frame->linesize[i] = avbuf->plane_info[i].bytesperline;
        frame->data[i] = frame->buf[i]->data;
    }

    /* 1.1 fixup special cases */
    switch (avbuf->context->av_pix_fmt) {
    case AV_PIX_FMT_NV12:
        if (avbuf->num_planes > 1)
            break;
        frame->linesize[1] = avbuf->plane_info[0].bytesperline;
        frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height;
        break;
    default:
        break;
    }

    /* 2. get frame information */
    frame->key_frame = !!(avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME);
    frame->format = avbuf->context->av_pix_fmt;
    frame->color_primaries = v4l2_get_color_primaries(avbuf);
    frame->colorspace = v4l2_get_color_space(avbuf);
    frame->color_range = v4l2_get_color_range(avbuf);
    frame->color_trc = v4l2_get_color_trc(avbuf);
    frame->pts = v4l2_get_pts(avbuf);

    /* these two values are updated also during re-init in v4l2_process_driver_event */
    frame->height = s->output.height;
    frame->width = s->output.width;

    /* 3. report errors upstream */
    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver decode error\n", avbuf->context->name);
        frame->decode_error_flags |= FF_DECODE_ERROR_INVALID_BITSTREAM;
    }

    return 0;
}
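
/* Export a dequeued capture buffer as an AVPacket (encoder output): wrap the
 * first plane zero-copy and propagate keyframe and error flags. The single
 * V4L2 timestamp is used for both pts and dts. */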
int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf)
{
    int ret;

    av_packet_unref(pkt);

    ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf);
    if (ret)
        return ret;

    pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused;
    pkt->data = pkt->buf->data;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME)
        pkt->flags |= AV_PKT_FLAG_KEY;

    if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) {
        av_log(logger(avbuf), AV_LOG_ERROR, "%s: driver encode error\n", avbuf->context->name);
        pkt->flags |= AV_PKT_FLAG_CORRUPT;
    }

    pkt->dts = pkt->pts = v4l2_get_pts(avbuf);

    return 0;
}
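
/* Copy an AVPacket into an output buffer for decoding and translate the
 * packet metadata (pts, keyframe flag) into their V4L2 equivalents. */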
int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out)
{
    int ret;

    ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, pkt->buf);
    if (ret)
        return ret;

    v4l2_set_pts(out, pkt->pts);

    if (pkt->flags & AV_PKT_FLAG_KEY)
        out->flags = V4L2_BUF_FLAG_KEYFRAME;

    return 0;
}
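
/* Query the driver for the buffer at the given index (VIDIOC_QUERYBUF),
 * count its planes, and mmap() each plane into the process. Capture buffers
 * are queued to the driver immediately so it has somewhere to write; output
 * buffers stay available until there is data to send. */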
int ff_v4l2_buffer_initialize(V4L2Buffer *avbuf, int index)
{
    V4L2Context *ctx = avbuf->context;
    int ret, i;

    avbuf->buf.memory = V4L2_MEMORY_MMAP;
    avbuf->buf.type = ctx->type;
    avbuf->buf.index = index;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.length = VIDEO_MAX_PLANES;
        avbuf->buf.m.planes = avbuf->planes;
    }

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->num_planes = 0;
        /* in MP, the V4L2 API states that buf.length means num_planes; use a
         * bounded loop so a zero-length plane cannot make it spin forever */
        for (i = 0; i < avbuf->buf.length; i++)
            if (avbuf->buf.m.planes[i].length)
                avbuf->num_planes++;
    } else
        avbuf->num_planes = 1;

    for (i = 0; i < avbuf->num_planes; i++) {
        avbuf->plane_info[i].bytesperline = V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ?
            ctx->format.fmt.pix_mp.plane_fmt[i].bytesperline :
            ctx->format.fmt.pix.bytesperline;

        if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
            avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.m.planes[i].length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.planes[i].m.mem_offset);
        } else {
            avbuf->plane_info[i].length = avbuf->buf.length;
            avbuf->plane_info[i].mm_addr = mmap(NULL, avbuf->buf.length,
                                                PROT_READ | PROT_WRITE, MAP_SHARED,
                                                buf_to_m2mctx(avbuf)->fd, avbuf->buf.m.offset);
        }

        if (avbuf->plane_info[i].mm_addr == MAP_FAILED)
            return AVERROR(ENOMEM);
    }

    avbuf->status = V4L2BUF_AVAILABLE;

    if (V4L2_TYPE_IS_OUTPUT(ctx->type))
        return 0;

    if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) {
        avbuf->buf.m.planes = avbuf->planes;
        avbuf->buf.length = avbuf->num_planes;
    } else {
        avbuf->buf.bytesused = avbuf->planes[0].bytesused;
        avbuf->buf.length = avbuf->planes[0].length;
    }

    return ff_v4l2_buffer_enqueue(avbuf);
}
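
/* Queue the buffer to the driver (VIDIOC_QBUF) and mark it as owned by the
 * driver until it is dequeued again. */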
int ff_v4l2_buffer_enqueue(V4L2Buffer *avbuf)
{
    int ret;

    avbuf->buf.flags = avbuf->flags;

    ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QBUF, &avbuf->buf);
    if (ret < 0)
        return AVERROR(errno);

    avbuf->status = V4L2BUF_IN_DRIVER;

    return 0;
}