You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

341 lines
11KB

  1. /*
  2. * Newtek NDI input
  3. * Copyright (c) 2017 Maksym Veremeyenko
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavformat/avformat.h"
  22. #include "libavformat/internal.h"
  23. #include "libavutil/opt.h"
  24. #include "libavutil/imgutils.h"
  25. #include "libndi_newtek_common.h"
/**
 * Private demuxer state, allocated by lavf as priv_data.
 */
struct NDIContext {
    const AVClass *cclass;          ///< class for private options (must be first)

    /* Options */
    int find_sources;               ///< bool: list available sources and exit
    int64_t wait_sources;           ///< AV_OPT_TYPE_DURATION (microseconds) to wait for source discovery
    int allow_video_fields;         ///< bool: permit fielded (interlaced) video from the receiver

    /* Runtime */
    NDIlib_recv_create_t *recv;     /* receiver instance handle returned by NDIlib_recv_create()
                                     * NOTE(review): typed as the create-descriptor struct rather
                                     * than an instance type — matches the SDK usage below, but
                                     * looks like an opaque handle in practice */
    NDIlib_find_instance_t ndi_find; ///< source-finder instance, created lazily in ndi_find_sources()

    /* Streams */
    AVStream *video_st, *audio_st;  ///< created on first captured frame of each type
};
  38. static int ndi_set_video_packet(AVFormatContext *avctx, NDIlib_video_frame_t *v, AVPacket *pkt)
  39. {
  40. int ret;
  41. struct NDIContext *ctx = avctx->priv_data;
  42. ret = av_new_packet(pkt, v->yres * v->line_stride_in_bytes);
  43. if (ret < 0)
  44. return ret;
  45. pkt->dts = pkt->pts = av_rescale_q(v->timecode, NDI_TIME_BASE_Q, ctx->video_st->time_base);
  46. pkt->duration = av_rescale_q(1, (AVRational){v->frame_rate_D, v->frame_rate_N}, ctx->video_st->time_base);
  47. av_log(avctx, AV_LOG_DEBUG, "%s: pkt->dts = pkt->pts = %"PRId64", duration=%"PRId64", timecode=%"PRId64"\n",
  48. __func__, pkt->dts, pkt->duration, v->timecode);
  49. pkt->flags |= AV_PKT_FLAG_KEY;
  50. pkt->stream_index = ctx->video_st->index;
  51. memcpy(pkt->data, v->p_data, pkt->size);
  52. return 0;
  53. }
  54. static int ndi_set_audio_packet(AVFormatContext *avctx, NDIlib_audio_frame_t *a, AVPacket *pkt)
  55. {
  56. int ret;
  57. struct NDIContext *ctx = avctx->priv_data;
  58. NDIlib_audio_frame_interleaved_16s_t dst;
  59. ret = av_new_packet(pkt, 2 * a->no_samples * a->no_channels);
  60. if (ret < 0)
  61. return ret;
  62. pkt->dts = pkt->pts = av_rescale_q(a->timecode, NDI_TIME_BASE_Q, ctx->audio_st->time_base);
  63. pkt->duration = av_rescale_q(1, (AVRational){a->no_samples, a->sample_rate}, ctx->audio_st->time_base);
  64. av_log(avctx, AV_LOG_DEBUG, "%s: pkt->dts = pkt->pts = %"PRId64", duration=%"PRId64", timecode=%"PRId64"\n",
  65. __func__, pkt->dts, pkt->duration, a->timecode);
  66. pkt->flags |= AV_PKT_FLAG_KEY;
  67. pkt->stream_index = ctx->audio_st->index;
  68. dst.reference_level = 0;
  69. dst.p_data = (short *)pkt->data;
  70. NDIlib_util_audio_to_interleaved_16s(a, &dst);
  71. return 0;
  72. }
  73. static int ndi_find_sources(AVFormatContext *avctx, const char *name, NDIlib_source_t *source_to_connect_to)
  74. {
  75. int j = AVERROR(ENODEV);
  76. unsigned int n, i;
  77. struct NDIContext *ctx = avctx->priv_data;
  78. const NDIlib_source_t *ndi_srcs = NULL;
  79. const NDIlib_find_create_t find_create_desc = { .show_local_sources = true,
  80. .p_groups = NULL, .p_extra_ips = NULL };
  81. if (!ctx->ndi_find)
  82. ctx->ndi_find = NDIlib_find_create2(&find_create_desc);
  83. if (!ctx->ndi_find) {
  84. av_log(avctx, AV_LOG_ERROR, "NDIlib_find_create failed.\n");
  85. return AVERROR(EIO);
  86. }
  87. while (1)
  88. {
  89. int f, t = ctx->wait_sources / 1000;
  90. av_log(avctx, AV_LOG_DEBUG, "Waiting for sources %d miliseconds\n", t);
  91. f = NDIlib_find_wait_for_sources(ctx->ndi_find, t);
  92. av_log(avctx, AV_LOG_DEBUG, "NDIlib_find_wait_for_sources returns %d\n", f);
  93. if (!f)
  94. break;
  95. };
  96. ndi_srcs = NDIlib_find_get_current_sources(ctx->ndi_find, &n);
  97. if (ctx->find_sources)
  98. av_log(avctx, AV_LOG_INFO, "Found %d NDI sources:\n", n);
  99. for (i = 0; i < n; i++) {
  100. if (ctx->find_sources)
  101. av_log(avctx, AV_LOG_INFO, "\t'%s'\t'%s'\n", ndi_srcs[i].p_ndi_name, ndi_srcs[i].p_ip_address);
  102. if (!strcmp(name, ndi_srcs[i].p_ndi_name)) {
  103. *source_to_connect_to = ndi_srcs[i];
  104. j = i;
  105. }
  106. }
  107. return j;
  108. }
  109. static int ndi_read_header(AVFormatContext *avctx)
  110. {
  111. int ret;
  112. NDIlib_recv_create_t recv_create_desc;
  113. const NDIlib_tally_t tally_state = { .on_program = true, .on_preview = false };
  114. struct NDIContext *ctx = avctx->priv_data;
  115. if (!NDIlib_initialize()) {
  116. av_log(avctx, AV_LOG_ERROR, "NDIlib_initialize failed.\n");
  117. return AVERROR_EXTERNAL;
  118. }
  119. /* Find available sources. */
  120. ret = ndi_find_sources(avctx, avctx->url, &recv_create_desc.source_to_connect_to);
  121. if (ctx->find_sources) {
  122. return AVERROR_EXIT;
  123. }
  124. if (ret < 0)
  125. return ret;
  126. /* Create receiver description */
  127. recv_create_desc.color_format = NDIlib_recv_color_format_e_UYVY_RGBA;
  128. recv_create_desc.bandwidth = NDIlib_recv_bandwidth_highest;
  129. recv_create_desc.allow_video_fields = ctx->allow_video_fields;
  130. /* Create the receiver */
  131. ctx->recv = NDIlib_recv_create(&recv_create_desc);
  132. if (!ctx->recv) {
  133. av_log(avctx, AV_LOG_ERROR, "NDIlib_recv_create2 failed.\n");
  134. return AVERROR(EIO);
  135. }
  136. /* Set tally */
  137. NDIlib_recv_set_tally(ctx->recv, &tally_state);
  138. avctx->ctx_flags |= AVFMTCTX_NOHEADER;
  139. return 0;
  140. }
  141. static int ndi_create_video_stream(AVFormatContext *avctx, NDIlib_video_frame_t *v)
  142. {
  143. AVStream *st;
  144. AVRational tmp;
  145. struct NDIContext *ctx = avctx->priv_data;
  146. st = avformat_new_stream(avctx, NULL);
  147. if (!st) {
  148. av_log(avctx, AV_LOG_ERROR, "Cannot add video stream\n");
  149. return AVERROR(ENOMEM);
  150. }
  151. st->time_base = NDI_TIME_BASE_Q;
  152. st->r_frame_rate = av_make_q(v->frame_rate_N, v->frame_rate_D);
  153. tmp = av_mul_q(av_d2q(v->picture_aspect_ratio, INT_MAX), (AVRational){v->yres, v->xres});
  154. av_reduce(&st->sample_aspect_ratio.num, &st->sample_aspect_ratio.den, tmp.num, tmp.den, 1000);
  155. st->codecpar->sample_aspect_ratio = st->sample_aspect_ratio;
  156. st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
  157. st->codecpar->width = v->xres;
  158. st->codecpar->height = v->yres;
  159. st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
  160. st->codecpar->bit_rate = av_rescale(v->xres * v->yres * 16, v->frame_rate_N, v->frame_rate_D);
  161. st->codecpar->field_order = v->frame_format_type == NDIlib_frame_format_type_progressive
  162. ? AV_FIELD_PROGRESSIVE : AV_FIELD_TT;
  163. if (NDIlib_FourCC_type_UYVY == v->FourCC || NDIlib_FourCC_type_UYVA == v->FourCC) {
  164. st->codecpar->format = AV_PIX_FMT_UYVY422;
  165. st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
  166. if (NDIlib_FourCC_type_UYVA == v->FourCC)
  167. av_log(avctx, AV_LOG_WARNING, "Alpha channel ignored\n");
  168. } else if (NDIlib_FourCC_type_BGRA == v->FourCC) {
  169. st->codecpar->format = AV_PIX_FMT_BGRA;
  170. st->codecpar->codec_tag = MKTAG('B', 'G', 'R', 'A');
  171. } else if (NDIlib_FourCC_type_BGRX == v->FourCC) {
  172. st->codecpar->format = AV_PIX_FMT_BGR0;
  173. st->codecpar->codec_tag = MKTAG('B', 'G', 'R', '0');
  174. } else if (NDIlib_FourCC_type_RGBA == v->FourCC) {
  175. st->codecpar->format = AV_PIX_FMT_RGBA;
  176. st->codecpar->codec_tag = MKTAG('R', 'G', 'B', 'A');
  177. } else if (NDIlib_FourCC_type_RGBX == v->FourCC) {
  178. st->codecpar->format = AV_PIX_FMT_RGB0;
  179. st->codecpar->codec_tag = MKTAG('R', 'G', 'B', '0');
  180. } else {
  181. av_log(avctx, AV_LOG_ERROR, "Unsupported video stream format, v->FourCC=%d\n", v->FourCC);
  182. return AVERROR(EINVAL);
  183. }
  184. avpriv_set_pts_info(st, 64, 1, NDI_TIME_BASE);
  185. ctx->video_st = st;
  186. return 0;
  187. }
  188. static int ndi_create_audio_stream(AVFormatContext *avctx, NDIlib_audio_frame_t *a)
  189. {
  190. AVStream *st;
  191. struct NDIContext *ctx = avctx->priv_data;
  192. st = avformat_new_stream(avctx, NULL);
  193. if (!st) {
  194. av_log(avctx, AV_LOG_ERROR, "Cannot add audio stream\n");
  195. return AVERROR(ENOMEM);
  196. }
  197. st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
  198. st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE;
  199. st->codecpar->sample_rate = a->sample_rate;
  200. st->codecpar->channels = a->no_channels;
  201. avpriv_set_pts_info(st, 64, 1, NDI_TIME_BASE);
  202. ctx->audio_st = st;
  203. return 0;
  204. }
  205. static int ndi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
  206. {
  207. int ret = 0;
  208. struct NDIContext *ctx = avctx->priv_data;
  209. while (!ret) {
  210. NDIlib_video_frame_t v;
  211. NDIlib_audio_frame_t a;
  212. NDIlib_metadata_frame_t m;
  213. NDIlib_frame_type_e t;
  214. av_log(avctx, AV_LOG_DEBUG, "NDIlib_recv_capture...\n");
  215. t = NDIlib_recv_capture(ctx->recv, &v, &a, &m, 40);
  216. av_log(avctx, AV_LOG_DEBUG, "NDIlib_recv_capture=%d\n", t);
  217. if (t == NDIlib_frame_type_video) {
  218. if (!ctx->video_st)
  219. ret = ndi_create_video_stream(avctx, &v);
  220. if (!ret)
  221. ret = ndi_set_video_packet(avctx, &v, pkt);
  222. NDIlib_recv_free_video(ctx->recv, &v);
  223. break;
  224. }
  225. else if (t == NDIlib_frame_type_audio) {
  226. if (!ctx->audio_st)
  227. ret = ndi_create_audio_stream(avctx, &a);
  228. if (!ret)
  229. ret = ndi_set_audio_packet(avctx, &a, pkt);
  230. NDIlib_recv_free_audio(ctx->recv, &a);
  231. break;
  232. }
  233. else if (t == NDIlib_frame_type_metadata)
  234. NDIlib_recv_free_metadata(ctx->recv, &m);
  235. else if (t == NDIlib_frame_type_error){
  236. av_log(avctx, AV_LOG_ERROR, "NDIlib_recv_capture failed with error\n");
  237. ret = AVERROR(EIO);
  238. }
  239. };
  240. return ret;
  241. }
  242. static int ndi_read_close(AVFormatContext *avctx)
  243. {
  244. struct NDIContext *ctx = (struct NDIContext *)avctx->priv_data;
  245. if (ctx->recv)
  246. NDIlib_recv_destroy(ctx->recv);
  247. if (ctx->ndi_find)
  248. NDIlib_find_destroy(ctx->ndi_find);
  249. return 0;
  250. }
#define OFFSET(x) offsetof(struct NDIContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private options exposed via the AVClass below; offsets point into
 * struct NDIContext. wait_sources is an AV_OPT_TYPE_DURATION, i.e. the
 * default/min/max are in microseconds (default 1 s, range 0.1-20 s). */
static const AVOption options[] = {
    { "find_sources", "Find available sources" , OFFSET(find_sources), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC },
    { "wait_sources", "Time to wait until the number of online sources have changed" , OFFSET(wait_sources), AV_OPT_TYPE_DURATION, { .i64 = 1000000 }, 100000, 20000000, DEC },
    { "allow_video_fields", "When this flag is FALSE, all video that you receive will be progressive" , OFFSET(allow_video_fields), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, DEC },
    { NULL },
};
/* AVClass tying the option table to the demuxer's priv_data. */
static const AVClass libndi_newtek_demuxer_class = {
    .class_name = "NDI demuxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/* Demuxer registration. AVFMT_NOFILE: the "url" is an NDI source name, not
 * a file, so lavf must not try to open it via the I/O layer. */
AVInputFormat ff_libndi_newtek_demuxer = {
    .name           = "libndi_newtek",
    .long_name      = NULL_IF_CONFIG_SMALL("Network Device Interface (NDI) input using NewTek library"),
    .flags          = AVFMT_NOFILE,
    .priv_class     = &libndi_newtek_demuxer_class,
    .priv_data_size = sizeof(struct NDIContext),
    .read_header    = ndi_read_header,
    .read_packet    = ndi_read_packet,
    .read_close     = ndi_read_close,
};