You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

700 lines
23KB

  1. /*
  2. * Blackmagic DeckLink input
  3. * Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /* Include internal.h first to avoid conflict between winsock.h (used by
  22. * DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
  23. extern "C" {
  24. #include "libavformat/internal.h"
  25. }
  26. #include <DeckLinkAPI.h>
  27. extern "C" {
  28. #include "config.h"
  29. #include "libavformat/avformat.h"
  30. #include "libavutil/avassert.h"
  31. #include "libavutil/avutil.h"
  32. #include "libavutil/common.h"
  33. #include "libavutil/imgutils.h"
  34. #include "libavutil/time.h"
  35. #include "libavutil/mathematics.h"
  36. #if CONFIG_LIBZVBI
  37. #include <libzvbi.h>
  38. #endif
  39. }
  40. #include "decklink_common.h"
  41. #include "decklink_dec.h"
  42. #if CONFIG_LIBZVBI
  43. static uint8_t calc_parity_and_line_offset(int line)
  44. {
  45. uint8_t ret = (line < 313) << 5;
  46. if (line >= 7 && line <= 22)
  47. ret += line;
  48. if (line >= 320 && line <= 335)
  49. ret += (line - 313);
  50. return ret;
  51. }
  52. static void fill_data_unit_head(int line, uint8_t *tgt)
  53. {
  54. tgt[0] = 0x02; // data_unit_id
  55. tgt[1] = 0x2c; // data_unit_length
  56. tgt[2] = calc_parity_and_line_offset(line); // field_parity, line_offset
  57. tgt[3] = 0xe4; // framing code
  58. }
static uint8_t* teletext_data_unit_from_vbi_data(int line, uint8_t *src, uint8_t *tgt, vbi_pixfmt fmt)
{
    /* Bit-slice one 720-sample VBI line (in pixel format fmt) into a
     * teletext data unit at tgt.  Returns the next free position in the
     * target buffer: tgt + 46 (4-byte header + 42 payload bytes) when a
     * packet was decoded, or tgt unchanged when slicing found nothing,
     * so the caller simply skips this line.
     * NOTE(review): the slicer constants (13.5 MHz sampling, 6.9375 Mbit/s
     * bit rate, 0x00aaaae4 CRI+framing sequence, 42*8 payload bits, NRZ
     * MSB-first) appear to target System B teletext in 625-line video -
     * confirm against the libzvbi vbi_bit_slicer_init() documentation
     * before touching them. */
    vbi_bit_slicer slicer;

    vbi_bit_slicer_init(&slicer, 720, 13500000, 6937500, 6937500, 0x00aaaae4, 0xffff, 18, 6, 42 * 8, VBI_MODULATION_NRZ_MSB, fmt);
    /* Payload goes directly after the 4-byte header slot. */
    if (vbi_bit_slice(&slicer, src, tgt + 4) == FALSE)
        return tgt;

    fill_data_unit_head(line, tgt);

    return tgt + 46;
}
  68. static uint8_t* teletext_data_unit_from_vbi_data_10bit(int line, uint8_t *src, uint8_t *tgt)
  69. {
  70. uint8_t y[720];
  71. uint8_t *py = y;
  72. uint8_t *pend = y + 720;
  73. /* The 10-bit VBI data is packed in V210, but libzvbi only supports 8-bit,
  74. * so we extract the 8 MSBs of the luma component, that is enough for
  75. * teletext bit slicing. */
  76. while (py < pend) {
  77. *py++ = (src[1] >> 4) + ((src[2] & 15) << 4);
  78. *py++ = (src[4] >> 2) + ((src[5] & 3 ) << 6);
  79. *py++ = (src[6] >> 6) + ((src[7] & 63) << 2);
  80. src += 8;
  81. }
  82. return teletext_data_unit_from_vbi_data(line, y, tgt, VBI_PIXFMT_YUV420);
  83. }
  84. #endif
  85. static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
  86. {
  87. memset(q, 0, sizeof(AVPacketQueue));
  88. pthread_mutex_init(&q->mutex, NULL);
  89. pthread_cond_init(&q->cond, NULL);
  90. q->avctx = avctx;
  91. }
  92. static void avpacket_queue_flush(AVPacketQueue *q)
  93. {
  94. AVPacketList *pkt, *pkt1;
  95. pthread_mutex_lock(&q->mutex);
  96. for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
  97. pkt1 = pkt->next;
  98. av_packet_unref(&pkt->pkt);
  99. av_freep(&pkt);
  100. }
  101. q->last_pkt = NULL;
  102. q->first_pkt = NULL;
  103. q->nb_packets = 0;
  104. q->size = 0;
  105. pthread_mutex_unlock(&q->mutex);
  106. }
  107. static void avpacket_queue_end(AVPacketQueue *q)
  108. {
  109. avpacket_queue_flush(q);
  110. pthread_mutex_destroy(&q->mutex);
  111. pthread_cond_destroy(&q->cond);
  112. }
  113. static unsigned long long avpacket_queue_size(AVPacketQueue *q)
  114. {
  115. unsigned long long size;
  116. pthread_mutex_lock(&q->mutex);
  117. size = q->size;
  118. pthread_mutex_unlock(&q->mutex);
  119. return size;
  120. }
  121. static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
  122. {
  123. AVPacketList *pkt1;
  124. // Drop Packet if queue size is > 1GB
  125. if (avpacket_queue_size(q) > 1024 * 1024 * 1024 ) {
  126. av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
  127. return -1;
  128. }
  129. /* duplicate the packet */
  130. if (av_dup_packet(pkt) < 0) {
  131. return -1;
  132. }
  133. pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
  134. if (!pkt1) {
  135. return -1;
  136. }
  137. pkt1->pkt = *pkt;
  138. pkt1->next = NULL;
  139. pthread_mutex_lock(&q->mutex);
  140. if (!q->last_pkt) {
  141. q->first_pkt = pkt1;
  142. } else {
  143. q->last_pkt->next = pkt1;
  144. }
  145. q->last_pkt = pkt1;
  146. q->nb_packets++;
  147. q->size += pkt1->pkt.size + sizeof(*pkt1);
  148. pthread_cond_signal(&q->cond);
  149. pthread_mutex_unlock(&q->mutex);
  150. return 0;
  151. }
  152. static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
  153. {
  154. AVPacketList *pkt1;
  155. int ret;
  156. pthread_mutex_lock(&q->mutex);
  157. for (;; ) {
  158. pkt1 = q->first_pkt;
  159. if (pkt1) {
  160. q->first_pkt = pkt1->next;
  161. if (!q->first_pkt) {
  162. q->last_pkt = NULL;
  163. }
  164. q->nb_packets--;
  165. q->size -= pkt1->pkt.size + sizeof(*pkt1);
  166. *pkt = pkt1->pkt;
  167. av_free(pkt1);
  168. ret = 1;
  169. break;
  170. } else if (!block) {
  171. ret = 0;
  172. break;
  173. } else {
  174. pthread_cond_wait(&q->cond, &q->mutex);
  175. }
  176. }
  177. pthread_mutex_unlock(&q->mutex);
  178. return ret;
  179. }
/* COM-style callback object handed to the DeckLink driver; the driver
 * invokes VideoInputFrameArrived() for every captured video/audio pair. */
class decklink_input_callback : public IDeckLinkInputCallback
{
public:
    decklink_input_callback(AVFormatContext *_avctx);
    ~decklink_input_callback();

    /* Interface queries are not supported by this implementation. */
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
    virtual ULONG STDMETHODCALLTYPE AddRef(void);
    virtual ULONG STDMETHODCALLTYPE Release(void);
    virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
    virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);

private:
    ULONG m_refCount;           // COM reference count, guarded by m_mutex
    pthread_mutex_t m_mutex;
    AVFormatContext *avctx;     // owning demuxer context (not owned here)
    decklink_ctx *ctx;          // shared capture state from avctx->priv_data
    int no_video;               // nonzero while input reports no signal
    int64_t initial_video_pts;  // first video pts seen; used to rebase to 0
    int64_t initial_audio_pts;  // first audio pts seen; used to rebase to 0
};
  199. decklink_input_callback::decklink_input_callback(AVFormatContext *_avctx) : m_refCount(0)
  200. {
  201. avctx = _avctx;
  202. decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
  203. ctx = (struct decklink_ctx *)cctx->ctx;
  204. no_video = 0;
  205. initial_audio_pts = initial_video_pts = AV_NOPTS_VALUE;
  206. pthread_mutex_init(&m_mutex, NULL);
  207. }
decklink_input_callback::~decklink_input_callback()
{
    /* Release the mutex that guarded the COM reference count. */
    pthread_mutex_destroy(&m_mutex);
}
  212. ULONG decklink_input_callback::AddRef(void)
  213. {
  214. pthread_mutex_lock(&m_mutex);
  215. m_refCount++;
  216. pthread_mutex_unlock(&m_mutex);
  217. return (ULONG)m_refCount;
  218. }
  219. ULONG decklink_input_callback::Release(void)
  220. {
  221. pthread_mutex_lock(&m_mutex);
  222. m_refCount--;
  223. pthread_mutex_unlock(&m_mutex);
  224. if (m_refCount == 0) {
  225. delete this;
  226. return 0;
  227. }
  228. return (ULONG)m_refCount;
  229. }
  230. static int64_t get_pkt_pts(IDeckLinkVideoInputFrame *videoFrame,
  231. IDeckLinkAudioInputPacket *audioFrame,
  232. int64_t wallclock,
  233. DecklinkPtsSource pts_src,
  234. AVRational time_base, int64_t *initial_pts)
  235. {
  236. int64_t pts = AV_NOPTS_VALUE;
  237. BMDTimeValue bmd_pts;
  238. BMDTimeValue bmd_duration;
  239. HRESULT res = E_INVALIDARG;
  240. switch (pts_src) {
  241. case PTS_SRC_AUDIO:
  242. if (audioFrame)
  243. res = audioFrame->GetPacketTime(&bmd_pts, time_base.den);
  244. break;
  245. case PTS_SRC_VIDEO:
  246. if (videoFrame)
  247. res = videoFrame->GetStreamTime(&bmd_pts, &bmd_duration, time_base.den);
  248. break;
  249. case PTS_SRC_REFERENCE:
  250. if (videoFrame)
  251. res = videoFrame->GetHardwareReferenceTimestamp(time_base.den, &bmd_pts, &bmd_duration);
  252. break;
  253. case PTS_SRC_WALLCLOCK:
  254. {
  255. /* MSVC does not support compound literals like AV_TIME_BASE_Q
  256. * in C++ code (compiler error C4576) */
  257. AVRational timebase;
  258. timebase.num = 1;
  259. timebase.den = AV_TIME_BASE;
  260. pts = av_rescale_q(wallclock, timebase, time_base);
  261. break;
  262. }
  263. }
  264. if (res == S_OK)
  265. pts = bmd_pts / time_base.num;
  266. if (pts != AV_NOPTS_VALUE && *initial_pts == AV_NOPTS_VALUE)
  267. *initial_pts = pts;
  268. if (*initial_pts != AV_NOPTS_VALUE)
  269. pts -= *initial_pts;
  270. return pts;
  271. }
/* Driver callback for each captured frame: queues one video packet
 * (pointing directly into the driver's frame buffer), optionally one
 * teletext packet sliced from the VANC lines, and one audio packet. */
HRESULT decklink_input_callback::VideoInputFrameArrived(
    IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
{
    void *frameBytes;
    void *audioFrameBytes;
    BMDTimeValue frameTime;
    BMDTimeValue frameDuration;
    int64_t wallclock = 0;

    ctx->frameCount++;
    /* Sample the wallclock once so audio and video share one reference. */
    if (ctx->audio_pts_source == PTS_SRC_WALLCLOCK || ctx->video_pts_source == PTS_SRC_WALLCLOCK)
        wallclock = av_gettime_relative();

    // Handle Video Frame
    if (videoFrame) {
        AVPacket pkt;
        av_init_packet(&pkt);
        /* Periodic debug report (every 25th frame) of frame size and
         * current queue occupancy. */
        if (ctx->frameCount % 25 == 0) {
            unsigned long long qsize = avpacket_queue_size(&ctx->queue);
            av_log(avctx, AV_LOG_DEBUG,
                   "Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
                   ctx->frameCount,
                   videoFrame->GetRowBytes() * videoFrame->GetHeight(),
                   (double)qsize / 1024 / 1024);
        }
        videoFrame->GetBytes(&frameBytes);
        videoFrame->GetStreamTime(&frameTime, &frameDuration,
                                  ctx->video_st->time_base.den);

        if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
            /* No input signal: optionally overwrite the frame with colour
             * bars (only implemented for 8-bit UYVY). */
            if (ctx->draw_bars && videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
                unsigned bars[8] = {
                    0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
                    0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
                int width  = videoFrame->GetWidth();
                int height = videoFrame->GetHeight();
                unsigned *p = (unsigned *)frameBytes;

                for (int y = 0; y < height; y++) {
                    for (int x = 0; x < width; x += 2)
                        *p++ = bars[(x * 8) / width];
                }
            }

            /* Log only the first frame of a signal-loss run. */
            if (!no_video) {
                av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
                       "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
            }
            no_video = 1;
        } else {
            /* Log only the first frame after signal comes back. */
            if (no_video) {
                av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
                       "- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
            }
            no_video = 0;
        }

        pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, ctx->video_pts_source, ctx->video_st->time_base, &initial_video_pts);
        pkt.dts = pkt.pts;

        pkt.duration = frameDuration;
        //To be made sure it still applies
        pkt.flags       |= AV_PKT_FLAG_KEY;
        pkt.stream_index = ctx->video_st->index;
        /* The packet points into the driver's buffer; avpacket_queue_put()
         * duplicates it before the callback returns. */
        pkt.data         = (uint8_t *)frameBytes;
        pkt.size         = videoFrame->GetRowBytes() *
                           videoFrame->GetHeight();
        //fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);

#if CONFIG_LIBZVBI
        /* Slice teletext out of the requested PAL VANC lines and queue it
         * as a packet on the teletext stream. */
        if (!no_video && ctx->teletext_lines) {
            IDeckLinkVideoFrameAncillary *vanc;
            AVPacket txt_pkt;
            uint8_t txt_buf0[1611]; // max 35 * 46 bytes decoded teletext lines + 1 byte data_identifier
            uint8_t *txt_buf = txt_buf0;

            if (videoFrame->GetAncillaryData(&vanc) == S_OK) {
                int i;
                int64_t line_mask = 1;
                BMDPixelFormat vanc_format = vanc->GetPixelFormat();
                txt_buf[0] = 0x10;    // data_identifier - EBU_data
                txt_buf++;

                if (ctx->bmd_mode == bmdModePAL && (vanc_format == bmdFormat8BitYUV || vanc_format == bmdFormat10BitYUV)) {
                    av_assert0(videoFrame->GetWidth() == 720);
                    /* Walk lines 6..22, then jump to 318..335 (second
                     * field); line_mask selects lines enabled by the
                     * teletext_lines option. */
                    for (i = 6; i < 336; i++, line_mask <<= 1) {
                        uint8_t *buf;
                        if ((ctx->teletext_lines & line_mask) && vanc->GetBufferForVerticalBlankingLine(i, (void**)&buf) == S_OK) {
                            if (vanc_format == bmdFormat8BitYUV)
                                txt_buf = teletext_data_unit_from_vbi_data(i, buf, txt_buf, VBI_PIXFMT_UYVY);
                            else
                                txt_buf = teletext_data_unit_from_vbi_data_10bit(i, buf, txt_buf);
                        }
                        if (i == 22)
                            i = 317;
                    }
                }
                vanc->Release();
                if (txt_buf - txt_buf0 > 1) {
                    /* Pad with 0xff stuffing units so the payload length
                     * (including the PES header accounted as 45 bytes) is a
                     * multiple of 4 data units. */
                    int stuffing_units = (4 - ((45 + txt_buf - txt_buf0) / 46) % 4) % 4;
                    while (stuffing_units--) {
                        memset(txt_buf, 0xff, 46);
                        txt_buf[1] = 0x2c; // data_unit_length
                        txt_buf += 46;
                    }
                    av_init_packet(&txt_pkt);
                    /* Teletext shares the video packet's timestamps. */
                    txt_pkt.pts = pkt.pts;
                    txt_pkt.dts = pkt.dts;
                    txt_pkt.stream_index = ctx->teletext_st->index;
                    txt_pkt.data = txt_buf0;
                    txt_pkt.size = txt_buf - txt_buf0;
                    if (avpacket_queue_put(&ctx->queue, &txt_pkt) < 0) {
                        ++ctx->dropped;
                    }
                }
            }
        }
#endif

        if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
            ++ctx->dropped;
        }
    }

    // Handle Audio Frame
    if (audioFrame) {
        AVPacket pkt;
        BMDTimeValue audio_pts;
        av_init_packet(&pkt);

        //hack among hacks
        /* Size assumes 16-bit signed PCM: frames * channels * 2 bytes. */
        pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codecpar->channels * (16 / 8);
        audioFrame->GetBytes(&audioFrameBytes);
        audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
        pkt.pts = get_pkt_pts(videoFrame, audioFrame, wallclock, ctx->audio_pts_source, ctx->audio_st->time_base, &initial_audio_pts);
        pkt.dts = pkt.pts;

        //fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
        pkt.flags       |= AV_PKT_FLAG_KEY;
        pkt.stream_index = ctx->audio_st->index;
        pkt.data         = (uint8_t *)audioFrameBytes;

        if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
            ++ctx->dropped;
        }
    }

    return S_OK;
}
HRESULT decklink_input_callback::VideoInputFormatChanged(
    BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
    BMDDetectedVideoInputFormatFlags)
{
    /* Intentionally a no-op: input format changes are ignored and capture
     * continues with the mode configured at ff_decklink_read_header() time. */
    return S_OK;
}
/* Install the frame-arrival callback on the input interface and begin
 * streaming.  Returns the DeckLink HRESULT from StartStreams(). */
static HRESULT decklink_start_input(AVFormatContext *avctx)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;

    /* NOTE(review): no AddRef/Release pairing on input_callback is visible
     * here - verify its lifetime is handled by ff_decklink_cleanup(). */
    ctx->input_callback = new decklink_input_callback(avctx);
    ctx->dli->SetCallback(ctx->input_callback);
    return ctx->dli->StartStreams();
}
  419. extern "C" {
/* Demuxer close: stop capture, release the device and free all state. */
av_cold int ff_decklink_read_close(AVFormatContext *avctx)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;

    /* Stop streaming before tearing anything down so the callback cannot
     * fire against freed state. */
    if (ctx->capture_started) {
        ctx->dli->StopStreams();
        ctx->dli->DisableVideoInput();
        ctx->dli->DisableAudioInput();
    }

    ff_decklink_cleanup(avctx);
    avpacket_queue_end(&ctx->queue);

    av_freep(&cctx->ctx);

    return 0;
}
/* Demuxer open: allocate the capture context, validate options, open the
 * DeckLink device and input interface, create the audio/video (and
 * optional teletext) streams, enable inputs and start capture. */
av_cold int ff_decklink_read_header(AVFormatContext *avctx)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx;
    AVStream *st;
    HRESULT result;
    char fname[1024];
    char *tmp;
    int mode_num = 0;
    int ret;

    /* Allocate the private capture context and copy user options into it. */
    ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
    if (!ctx)
        return AVERROR(ENOMEM);
    ctx->list_devices = cctx->list_devices;
    ctx->list_formats = cctx->list_formats;
    ctx->teletext_lines = cctx->teletext_lines;
    ctx->preroll = cctx->preroll;
    ctx->duplex_mode = cctx->duplex_mode;
    /* Map the numeric -video_input/-audio_input options to DeckLink
     * connection enums; out-of-range values are silently ignored. */
    if (cctx->video_input > 0 && (unsigned int)cctx->video_input < FF_ARRAY_ELEMS(decklink_video_connection_map))
        ctx->video_input = decklink_video_connection_map[cctx->video_input];
    if (cctx->audio_input > 0 && (unsigned int)cctx->audio_input < FF_ARRAY_ELEMS(decklink_audio_connection_map))
        ctx->audio_input = decklink_audio_connection_map[cctx->audio_input];
    ctx->audio_pts_source = cctx->audio_pts_source;
    ctx->video_pts_source = cctx->video_pts_source;
    ctx->draw_bars = cctx->draw_bars;
    cctx->ctx = ctx;

#if !CONFIG_LIBZVBI
    if (ctx->teletext_lines) {
        av_log(avctx, AV_LOG_ERROR, "Libzvbi support is needed for capturing teletext, please recompile FFmpeg.\n");
        return AVERROR(ENOSYS);
    }
#endif

    /* Check audio channel option for valid values: 2, 8 or 16 */
    switch (cctx->audio_channels) {
    case 2:
    case 8:
    case 16:
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Value of channels option must be one of 2, 8 or 16\n");
        return AVERROR(EINVAL);
    }

    /* List available devices. */
    if (ctx->list_devices) {
        ff_decklink_list_devices(avctx);
        return AVERROR_EXIT;
    }

    /* Parse the deprecated "device@mode" syntax.
     * NOTE(review): strcpy is unbounded - a filename longer than 1023
     * bytes overflows fname; consider av_strlcpy. */
    strcpy (fname, avctx->filename);
    tmp=strchr (fname, '@');
    if (tmp != NULL) {
        av_log(avctx, AV_LOG_WARNING, "The @mode syntax is deprecated and will be removed. Please use the -format_code option.\n");
        mode_num = atoi (tmp+1);
        *tmp = 0;
    }

    ret = ff_decklink_init_device(avctx, fname);
    if (ret < 0)
        return ret;

    /* Get input device. */
    if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not open input device from '%s'\n",
               avctx->filename);
        ret = AVERROR(EIO);
        goto error;
    }

    /* List supported formats. */
    if (ctx->list_formats) {
        ff_decklink_list_formats(avctx, DIRECTION_IN);
        ret = AVERROR_EXIT;
        goto error;
    }

    /* Apply the capture mode requested via @mode or -format_code. */
    if (mode_num > 0 || cctx->format_code) {
        if (ff_decklink_set_format(avctx, DIRECTION_IN, mode_num) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Could not set mode number %d or format code %s for %s\n",
                   mode_num, (cctx->format_code) ? cctx->format_code : "(unset)", fname);
            ret = AVERROR(EIO);
            goto error;
        }
    }

    /* Setup streams. */
    /* Audio stream: 48 kHz signed 16-bit PCM, channel count from options. */
    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
        ret = AVERROR(ENOMEM);
        goto error;
    }
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE;
    st->codecpar->sample_rate = bmdAudioSampleRate48kHz;
    st->codecpar->channels = cctx->audio_channels;
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
    ctx->audio_st=st;

    /* Video stream: raw UYVY by default, V210 when the v210 option is set. */
    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
        ret = AVERROR(ENOMEM);
        goto error;
    }
    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->width = ctx->bmd_width;
    st->codecpar->height = ctx->bmd_height;

    st->time_base.den = ctx->bmd_tb_den;
    st->time_base.num = ctx->bmd_tb_num;
    av_stream_set_r_frame_rate(st, av_make_q(st->time_base.den, st->time_base.num));

    if (cctx->v210) {
        st->codecpar->codec_id = AV_CODEC_ID_V210;
        st->codecpar->codec_tag = MKTAG('V', '2', '1', '0');
        /* V210 packs 6 pixels into 16 bytes: ~64/3 bits per pixel. */
        st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 64, st->time_base.den, st->time_base.num * 3);
    } else {
        st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
        st->codecpar->format = AV_PIX_FMT_UYVY422;
        st->codecpar->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
        st->codecpar->bit_rate = av_rescale(ctx->bmd_width * ctx->bmd_height * 16, st->time_base.den, st->time_base.num);
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
    ctx->video_st=st;

    /* Optional teletext stream (only when teletext_lines is set; requires
     * libzvbi, enforced above). */
    if (ctx->teletext_lines) {
        st = avformat_new_stream(avctx, NULL);
        if (!st) {
            av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
            ret = AVERROR(ENOMEM);
            goto error;
        }
        st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
        st->time_base.den = ctx->bmd_tb_den;
        st->time_base.num = ctx->bmd_tb_num;
        st->codecpar->codec_id = AV_CODEC_ID_DVB_TELETEXT;
        avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
        ctx->teletext_st = st;
    }

    av_log(avctx, AV_LOG_VERBOSE, "Using %d input audio channels\n", ctx->audio_st->codecpar->channels);
    result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, ctx->audio_st->codecpar->channels);

    if (result != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
        ret = AVERROR(EIO);
        goto error;
    }

    result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
                                        cctx->v210 ? bmdFormat10BitYUV : bmdFormat8BitYUV,
                                        bmdVideoInputFlagDefault);

    if (result != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
        ret = AVERROR(EIO);
        goto error;
    }

    avpacket_queue_init (avctx, &ctx->queue);

    if (decklink_start_input (avctx) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
        ret = AVERROR(EIO);
        goto error;
    }

    return 0;

error:
    ff_decklink_cleanup(avctx);
    return ret;
}
/* Demuxer read: block until the capture callback has queued a packet,
 * then hand it to the caller. */
int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    /* NOTE(review): relies on the deprecated AVStream.codec->coded_frame;
     * frame may well be NULL here, in which case the interlacing flags are
     * simply not propagated - confirm against the libavformat version in use. */
    AVFrame *frame = ctx->video_st->codec->coded_frame;

    avpacket_queue_get(&ctx->queue, pkt, 1);
    /* Propagate field dominance from the DeckLink mode onto the frame. */
    if (frame && (ctx->bmd_field_dominance == bmdUpperFieldFirst || ctx->bmd_field_dominance == bmdLowerFieldFirst)) {
        frame->interlaced_frame = 1;
        if (ctx->bmd_field_dominance == bmdUpperFieldFirst) {
            frame->top_field_first = 1;
        }
    }

    return 0;
}
  603. } /* extern "C" */