You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1192 lines
43KB

  1. /*
  2. * ffmpeg filter configuration
  3. *
  4. * This file is part of FFmpeg.
  5. *
  6. * FFmpeg is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * FFmpeg is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with FFmpeg; if not, write to the Free Software
  18. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  19. */
  20. #include <stdint.h>
  21. #include "ffmpeg.h"
  22. #include "libavfilter/avfilter.h"
  23. #include "libavfilter/buffersink.h"
  24. #include "libavfilter/buffersrc.h"
  25. #include "libavresample/avresample.h"
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/avstring.h"
  28. #include "libavutil/bprint.h"
  29. #include "libavutil/channel_layout.h"
  30. #include "libavutil/display.h"
  31. #include "libavutil/opt.h"
  32. #include "libavutil/pixdesc.h"
  33. #include "libavutil/pixfmt.h"
  34. #include "libavutil/imgutils.h"
  35. #include "libavutil/samplefmt.h"
  36. static const enum AVPixelFormat *get_compliance_unofficial_pix_fmts(enum AVCodecID codec_id, const enum AVPixelFormat default_formats[])
  37. {
  38. static const enum AVPixelFormat mjpeg_formats[] =
  39. { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
  40. AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
  41. AV_PIX_FMT_NONE };
  42. static const enum AVPixelFormat ljpeg_formats[] =
  43. { AV_PIX_FMT_BGR24 , AV_PIX_FMT_BGRA , AV_PIX_FMT_BGR0,
  44. AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
  45. AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
  46. AV_PIX_FMT_NONE};
  47. if (codec_id == AV_CODEC_ID_MJPEG) {
  48. return mjpeg_formats;
  49. } else if (codec_id == AV_CODEC_ID_LJPEG) {
  50. return ljpeg_formats;
  51. } else {
  52. return default_formats;
  53. }
  54. }
  55. enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
  56. const AVCodec *codec, enum AVPixelFormat target)
  57. {
  58. if (codec && codec->pix_fmts) {
  59. const enum AVPixelFormat *p = codec->pix_fmts;
  60. const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
  61. //FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
  62. int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
  63. enum AVPixelFormat best= AV_PIX_FMT_NONE;
  64. if (enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
  65. p = get_compliance_unofficial_pix_fmts(enc_ctx->codec_id, p);
  66. }
  67. for (; *p != AV_PIX_FMT_NONE; p++) {
  68. best= avcodec_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
  69. if (*p == target)
  70. break;
  71. }
  72. if (*p == AV_PIX_FMT_NONE) {
  73. if (target != AV_PIX_FMT_NONE)
  74. av_log(NULL, AV_LOG_WARNING,
  75. "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
  76. av_get_pix_fmt_name(target),
  77. codec->name,
  78. av_get_pix_fmt_name(best));
  79. return best;
  80. }
  81. }
  82. return target;
  83. }
  84. static char *choose_pix_fmts(OutputFilter *ofilter)
  85. {
  86. OutputStream *ost = ofilter->ost;
  87. AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
  88. if (strict_dict)
  89. // used by choose_pixel_fmt() and below
  90. av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);
  91. if (ost->keep_pix_fmt) {
  92. avfilter_graph_set_auto_convert(ofilter->graph->graph,
  93. AVFILTER_AUTO_CONVERT_NONE);
  94. if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
  95. return NULL;
  96. return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
  97. }
  98. if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
  99. return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
  100. } else if (ost->enc && ost->enc->pix_fmts) {
  101. const enum AVPixelFormat *p;
  102. AVIOContext *s = NULL;
  103. uint8_t *ret;
  104. int len;
  105. if (avio_open_dyn_buf(&s) < 0)
  106. exit_program(1);
  107. p = ost->enc->pix_fmts;
  108. if (ost->enc_ctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
  109. p = get_compliance_unofficial_pix_fmts(ost->enc_ctx->codec_id, p);
  110. }
  111. for (; *p != AV_PIX_FMT_NONE; p++) {
  112. const char *name = av_get_pix_fmt_name(*p);
  113. avio_printf(s, "%s|", name);
  114. }
  115. len = avio_close_dyn_buf(s, &ret);
  116. ret[len - 1] = 0;
  117. return ret;
  118. } else
  119. return NULL;
  120. }
/* Define a function for building a string containing a list of
 * allowed formats. */
/*
 * DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name)
 * expands to a function choose_<suffix>(OutputFilter *ofilter) returning
 * a malloc'ed string (caller frees): either the single value forced on
 * the output (ofilter-><var>, when != <none>) or a '|'-separated list of
 * every entry in ofilter-><supported_list>, or NULL when unconstrained.
 * get_name(v) must declare/assign a local C string named `name`.
 *
 * NOTE(review): `ret[len - 1] = 0` (dropping the trailing '|') assumes
 * at least one element was printed; a supported_list holding only the
 * <none> terminator would write out of bounds — confirm callers never
 * pass an empty list.
 */
#define DEF_CHOOSE_FORMAT(suffix, type, var, supported_list, none, get_name) \
static char *choose_ ## suffix (OutputFilter *ofilter)                       \
{                                                                            \
    if (ofilter->var != none) {                                              \
        get_name(ofilter->var);                                              \
        return av_strdup(name);                                              \
    } else if (ofilter->supported_list) {                                    \
        const type *p;                                                       \
        AVIOContext *s = NULL;                                               \
        uint8_t *ret;                                                        \
        int len;                                                             \
                                                                             \
        if (avio_open_dyn_buf(&s) < 0)                                       \
            exit_program(1);                                                 \
                                                                             \
        for (p = ofilter->supported_list; *p != none; p++) {                 \
            get_name(*p);                                                    \
            avio_printf(s, "%s|", name);                                     \
        }                                                                    \
        len = avio_close_dyn_buf(s, &ret);                                   \
        ret[len - 1] = 0;                                                    \
        return ret;                                                          \
    } else                                                                   \
        return NULL;                                                         \
}
/* video uses choose_pix_fmts() above instead of the generic macro */
//DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
//                  GET_PIX_FMT_NAME)

/* choose_sample_fmts(): '|'-list of audio sample formats for an output */
DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
                  AV_SAMPLE_FMT_NONE, GET_SAMPLE_FMT_NAME)

/* choose_sample_rates(): '|'-list of sample rates (0 = unset) */
DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
                  GET_SAMPLE_RATE_NAME)

/* choose_channel_layouts(): '|'-list of channel layouts (0 = unset) */
DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
                  GET_CH_LAYOUT_NAME)
  156. int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
  157. {
  158. FilterGraph *fg = av_mallocz(sizeof(*fg));
  159. if (!fg)
  160. exit_program(1);
  161. fg->index = nb_filtergraphs;
  162. GROW_ARRAY(fg->outputs, fg->nb_outputs);
  163. if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
  164. exit_program(1);
  165. fg->outputs[0]->ost = ost;
  166. fg->outputs[0]->graph = fg;
  167. fg->outputs[0]->format = -1;
  168. ost->filter = fg->outputs[0];
  169. GROW_ARRAY(fg->inputs, fg->nb_inputs);
  170. if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
  171. exit_program(1);
  172. fg->inputs[0]->ist = ist;
  173. fg->inputs[0]->graph = fg;
  174. fg->inputs[0]->format = -1;
  175. fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
  176. if (!fg->inputs[0]->frame_queue)
  177. exit_program(1);
  178. GROW_ARRAY(ist->filters, ist->nb_filters);
  179. ist->filters[ist->nb_filters - 1] = fg->inputs[0];
  180. GROW_ARRAY(filtergraphs, nb_filtergraphs);
  181. filtergraphs[nb_filtergraphs - 1] = fg;
  182. return 0;
  183. }
  184. static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
  185. {
  186. AVFilterContext *ctx = inout->filter_ctx;
  187. AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
  188. int nb_pads = in ? ctx->nb_inputs : ctx->nb_outputs;
  189. AVIOContext *pb;
  190. uint8_t *res = NULL;
  191. if (avio_open_dyn_buf(&pb) < 0)
  192. exit_program(1);
  193. avio_printf(pb, "%s", ctx->filter->name);
  194. if (nb_pads > 1)
  195. avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
  196. avio_w8(pb, 0);
  197. avio_close_dyn_buf(pb, &res);
  198. return res;
  199. }
/* Bind one unconnected input pad of a complex filtergraph to an input
 * stream. A labeled pad ("<file index>[:<stream specifier>]") is
 * resolved against that file's streams; an unlabeled pad takes the
 * first not-yet-used stream of the matching media type. A new
 * InputFilter is appended to fg->inputs and registered with the chosen
 * stream. Any resolution failure is fatal (exits the program). */
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
{
    InputStream *ist = NULL;
    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
    int i;

    // TODO: support other filter types
    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
               "currently.\n");
        exit_program(1);
    }

    if (in->name) {
        /* labeled pad: "<file index>[:<stream specifier>]" */
        AVFormatContext *s;
        AVStream *st = NULL;
        char *p;
        int file_idx = strtol(in->name, &p, 0);

        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
                   file_idx, fg->graph_desc);
            exit_program(1);
        }
        s = input_files[file_idx]->ctx;

        for (i = 0; i < s->nb_streams; i++) {
            enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
            /* subtitle streams may also feed a video pad (sub2video) */
            if (stream_type != type &&
                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
                continue;
            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                st = s->streams[i];
                break;
            }
        }
        if (!st) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fg->graph_desc);
            exit_program(1);
        }
        ist = input_streams[input_files[file_idx]->ist_index + st->index];
        if (ist->user_set_discard == AVDISCARD_ALL) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches a disabled input stream.\n", p, fg->graph_desc);
            exit_program(1);
        }
    } else {
        /* find the first unused stream of corresponding type */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (ist->user_set_discard == AVDISCARD_ALL)
                continue;
            /* ist->discard set means the stream is not yet claimed */
            if (ist->dec_ctx->codec_type == type && ist->discard)
                break;
        }
        if (i == nb_input_streams) {
            av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %d on filter %s\n", in->pad_idx,
                   in->filter_ctx->name);
            exit_program(1);
        }
    }
    av_assert0(ist);

    /* claim the stream: it must now be decoded for filtering */
    ist->discard         = 0;
    ist->decoding_needed |= DECODING_FOR_FILTER;
    ist->st->discard     = AVDISCARD_NONE;

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
        exit_program(1);
    fg->inputs[fg->nb_inputs - 1]->ist    = ist;
    fg->inputs[fg->nb_inputs - 1]->graph  = fg;
    fg->inputs[fg->nb_inputs - 1]->format = -1;
    fg->inputs[fg->nb_inputs - 1]->type   = ist->st->codecpar->codec_type;
    fg->inputs[fg->nb_inputs - 1]->name   = describe_filter_link(fg, in, 1);

    fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
    if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
        exit_program(1);

    GROW_ARRAY(ist->filters, ist->nb_filters);
    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}
/* First-pass setup of a complex filtergraph: parse fg->graph_desc into
 * a throwaway AVFilterGraph purely to discover its unconnected inputs
 * and outputs, bind every input pad to a stream via init_input_filter()
 * and record every output pad in fg->outputs (keeping the AVFilterInOut
 * in out_tmp for the later real configuration pass).
 * Returns 0 on success or a negative AVERROR code. */
int init_complex_filtergraph(FilterGraph *fg)
{
    AVFilterInOut *inputs, *outputs, *cur;
    AVFilterGraph *graph;
    int ret = 0;

    /* this graph is only used for determining the kinds of inputs
     * and outputs we have, and is discarded on exit from this function */
    graph = avfilter_graph_alloc();
    if (!graph)
        return AVERROR(ENOMEM);
    graph->nb_threads = 1;

    ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
    if (ret < 0)
        goto fail;

    for (cur = inputs; cur; cur = cur->next)
        init_input_filter(fg, cur);

    for (cur = outputs; cur;) {
        GROW_ARRAY(fg->outputs, fg->nb_outputs);
        fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
        if (!fg->outputs[fg->nb_outputs - 1])
            exit_program(1);

        fg->outputs[fg->nb_outputs - 1]->graph   = fg;
        fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
        fg->outputs[fg->nb_outputs - 1]->type    = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                                         cur->pad_idx);
        fg->outputs[fg->nb_outputs - 1]->name    = describe_filter_link(fg, cur, 0);
        /* detach cur from the outputs list: its ownership moves to
         * out_tmp, so the avfilter_inout_free() below must not see it */
        cur = cur->next;
        fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
    }

fail:
    avfilter_inout_free(&inputs);
    avfilter_graph_free(&graph);
    return ret;
}
  312. static int insert_trim(int64_t start_time, int64_t duration,
  313. AVFilterContext **last_filter, int *pad_idx,
  314. const char *filter_name)
  315. {
  316. AVFilterGraph *graph = (*last_filter)->graph;
  317. AVFilterContext *ctx;
  318. const AVFilter *trim;
  319. enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
  320. const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
  321. int ret = 0;
  322. if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
  323. return 0;
  324. trim = avfilter_get_by_name(name);
  325. if (!trim) {
  326. av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
  327. "recording time.\n", name);
  328. return AVERROR_FILTER_NOT_FOUND;
  329. }
  330. ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
  331. if (!ctx)
  332. return AVERROR(ENOMEM);
  333. if (duration != INT64_MAX) {
  334. ret = av_opt_set_int(ctx, "durationi", duration,
  335. AV_OPT_SEARCH_CHILDREN);
  336. }
  337. if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
  338. ret = av_opt_set_int(ctx, "starti", start_time,
  339. AV_OPT_SEARCH_CHILDREN);
  340. }
  341. if (ret < 0) {
  342. av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
  343. return ret;
  344. }
  345. ret = avfilter_init_str(ctx, NULL);
  346. if (ret < 0)
  347. return ret;
  348. ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
  349. if (ret < 0)
  350. return ret;
  351. *last_filter = ctx;
  352. *pad_idx = 0;
  353. return 0;
  354. }
  355. static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
  356. const char *filter_name, const char *args)
  357. {
  358. AVFilterGraph *graph = (*last_filter)->graph;
  359. AVFilterContext *ctx;
  360. int ret;
  361. ret = avfilter_graph_create_filter(&ctx,
  362. avfilter_get_by_name(filter_name),
  363. filter_name, args, NULL, graph);
  364. if (ret < 0)
  365. return ret;
  366. ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
  367. if (ret < 0)
  368. return ret;
  369. *last_filter = ctx;
  370. *pad_idx = 0;
  371. return 0;
  372. }
/* Assemble the output side of a video chain: from the graph's output
 * pad through an optional scaler, an optional pixel-format constraint
 * and an output trim, into a buffersink stored in ofilter->filter.
 * Returns 0 on success or a negative AVERROR code. */
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    char *pix_fmts;
    OutputStream *ost = ofilter->ost;
    OutputFile *of = output_files[ost->file_index];
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    int ret;
    char name[255];

    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("buffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;

    /* forced output dimensions: insert a scaler, forwarding the user's
     * swscale options as extra scale-filter arguments */
    if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) {
        char args[255];
        AVFilterContext *filter;
        AVDictionaryEntry *e = NULL;

        snprintf(args, sizeof(args), "%d:%d",
                 ofilter->width, ofilter->height);

        while ((e = av_dict_get(ost->sws_dict, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
        }

        snprintf(name, sizeof(name), "scaler_out_%d_%d",
                 ost->file_index, ost->index);
        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                name, args, NULL, fg->graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    /* constrain the chain to pixel formats the encoder accepts */
    if ((pix_fmts = choose_pix_fmts(ofilter))) {
        AVFilterContext *filter;
        snprintf(name, sizeof(name), "format_out_%d_%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&filter,
                                           avfilter_get_by_name("format"),
                                           "format", pix_fmts, NULL, fg->graph);
        av_freep(&pix_fmts);
        if (ret < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    /* deliberately disabled ("&& 0"): fps insertion is not done here */
    if (ost->frame_rate.num && 0) {
        AVFilterContext *fps;
        char args[255];

        snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
                 ost->frame_rate.den);
        snprintf(name, sizeof(name), "fps_out_%d_%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, pad_idx, fps, 0);
        if (ret < 0)
            return ret;
        last_filter = fps;
        pad_idx = 0;
    }

    /* honor the output file's -ss/-t window */
    snprintf(name, sizeof(name), "trim_out_%d_%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        return ret;

    return 0;
}
/* Assemble the output side of an audio chain: from the graph's output
 * pad through optional channel-mapping (pan), format constraints
 * (aformat), optional padding (apad) and an output trim, into an
 * abuffersink stored in ofilter->filter.
 * Returns 0 on success or a negative AVERROR code. */
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVCodecContext *codec = ost->enc_ctx;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    char *sample_fmts, *sample_rates, *channel_layouts;
    char name[255];
    int ret;

    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("abuffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;
    if ((ret = av_opt_set_int(ofilter->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;

/* Create filter_name with the given arg, link it after last_filter and
 * make it the new tail of the chain. Returns from the enclosing
 * function on error (uses/sets `ret`, `last_filter`, `pad_idx`). */
#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do {                 \
    AVFilterContext *filt_ctx;                                              \
                                                                            \
    av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi "            \
           "similarly to -af " filter_name "=%s.\n", arg);                  \
                                                                            \
    ret = avfilter_graph_create_filter(&filt_ctx,                           \
                                       avfilter_get_by_name(filter_name),   \
                                       filter_name, arg, NULL, fg->graph);  \
    if (ret < 0)                                                            \
        return ret;                                                         \
                                                                            \
    ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0);                 \
    if (ret < 0)                                                            \
        return ret;                                                         \
                                                                            \
    last_filter = filt_ctx;                                                 \
    pad_idx = 0;                                                            \
} while (0)

    /* -map_channel: express the requested channel map as a pan filter */
    if (ost->audio_channels_mapped) {
        int i;
        AVBPrint pan_buf;
        av_bprint_init(&pan_buf, 256, 8192);
        av_bprintf(&pan_buf, "0x%"PRIx64,
                   av_get_default_channel_layout(ost->audio_channels_mapped));
        for (i = 0; i < ost->audio_channels_mapped; i++)
            if (ost->audio_channels_map[i] != -1)
                av_bprintf(&pan_buf, "|c%d=c%d", i, ost->audio_channels_map[i]);

        AUTO_INSERT_FILTER("-map_channel", "pan", pan_buf.str);
        av_bprint_finalize(&pan_buf, NULL);
    }

    /* derive a default layout from the channel count when unset */
    if (codec->channels && !codec->channel_layout)
        codec->channel_layout = av_get_default_channel_layout(codec->channels);

    sample_fmts     = choose_sample_fmts(ofilter);
    sample_rates    = choose_sample_rates(ofilter);
    channel_layouts = choose_channel_layouts(ofilter);
    if (sample_fmts || sample_rates || channel_layouts) {
        AVFilterContext *format;
        char args[256];
        args[0] = 0;

        if (sample_fmts)
            av_strlcatf(args, sizeof(args), "sample_fmts=%s:",
                        sample_fmts);
        if (sample_rates)
            av_strlcatf(args, sizeof(args), "sample_rates=%s:",
                        sample_rates);
        if (channel_layouts)
            av_strlcatf(args, sizeof(args), "channel_layouts=%s:",
                        channel_layouts);

        av_freep(&sample_fmts);
        av_freep(&sample_rates);
        av_freep(&channel_layouts);

        snprintf(name, sizeof(name), "format_out_%d_%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&format,
                                           avfilter_get_by_name("aformat"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
            return ret;

        last_filter = format;
        pad_idx = 0;
    }

    /* -apad with -shortest: pad audio only if the file has video too */
    if (ost->apad && of->shortest) {
        char args[256];
        int i;

        for (i=0; i<of->ctx->nb_streams; i++)
            if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                break;

        if (i<of->ctx->nb_streams) {
            snprintf(args, sizeof(args), "%s", ost->apad);
            AUTO_INSERT_FILTER("-apad", "apad", args);
        }
    }

    /* honor the output file's -ss/-t window */
    snprintf(name, sizeof(name), "trim for output stream %d:%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        return ret;

    return 0;
}
  554. int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
  555. {
  556. if (!ofilter->ost) {
  557. av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
  558. exit_program(1);
  559. }
  560. switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
  561. case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
  562. case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
  563. default: av_assert0(0);
  564. }
  565. }
  566. void check_filter_outputs(void)
  567. {
  568. int i;
  569. for (i = 0; i < nb_filtergraphs; i++) {
  570. int n;
  571. for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
  572. OutputFilter *output = filtergraphs[i]->outputs[n];
  573. if (!output->ost) {
  574. av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
  575. exit_program(1);
  576. }
  577. }
  578. }
  579. }
/* Prepare an InputFilter fed by a subtitle stream rendered as video
 * (sub2video): choose a canvas size, force an RGB32 format and allocate
 * the frame subtitles will be drawn into.
 * Returns 0 on success or AVERROR(ENOMEM). */
static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
{
    AVFormatContext *avf = input_files[ist->file_index]->ctx;
    int i, w, h;

    /* Compute the size of the canvas for the subtitles stream.
       If the subtitles codecpar has set a size, use it. Otherwise use the
       maximum dimensions of the video streams in the same file. */
    w = ifilter->width;
    h = ifilter->height;
    if (!(w && h)) {
        for (i = 0; i < avf->nb_streams; i++) {
            if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
                w = FFMAX(w, avf->streams[i]->codecpar->width);
                h = FFMAX(h, avf->streams[i]->codecpar->height);
            }
        }
        if (!(w && h)) {
            /* last resort: fall back to a 720x576 canvas */
            w = FFMAX(w, 720);
            h = FFMAX(h, 576);
        }
        av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
    }
    ist->sub2video.w = ifilter->width = w;
    ist->sub2video.h = ifilter->height = h;

    /* decoder-reported dimensions, when available, override the canvas
       just stored above; the canvas size stays in sub2video.w/h */
    ifilter->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;

    /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
       palettes for all rectangles are identical or compatible */
    ifilter->format = AV_PIX_FMT_RGB32;

    ist->sub2video.frame = av_frame_alloc();
    if (!ist->sub2video.frame)
        return AVERROR(ENOMEM);
    ist->sub2video.last_pts = INT64_MIN;
    ist->sub2video.end_pts  = INT64_MIN;

    /* sub2video structure has been (re-)initialized.
       Mark it as such so that the system will be
       initialized with the first received heartbeat. */
    ist->sub2video.initialize = 1;

    return 0;
}
/* Build the input side of a video chain: a buffer source configured
 * from the stream parameters (plus hw frames context), followed by
 * optional autorotate filters, an optional yadif deinterlacer and an
 * input trim, finally linked into the graph's input pad.
 * Returns 0 on success or a negative AVERROR code. */
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    /* a user-forced input frame rate overrides the stream time base */
    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
                                         ist->st->time_base;
    AVRational fr = ist->framerate;
    AVRational sar;
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;
    int64_t tsoffset = 0;
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();

    if (!par)
        return AVERROR(ENOMEM);
    memset(par, 0, sizeof(*par));
    par->format = AV_PIX_FMT_NONE;

    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!fr.num)
        fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);

    /* subtitle stream feeding a video pad: set up sub2video rendering */
    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        ret = sub2video_prepare(ist, ifilter);
        if (ret < 0)
            goto fail;
    }

    sar = ifilter->sample_aspect_ratio;
    if(!sar.den)
        sar = (AVRational){0,1};
    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args,
               "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
               "pixel_aspect=%d/%d",
               ifilter->width, ifilter->height, ifilter->format,
               tb.num, tb.den, sar.num, sar.den);
    if (fr.num && fr.den)
        av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
                                            args.str, NULL, fg->graph)) < 0)
        goto fail;

    /* hand the hardware frames context (if any) to the buffer source */
    par->hw_frames_ctx = ifilter->hw_frames_ctx;
    ret = av_buffersrc_parameters_set(ifilter->filter, par);
    if (ret < 0)
        goto fail;
    av_freep(&par);
    last_filter = ifilter->filter;

    /* apply the container's display-matrix rotation when -autorotate */
    if (ist->autorotate) {
        double theta = get_rotation(ist->st);

        if (fabs(theta - 90) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
        } else if (fabs(theta - 180) < 1.0) {
            /* 180 degrees = horizontal + vertical flip */
            ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
            if (ret < 0)
                return ret;
            ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
        } else if (fabs(theta - 270) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
        }
        if (ret < 0)
            return ret;
    }

    /* legacy -deinterlace option: force a yadif filter */
    if (do_deinterlace) {
        AVFilterContext *yadif;

        snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
                 ist->file_index, ist->st->index);
        if ((ret = avfilter_graph_create_filter(&yadif,
                                                avfilter_get_by_name("yadif"),
                                                name, "", NULL,
                                                fg->graph)) < 0)
            return ret;

        if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
            return ret;

        last_filter = yadif;
    }

    snprintf(name, sizeof(name), "trim_in_%d_%d",
             ist->file_index, ist->st->index);
    /* with -copyts the trim window is shifted by the input start time */
    if (copy_ts) {
        tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
        if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
            tsoffset += f->ctx->start_time;
    }
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : tsoffset, f->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;
    return 0;
fail:
    av_freep(&par);
    return ret;
}
/* Build the input side of an audio chain: an abuffer source configured
 * from the stream parameters, followed by an optional aresample
 * (-async), an optional volume filter (deprecated -vol) and an input
 * trim, finally linked into the graph's input pad.
 * Returns 0 on success or a negative AVERROR code. */
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;
    int64_t tsoffset = 0;

    if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
        return AVERROR(EINVAL);
    }

    /* the abuffer source is configured with a 1/sample_rate time base */
    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
               1, ifilter->sample_rate,
               ifilter->sample_rate,
               av_get_sample_fmt_name(ifilter->format));
    if (ifilter->channel_layout)
        av_bprintf(&args, ":channel_layout=0x%"PRIx64,
                   ifilter->channel_layout);
    else
        av_bprintf(&args, ":channels=%d", ifilter->channels);
    snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
                                            name, args.str, NULL,
                                            fg->graph)) < 0)
        return ret;
    last_filter = ifilter->filter;

/* Create filter_name with the given arg, link it after last_filter and
 * make it the new tail of the chain. Returns from the enclosing
 * function on error (uses/sets `ret`, `last_filter`, `name`). */
#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do {           \
    AVFilterContext *filt_ctx;                                              \
                                                                            \
    av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi "            \
           "similarly to -af " filter_name "=%s.\n", arg);                  \
                                                                            \
    snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d",                    \
             fg->index, filter_name, ist->file_index, ist->st->index);      \
    ret = avfilter_graph_create_filter(&filt_ctx,                           \
                                       avfilter_get_by_name(filter_name),   \
                                       name, arg, NULL, fg->graph);         \
    if (ret < 0)                                                            \
        return ret;                                                         \
                                                                            \
    ret = avfilter_link(last_filter, 0, filt_ctx, 0);                       \
    if (ret < 0)                                                            \
        return ret;                                                         \
                                                                            \
    last_filter = filt_ctx;                                                 \
} while (0)

    /* -async: insert aresample to compensate audio timestamp drift */
    if (audio_sync_method > 0) {
        char args[256] = {0};

        av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
        if (audio_drift_threshold != 0.1)
            av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
        if (!fg->reconfiguration)
            av_strlcatf(args, sizeof(args), ":first_pts=0");
        AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
    }

//     if (ost->audio_channels_mapped) {
//         int i;
//         AVBPrint pan_buf;
//         av_bprint_init(&pan_buf, 256, 8192);
//         av_bprintf(&pan_buf, "0x%"PRIx64,
//                    av_get_default_channel_layout(ost->audio_channels_mapped));
//         for (i = 0; i < ost->audio_channels_mapped; i++)
//             if (ost->audio_channels_map[i] != -1)
//                 av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
//         AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
//         av_bprint_finalize(&pan_buf, NULL);
//     }

    /* deprecated -vol option, implemented via the volume filter */
    if (audio_volume != 256) {
        char args[256];

        av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
               "audio filter instead.\n");

        snprintf(args, sizeof(args), "%f", audio_volume / 256.);
        AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
    }

    snprintf(name, sizeof(name), "trim for input stream %d:%d",
             ist->file_index, ist->st->index);
    /* with -copyts the trim window is shifted by the input start time */
    if (copy_ts) {
        tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
        if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
            tsoffset += f->ctx->start_time;
    }
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : tsoffset, f->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;

    return 0;
}
  821. static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
  822. AVFilterInOut *in)
  823. {
  824. if (!ifilter->ist->dec) {
  825. av_log(NULL, AV_LOG_ERROR,
  826. "No decoder for stream #%d:%d, filtering impossible\n",
  827. ifilter->ist->file_index, ifilter->ist->st->index);
  828. return AVERROR_DECODER_NOT_FOUND;
  829. }
  830. switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
  831. case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
  832. case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
  833. default: av_assert0(0);
  834. }
  835. }
  836. static void cleanup_filtergraph(FilterGraph *fg)
  837. {
  838. int i;
  839. for (i = 0; i < fg->nb_outputs; i++)
  840. fg->outputs[i]->filter = (AVFilterContext *)NULL;
  841. for (i = 0; i < fg->nb_inputs; i++)
  842. fg->inputs[i]->filter = (AVFilterContext *)NULL;
  843. avfilter_graph_free(&fg->graph);
  844. }
  845. int configure_filtergraph(FilterGraph *fg)
  846. {
  847. AVFilterInOut *inputs, *outputs, *cur;
  848. int ret, i, simple = filtergraph_is_simple(fg);
  849. const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
  850. fg->graph_desc;
  851. cleanup_filtergraph(fg);
  852. if (!(fg->graph = avfilter_graph_alloc()))
  853. return AVERROR(ENOMEM);
  854. if (simple) {
  855. OutputStream *ost = fg->outputs[0]->ost;
  856. char args[512];
  857. AVDictionaryEntry *e = NULL;
  858. fg->graph->nb_threads = filter_nbthreads;
  859. args[0] = 0;
  860. while ((e = av_dict_get(ost->sws_dict, "", e,
  861. AV_DICT_IGNORE_SUFFIX))) {
  862. av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
  863. }
  864. if (strlen(args))
  865. args[strlen(args)-1] = 0;
  866. fg->graph->scale_sws_opts = av_strdup(args);
  867. args[0] = 0;
  868. while ((e = av_dict_get(ost->swr_opts, "", e,
  869. AV_DICT_IGNORE_SUFFIX))) {
  870. av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
  871. }
  872. if (strlen(args))
  873. args[strlen(args)-1] = 0;
  874. av_opt_set(fg->graph, "aresample_swr_opts", args, 0);
  875. args[0] = '\0';
  876. while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
  877. AV_DICT_IGNORE_SUFFIX))) {
  878. av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
  879. }
  880. if (strlen(args))
  881. args[strlen(args) - 1] = '\0';
  882. e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
  883. if (e)
  884. av_opt_set(fg->graph, "threads", e->value, 0);
  885. } else {
  886. fg->graph->nb_threads = filter_complex_nbthreads;
  887. }
  888. if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
  889. goto fail;
  890. ret = hw_device_setup_for_filter(fg);
  891. if (ret < 0)
  892. goto fail;
  893. if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
  894. const char *num_inputs;
  895. const char *num_outputs;
  896. if (!outputs) {
  897. num_outputs = "0";
  898. } else if (outputs->next) {
  899. num_outputs = ">1";
  900. } else {
  901. num_outputs = "1";
  902. }
  903. if (!inputs) {
  904. num_inputs = "0";
  905. } else if (inputs->next) {
  906. num_inputs = ">1";
  907. } else {
  908. num_inputs = "1";
  909. }
  910. av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
  911. "to have exactly 1 input and 1 output."
  912. " However, it had %s input(s) and %s output(s)."
  913. " Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
  914. graph_desc, num_inputs, num_outputs);
  915. ret = AVERROR(EINVAL);
  916. goto fail;
  917. }
  918. for (cur = inputs, i = 0; cur; cur = cur->next, i++)
  919. if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
  920. avfilter_inout_free(&inputs);
  921. avfilter_inout_free(&outputs);
  922. goto fail;
  923. }
  924. avfilter_inout_free(&inputs);
  925. for (cur = outputs, i = 0; cur; cur = cur->next, i++)
  926. configure_output_filter(fg, fg->outputs[i], cur);
  927. avfilter_inout_free(&outputs);
  928. if (!auto_conversion_filters)
  929. avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
  930. if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
  931. goto fail;
  932. /* limit the lists of allowed formats to the ones selected, to
  933. * make sure they stay the same if the filtergraph is reconfigured later */
  934. for (i = 0; i < fg->nb_outputs; i++) {
  935. OutputFilter *ofilter = fg->outputs[i];
  936. AVFilterContext *sink = ofilter->filter;
  937. ofilter->format = av_buffersink_get_format(sink);
  938. ofilter->width = av_buffersink_get_w(sink);
  939. ofilter->height = av_buffersink_get_h(sink);
  940. ofilter->sample_rate = av_buffersink_get_sample_rate(sink);
  941. ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
  942. }
  943. fg->reconfiguration = 1;
  944. for (i = 0; i < fg->nb_outputs; i++) {
  945. OutputStream *ost = fg->outputs[i]->ost;
  946. if (!ost->enc) {
  947. /* identical to the same check in ffmpeg.c, needed because
  948. complex filter graphs are initialized earlier */
  949. av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
  950. avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
  951. ret = AVERROR(EINVAL);
  952. goto fail;
  953. }
  954. if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
  955. !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
  956. av_buffersink_set_frame_size(ost->filter->filter,
  957. ost->enc_ctx->frame_size);
  958. }
  959. for (i = 0; i < fg->nb_inputs; i++) {
  960. while (av_fifo_size(fg->inputs[i]->frame_queue)) {
  961. AVFrame *tmp;
  962. av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
  963. ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
  964. av_frame_free(&tmp);
  965. if (ret < 0)
  966. goto fail;
  967. }
  968. }
  969. /* send the EOFs for the finished inputs */
  970. for (i = 0; i < fg->nb_inputs; i++) {
  971. if (fg->inputs[i]->eof) {
  972. ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
  973. if (ret < 0)
  974. goto fail;
  975. }
  976. }
  977. /* process queued up subtitle packets */
  978. for (i = 0; i < fg->nb_inputs; i++) {
  979. InputStream *ist = fg->inputs[i]->ist;
  980. if (ist->sub2video.sub_queue && ist->sub2video.frame) {
  981. while (av_fifo_size(ist->sub2video.sub_queue)) {
  982. AVSubtitle tmp;
  983. av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
  984. sub2video_update(ist, INT64_MIN, &tmp);
  985. avsubtitle_free(&tmp);
  986. }
  987. }
  988. }
  989. return 0;
  990. fail:
  991. cleanup_filtergraph(fg);
  992. return ret;
  993. }
  994. int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
  995. {
  996. av_buffer_unref(&ifilter->hw_frames_ctx);
  997. ifilter->format = frame->format;
  998. ifilter->width = frame->width;
  999. ifilter->height = frame->height;
  1000. ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;
  1001. ifilter->sample_rate = frame->sample_rate;
  1002. ifilter->channels = frame->channels;
  1003. ifilter->channel_layout = frame->channel_layout;
  1004. if (frame->hw_frames_ctx) {
  1005. ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
  1006. if (!ifilter->hw_frames_ctx)
  1007. return AVERROR(ENOMEM);
  1008. }
  1009. return 0;
  1010. }
  1011. int ist_in_filtergraph(FilterGraph *fg, InputStream *ist)
  1012. {
  1013. int i;
  1014. for (i = 0; i < fg->nb_inputs; i++)
  1015. if (fg->inputs[i]->ist == ist)
  1016. return 1;
  1017. return 0;
  1018. }
  1019. int filtergraph_is_simple(FilterGraph *fg)
  1020. {
  1021. return !fg->graph_desc;
  1022. }