/*
 * ffmpeg filter configuration
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "ffmpeg.h"

#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"

#include "libavresample/avresample.h"

#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/display.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/pixfmt.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"

// FIXME: YUV420P etc. are actually supported with full color range,
// yet the latter information isn't available here.
static const enum AVPixelFormat *get_compliance_normal_pix_fmts(const AVCodec *codec, const enum AVPixelFormat default_formats[])
{
    static const enum AVPixelFormat mjpeg_formats[] =
        { AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P,
          AV_PIX_FMT_NONE };

    if (!strcmp(codec->name, "mjpeg")) {
        return mjpeg_formats;
    } else {
        return default_formats;
    }
}

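/* Pick the pixel format the encoder will actually receive: if the requested
 * target is not in the codec's pix_fmts list, fall back to the closest
 * supported format (as rated by av_find_best_pix_fmt_of_2) and warn. */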
static enum AVPixelFormat choose_pixel_fmt(AVStream *st, AVCodecContext *enc_ctx,
                                           const AVCodec *codec, enum AVPixelFormat target)
{
    if (codec && codec->pix_fmts) {
        const enum AVPixelFormat *p = codec->pix_fmts;
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(target);
        //FIXME: This should check for AV_PIX_FMT_FLAG_ALPHA after PAL8 pixel format without alpha is implemented
        int has_alpha = desc ? desc->nb_components % 2 == 0 : 0;
        enum AVPixelFormat best = AV_PIX_FMT_NONE;

        if (enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
            p = get_compliance_normal_pix_fmts(codec, p);
        }
        for (; *p != AV_PIX_FMT_NONE; p++) {
            best = av_find_best_pix_fmt_of_2(best, *p, target, has_alpha, NULL);
            if (*p == target)
                break;
        }
        if (*p == AV_PIX_FMT_NONE) {
            if (target != AV_PIX_FMT_NONE)
                av_log(NULL, AV_LOG_WARNING,
                       "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
                       av_get_pix_fmt_name(target),
                       codec->name,
                       av_get_pix_fmt_name(best));
            return best;
        }
    }
    return target;
}

static char *choose_pix_fmts(OutputFilter *ofilter)
{
    OutputStream *ost = ofilter->ost;
    AVDictionaryEntry *strict_dict = av_dict_get(ost->encoder_opts, "strict", NULL, 0);
    if (strict_dict)
        // used by choose_pixel_fmt() and below
        av_opt_set(ost->enc_ctx, "strict", strict_dict->value, 0);

    if (ost->keep_pix_fmt) {
        avfilter_graph_set_auto_convert(ofilter->graph->graph,
                                        AVFILTER_AUTO_CONVERT_NONE);
        if (ost->enc_ctx->pix_fmt == AV_PIX_FMT_NONE)
            return NULL;
        return av_strdup(av_get_pix_fmt_name(ost->enc_ctx->pix_fmt));
    }
    if (ost->enc_ctx->pix_fmt != AV_PIX_FMT_NONE) {
        return av_strdup(av_get_pix_fmt_name(choose_pixel_fmt(ost->st, ost->enc_ctx, ost->enc, ost->enc_ctx->pix_fmt)));
    } else if (ost->enc && ost->enc->pix_fmts) {
        const enum AVPixelFormat *p;
        AVIOContext *s = NULL;
        uint8_t *ret;
        int len;

        if (avio_open_dyn_buf(&s) < 0)
            exit_program(1);

        p = ost->enc->pix_fmts;
        if (ost->enc_ctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
            p = get_compliance_normal_pix_fmts(ost->enc, p);
        }

        for (; *p != AV_PIX_FMT_NONE; p++) {
            const char *name = av_get_pix_fmt_name(*p);
            avio_printf(s, "%s|", name);
        }
        len = avio_close_dyn_buf(s, &ret);
        ret[len - 1] = 0;
        return ret;
    } else
        return NULL;
}

/* Define a function for appending a list of allowed formats
 * to an AVBPrint. If nonempty, the list will have a header. */
#define DEF_CHOOSE_FORMAT(name, type, var, supported_list, none, printf_format, get_name) \
static void choose_ ## name (OutputFilter *ofilter, AVBPrint *bprint)       \
{                                                                            \
    if (ofilter->var == none && !ofilter->supported_list)                    \
        return;                                                              \
    av_bprintf(bprint, #name "=");                                           \
    if (ofilter->var != none) {                                              \
        av_bprintf(bprint, printf_format, get_name(ofilter->var));           \
    } else {                                                                 \
        const type *p;                                                       \
                                                                             \
        for (p = ofilter->supported_list; *p != none; p++) {                 \
            av_bprintf(bprint, printf_format "|", get_name(*p));             \
        }                                                                    \
        if (bprint->len > 0)                                                 \
            bprint->str[--bprint->len] = '\0';                               \
    }                                                                        \
    av_bprint_chars(bprint, ':', 1);                                         \
}

//DEF_CHOOSE_FORMAT(pix_fmts, enum AVPixelFormat, format, formats, AV_PIX_FMT_NONE,
//                  GET_PIX_FMT_NAME)

DEF_CHOOSE_FORMAT(sample_fmts, enum AVSampleFormat, format, formats,
                  AV_SAMPLE_FMT_NONE, "%s", av_get_sample_fmt_name)

DEF_CHOOSE_FORMAT(sample_rates, int, sample_rate, sample_rates, 0,
                  "%d", )

DEF_CHOOSE_FORMAT(channel_layouts, uint64_t, channel_layout, channel_layouts, 0,
                  "0x%"PRIx64, )

int init_simple_filtergraph(InputStream *ist, OutputStream *ost)
{
    FilterGraph *fg = av_mallocz(sizeof(*fg));

    if (!fg)
        exit_program(1);
    fg->index = nb_filtergraphs;

    GROW_ARRAY(fg->outputs, fg->nb_outputs);
    if (!(fg->outputs[0] = av_mallocz(sizeof(*fg->outputs[0]))))
        exit_program(1);
    fg->outputs[0]->ost    = ost;
    fg->outputs[0]->graph  = fg;
    fg->outputs[0]->format = -1;

    ost->filter = fg->outputs[0];

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
    if (!(fg->inputs[0] = av_mallocz(sizeof(*fg->inputs[0]))))
        exit_program(1);
    fg->inputs[0]->ist    = ist;
    fg->inputs[0]->graph  = fg;
    fg->inputs[0]->format = -1;

    fg->inputs[0]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
    if (!fg->inputs[0]->frame_queue)
        exit_program(1);

    GROW_ARRAY(ist->filters, ist->nb_filters);
    ist->filters[ist->nb_filters - 1] = fg->inputs[0];

    GROW_ARRAY(filtergraphs, nb_filtergraphs);
    filtergraphs[nb_filtergraphs - 1] = fg;

    return 0;
}

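/* Build a short human-readable label ("filtername" or "filtername:padname")
 * for a filtergraph input/output pad, returned as a newly allocated string. */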
static char *describe_filter_link(FilterGraph *fg, AVFilterInOut *inout, int in)
{
    AVFilterContext *ctx = inout->filter_ctx;
    AVFilterPad *pads = in ? ctx->input_pads : ctx->output_pads;
    int       nb_pads = in ? ctx->nb_inputs  : ctx->nb_outputs;
    AVIOContext *pb;
    uint8_t *res = NULL;

    if (avio_open_dyn_buf(&pb) < 0)
        exit_program(1);

    avio_printf(pb, "%s", ctx->filter->name);
    if (nb_pads > 1)
        avio_printf(pb, ":%s", avfilter_pad_get_name(pads, inout->pad_idx));
    avio_w8(pb, 0);
    avio_close_dyn_buf(pb, &res);
    return res;
}

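/* Bind one unconnected filtergraph input pad to an input stream: either the
 * stream named by the pad label (e.g. "0:v") or the first unused stream of
 * the matching media type, and mark that stream as needing decoding. */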
static void init_input_filter(FilterGraph *fg, AVFilterInOut *in)
{
    InputStream *ist = NULL;
    enum AVMediaType type = avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx);
    int i;

    // TODO: support other filter types
    if (type != AVMEDIA_TYPE_VIDEO && type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_FATAL, "Only video and audio filters supported "
               "currently.\n");
        exit_program(1);
    }

    if (in->name) {
        AVFormatContext *s;
        AVStream       *st = NULL;
        char *p;
        int file_idx = strtol(in->name, &p, 0);

        if (file_idx < 0 || file_idx >= nb_input_files) {
            av_log(NULL, AV_LOG_FATAL, "Invalid file index %d in filtergraph description %s.\n",
                   file_idx, fg->graph_desc);
            exit_program(1);
        }
        s = input_files[file_idx]->ctx;

        for (i = 0; i < s->nb_streams; i++) {
            enum AVMediaType stream_type = s->streams[i]->codecpar->codec_type;
            if (stream_type != type &&
                !(stream_type == AVMEDIA_TYPE_SUBTITLE &&
                  type == AVMEDIA_TYPE_VIDEO /* sub2video hack */))
                continue;
            if (check_stream_specifier(s, s->streams[i], *p == ':' ? p + 1 : p) == 1) {
                st = s->streams[i];
                break;
            }
        }

        if (!st) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches no streams.\n", p, fg->graph_desc);
            exit_program(1);
        }
        ist = input_streams[input_files[file_idx]->ist_index + st->index];
        if (ist->user_set_discard == AVDISCARD_ALL) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier '%s' in filtergraph description %s "
                   "matches a disabled input stream.\n", p, fg->graph_desc);
            exit_program(1);
        }
    } else {
        /* find the first unused stream of corresponding type */
        for (i = 0; i < nb_input_streams; i++) {
            ist = input_streams[i];
            if (ist->user_set_discard == AVDISCARD_ALL)
                continue;
            if (ist->dec_ctx->codec_type == type && ist->discard)
                break;
        }
        if (i == nb_input_streams) {
            av_log(NULL, AV_LOG_FATAL, "Cannot find a matching stream for "
                   "unlabeled input pad %d on filter %s\n", in->pad_idx,
                   in->filter_ctx->name);
            exit_program(1);
        }
    }
    av_assert0(ist);

    ist->discard          = 0;
    ist->decoding_needed |= DECODING_FOR_FILTER;
    ist->st->discard      = AVDISCARD_NONE;

    GROW_ARRAY(fg->inputs, fg->nb_inputs);
    if (!(fg->inputs[fg->nb_inputs - 1] = av_mallocz(sizeof(*fg->inputs[0]))))
        exit_program(1);
    fg->inputs[fg->nb_inputs - 1]->ist    = ist;
    fg->inputs[fg->nb_inputs - 1]->graph  = fg;
    fg->inputs[fg->nb_inputs - 1]->format = -1;
    fg->inputs[fg->nb_inputs - 1]->type   = ist->st->codecpar->codec_type;
    fg->inputs[fg->nb_inputs - 1]->name   = describe_filter_link(fg, in, 1);

    fg->inputs[fg->nb_inputs - 1]->frame_queue = av_fifo_alloc(8 * sizeof(AVFrame*));
    if (!fg->inputs[fg->nb_inputs - 1]->frame_queue)
        exit_program(1);

    GROW_ARRAY(ist->filters, ist->nb_filters);
    ist->filters[ist->nb_filters - 1] = fg->inputs[fg->nb_inputs - 1];
}

int init_complex_filtergraph(FilterGraph *fg)
{
    AVFilterInOut *inputs, *outputs, *cur;
    AVFilterGraph *graph;
    int ret = 0;

    /* this graph is only used for determining the kinds of inputs
     * and outputs we have, and is discarded on exit from this function */
    graph = avfilter_graph_alloc();
    if (!graph)
        return AVERROR(ENOMEM);
    graph->nb_threads = 1;

    ret = avfilter_graph_parse2(graph, fg->graph_desc, &inputs, &outputs);
    if (ret < 0)
        goto fail;

    for (cur = inputs; cur; cur = cur->next)
        init_input_filter(fg, cur);

    for (cur = outputs; cur;) {
        GROW_ARRAY(fg->outputs, fg->nb_outputs);
        fg->outputs[fg->nb_outputs - 1] = av_mallocz(sizeof(*fg->outputs[0]));
        if (!fg->outputs[fg->nb_outputs - 1])
            exit_program(1);

        fg->outputs[fg->nb_outputs - 1]->graph   = fg;
        fg->outputs[fg->nb_outputs - 1]->out_tmp = cur;
        fg->outputs[fg->nb_outputs - 1]->type    = avfilter_pad_get_type(cur->filter_ctx->output_pads,
                                                                         cur->pad_idx);
        fg->outputs[fg->nb_outputs - 1]->name    = describe_filter_link(fg, cur, 0);
        cur = cur->next;
        fg->outputs[fg->nb_outputs - 1]->out_tmp->next = NULL;
    }

fail:
    avfilter_inout_free(&inputs);
    avfilter_graph_free(&graph);
    return ret;
}

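/* Append a trim/atrim filter after *last_filter so output start-time and
 * recording-time limits are enforced inside the filtergraph; a no-op when
 * neither a start time nor a duration is set. */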
static int insert_trim(int64_t start_time, int64_t duration,
                       AVFilterContext **last_filter, int *pad_idx,
                       const char *filter_name)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    const AVFilter *trim;
    enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
    const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
    int ret = 0;

    if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
        return 0;

    trim = avfilter_get_by_name(name);
    if (!trim) {
        av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
               "recording time.\n", name);
        return AVERROR_FILTER_NOT_FOUND;
    }

    ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
    if (!ctx)
        return AVERROR(ENOMEM);

    if (duration != INT64_MAX) {
        ret = av_opt_set_int(ctx, "durationi", duration,
                             AV_OPT_SEARCH_CHILDREN);
    }
    if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
        ret = av_opt_set_int(ctx, "starti", start_time,
                             AV_OPT_SEARCH_CHILDREN);
    }
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
        return ret;
    }

    ret = avfilter_init_str(ctx, NULL);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}

static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
                         const char *filter_name, const char *args)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    int ret;

    ret = avfilter_graph_create_filter(&ctx,
                                       avfilter_get_by_name(filter_name),
                                       filter_name, args, NULL, graph);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}

static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    char *pix_fmts;
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    int ret;
    char name[255];

    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("buffersink"),
                                       name, NULL, NULL, fg->graph);

    if (ret < 0)
        return ret;

    if ((ofilter->width || ofilter->height) && ofilter->ost->autoscale) {
        char args[255];
        AVFilterContext *filter;
        AVDictionaryEntry *e = NULL;

        snprintf(args, sizeof(args), "%d:%d",
                 ofilter->width, ofilter->height);

        while ((e = av_dict_get(ost->sws_dict, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), ":%s=%s", e->key, e->value);
        }

        snprintf(name, sizeof(name), "scaler_out_%d_%d",
                 ost->file_index, ost->index);
        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                name, args, NULL, fg->graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    if ((pix_fmts = choose_pix_fmts(ofilter))) {
        AVFilterContext *filter;

        ret = avfilter_graph_create_filter(&filter,
                                           avfilter_get_by_name("format"),
                                           "format", pix_fmts, NULL, fg->graph);
        av_freep(&pix_fmts);
        if (ret < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx     = 0;
    }

    if (ost->frame_rate.num && 0) {
        AVFilterContext *fps;
        char args[255];

        snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
                 ost->frame_rate.den);
        snprintf(name, sizeof(name), "fps_out_%d_%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, pad_idx, fps, 0);
        if (ret < 0)
            return ret;
        last_filter = fps;
        pad_idx = 0;
    }

    snprintf(name, sizeof(name), "trim_out_%d_%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        return ret;

    return 0;
}

static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVCodecContext *codec = ost->enc_ctx;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    AVBPrint args;
    char name[255];
    int ret;

    snprintf(name, sizeof(name), "out_%d_%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("abuffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;
    if ((ret = av_opt_set_int(ofilter->filter, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
        return ret;

#define AUTO_INSERT_FILTER(opt_name, filter_name, arg) do {                \
    AVFilterContext *filt_ctx;                                             \
                                                                           \
    av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi "           \
           "similarly to -af " filter_name "=%s.\n", arg);                 \
                                                                           \
    ret = avfilter_graph_create_filter(&filt_ctx,                          \
                                       avfilter_get_by_name(filter_name),  \
                                       filter_name, arg, NULL, fg->graph); \
    if (ret < 0)                                                           \
        goto fail;                                                         \
                                                                           \
    ret = avfilter_link(last_filter, pad_idx, filt_ctx, 0);                \
    if (ret < 0)                                                           \
        goto fail;                                                         \
                                                                           \
    last_filter = filt_ctx;                                                \
    pad_idx = 0;                                                           \
} while (0)

    av_bprint_init(&args, 0, AV_BPRINT_SIZE_UNLIMITED);
    if (ost->audio_channels_mapped) {
        int i;
        av_bprintf(&args, "0x%"PRIx64,
                   av_get_default_channel_layout(ost->audio_channels_mapped));
        for (i = 0; i < ost->audio_channels_mapped; i++)
            if (ost->audio_channels_map[i] != -1)
                av_bprintf(&args, "|c%d=c%d", i, ost->audio_channels_map[i]);

        AUTO_INSERT_FILTER("-map_channel", "pan", args.str);
        av_bprint_clear(&args);
    }

    if (codec->channels && !codec->channel_layout)
        codec->channel_layout = av_get_default_channel_layout(codec->channels);

    choose_sample_fmts(ofilter,     &args);
    choose_sample_rates(ofilter,    &args);
    choose_channel_layouts(ofilter, &args);
    if (!av_bprint_is_complete(&args)) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    if (args.len) {
        AVFilterContext *format;

        snprintf(name, sizeof(name), "format_out_%d_%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&format,
                                           avfilter_get_by_name("aformat"),
                                           name, args.str, NULL, fg->graph);
        if (ret < 0)
            goto fail;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
            goto fail;

        last_filter = format;
        pad_idx = 0;
    }

    if (ost->apad && of->shortest) {
        int i;

        for (i = 0; i < of->ctx->nb_streams; i++)
            if (of->ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
                break;

        if (i < of->ctx->nb_streams) {
            AUTO_INSERT_FILTER("-apad", "apad", ost->apad);
        }
    }

    snprintf(name, sizeof(name), "trim for output stream %d:%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        goto fail;

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        goto fail;
fail:
    av_bprint_finalize(&args, NULL);

    return ret;
}

static int configure_output_filter(FilterGraph *fg, OutputFilter *ofilter,
                                   AVFilterInOut *out)
{
    if (!ofilter->ost) {
        av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", ofilter->name);
        exit_program(1);
    }

    switch (avfilter_pad_get_type(out->filter_ctx->output_pads, out->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_output_video_filter(fg, ofilter, out);
    case AVMEDIA_TYPE_AUDIO: return configure_output_audio_filter(fg, ofilter, out);
    default: av_assert0(0);
    }
}

void check_filter_outputs(void)
{
    int i;
    for (i = 0; i < nb_filtergraphs; i++) {
        int n;
        for (n = 0; n < filtergraphs[i]->nb_outputs; n++) {
            OutputFilter *output = filtergraphs[i]->outputs[n];
            if (!output->ost) {
                av_log(NULL, AV_LOG_FATAL, "Filter %s has an unconnected output\n", output->name);
                exit_program(1);
            }
        }
    }
}

static int sub2video_prepare(InputStream *ist, InputFilter *ifilter)
{
    AVFormatContext *avf = input_files[ist->file_index]->ctx;
    int i, w, h;

    /* Compute the size of the canvas for the subtitles stream.
       If the subtitles codecpar has set a size, use it. Otherwise use the
       maximum dimensions of the video streams in the same file. */
    w = ifilter->width;
    h = ifilter->height;
    if (!(w && h)) {
        for (i = 0; i < avf->nb_streams; i++) {
            if (avf->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
                w = FFMAX(w, avf->streams[i]->codecpar->width);
                h = FFMAX(h, avf->streams[i]->codecpar->height);
            }
        }
        if (!(w && h)) {
            w = FFMAX(w, 720);
            h = FFMAX(h, 576);
        }
        av_log(avf, AV_LOG_INFO, "sub2video: using %dx%d canvas\n", w, h);
    }
    ist->sub2video.w = ifilter->width  = w;
    ist->sub2video.h = ifilter->height = h;

    ifilter->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ifilter->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;

    /* rectangles are AV_PIX_FMT_PAL8, but we have no guarantee that the
       palettes for all rectangles are identical or compatible */
    ifilter->format = AV_PIX_FMT_RGB32;

    ist->sub2video.frame = av_frame_alloc();
    if (!ist->sub2video.frame)
        return AVERROR(ENOMEM);
    ist->sub2video.last_pts = INT64_MIN;
    ist->sub2video.end_pts  = INT64_MIN;

    /* sub2video structure has been (re-)initialized.
       Mark it as such so that the system will be
       initialized with the first received heartbeat. */
    ist->sub2video.initialize = 1;

    return 0;
}

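/* Set up the source end of a video input: create the buffer source with the
 * stream's geometry and timebase, then optionally insert autorotation,
 * deinterlacing and input trimming before handing off to the user graph. */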
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
                                         ist->st->time_base;
    AVRational fr = ist->framerate;
    AVRational sar;
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;
    int64_t tsoffset = 0;
    AVBufferSrcParameters *par = av_buffersrc_parameters_alloc();

    if (!par)
        return AVERROR(ENOMEM);
    memset(par, 0, sizeof(*par));
    par->format = AV_PIX_FMT_NONE;

    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_ERROR, "Cannot connect video filter to audio input\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!fr.num)
        fr = av_guess_frame_rate(input_files[ist->file_index]->ctx, ist->st, NULL);

    if (ist->dec_ctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
        ret = sub2video_prepare(ist, ifilter);
        if (ret < 0)
            goto fail;
    }

    sar = ifilter->sample_aspect_ratio;
    if (!sar.den)
        sar = (AVRational){0,1};
    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args,
               "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:"
               "pixel_aspect=%d/%d",
               ifilter->width, ifilter->height, ifilter->format,
               tb.num, tb.den, sar.num, sar.den);
    if (fr.num && fr.den)
        av_bprintf(&args, ":frame_rate=%d/%d", fr.num, fr.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
                                            args.str, NULL, fg->graph)) < 0)
        goto fail;
    par->hw_frames_ctx = ifilter->hw_frames_ctx;
    ret = av_buffersrc_parameters_set(ifilter->filter, par);
    if (ret < 0)
        goto fail;
    av_freep(&par);
    last_filter = ifilter->filter;

    if (ist->autorotate) {
        double theta = get_rotation(ist->st);

        if (fabs(theta - 90) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose", "clock");
        } else if (fabs(theta - 180) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
            if (ret < 0)
                return ret;
            ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
        } else if (fabs(theta - 270) < 1.0) {
            ret = insert_filter(&last_filter, &pad_idx, "transpose", "cclock");
        } else if (fabs(theta) > 1.0) {
            char rotate_buf[64];
            snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
            ret = insert_filter(&last_filter, &pad_idx, "rotate", rotate_buf);
        }
        if (ret < 0)
            return ret;
    }

    if (do_deinterlace) {
        AVFilterContext *yadif;

        snprintf(name, sizeof(name), "deinterlace_in_%d_%d",
                 ist->file_index, ist->st->index);
        if ((ret = avfilter_graph_create_filter(&yadif,
                                                avfilter_get_by_name("yadif"),
                                                name, "", NULL,
                                                fg->graph)) < 0)
            return ret;

        if ((ret = avfilter_link(last_filter, 0, yadif, 0)) < 0)
            return ret;

        last_filter = yadif;
    }

    snprintf(name, sizeof(name), "trim_in_%d_%d",
             ist->file_index, ist->st->index);
    if (copy_ts) {
        tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
        if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
            tsoffset += f->ctx->start_time;
    }
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : tsoffset, f->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;
    return 0;
fail:
    av_freep(&par);

    return ret;
}

static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    AVBPrint args;
    char name[255];
    int ret, pad_idx = 0;
    int64_t tsoffset = 0;

    if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(NULL, AV_LOG_ERROR, "Cannot connect audio filter to non audio input\n");
        return AVERROR(EINVAL);
    }

    av_bprint_init(&args, 0, AV_BPRINT_SIZE_AUTOMATIC);
    av_bprintf(&args, "time_base=%d/%d:sample_rate=%d:sample_fmt=%s",
               1, ifilter->sample_rate,
               ifilter->sample_rate,
               av_get_sample_fmt_name(ifilter->format));
    if (ifilter->channel_layout)
        av_bprintf(&args, ":channel_layout=0x%"PRIx64,
                   ifilter->channel_layout);
    else
        av_bprintf(&args, ":channels=%d", ifilter->channels);
    snprintf(name, sizeof(name), "graph_%d_in_%d_%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
                                            name, args.str, NULL,
                                            fg->graph)) < 0)
        return ret;
    last_filter = ifilter->filter;

#define AUTO_INSERT_FILTER_INPUT(opt_name, filter_name, arg) do {          \
    AVFilterContext *filt_ctx;                                             \
                                                                           \
    av_log(NULL, AV_LOG_INFO, opt_name " is forwarded to lavfi "           \
           "similarly to -af " filter_name "=%s.\n", arg);                 \
                                                                           \
    snprintf(name, sizeof(name), "graph_%d_%s_in_%d_%d",                   \
             fg->index, filter_name, ist->file_index, ist->st->index);     \
    ret = avfilter_graph_create_filter(&filt_ctx,                          \
                                       avfilter_get_by_name(filter_name),  \
                                       name, arg, NULL, fg->graph);        \
    if (ret < 0)                                                           \
        return ret;                                                        \
                                                                           \
    ret = avfilter_link(last_filter, 0, filt_ctx, 0);                      \
    if (ret < 0)                                                           \
        return ret;                                                        \
                                                                           \
    last_filter = filt_ctx;                                                \
} while (0)

    if (audio_sync_method > 0) {
        char args[256] = {0};

        av_strlcatf(args, sizeof(args), "async=%d", audio_sync_method);
        if (audio_drift_threshold != 0.1)
            av_strlcatf(args, sizeof(args), ":min_hard_comp=%f", audio_drift_threshold);
        if (!fg->reconfiguration)
            av_strlcatf(args, sizeof(args), ":first_pts=0");
        AUTO_INSERT_FILTER_INPUT("-async", "aresample", args);
    }

//     if (ost->audio_channels_mapped) {
//         int i;
//         AVBPrint pan_buf;
//         av_bprint_init(&pan_buf, 256, 8192);
//         av_bprintf(&pan_buf, "0x%"PRIx64,
//                    av_get_default_channel_layout(ost->audio_channels_mapped));
//         for (i = 0; i < ost->audio_channels_mapped; i++)
//             if (ost->audio_channels_map[i] != -1)
//                 av_bprintf(&pan_buf, ":c%d=c%d", i, ost->audio_channels_map[i]);
//         AUTO_INSERT_FILTER_INPUT("-map_channel", "pan", pan_buf.str);
//         av_bprint_finalize(&pan_buf, NULL);
//     }

    if (audio_volume != 256) {
        char args[256];

        av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
               "audio filter instead.\n");

        snprintf(args, sizeof(args), "%f", audio_volume / 256.);
        AUTO_INSERT_FILTER_INPUT("-vol", "volume", args);
    }

    snprintf(name, sizeof(name), "trim for input stream %d:%d",
             ist->file_index, ist->st->index);
    if (copy_ts) {
        tsoffset = f->start_time == AV_NOPTS_VALUE ? 0 : f->start_time;
        if (!start_at_zero && f->ctx->start_time != AV_NOPTS_VALUE)
            tsoffset += f->ctx->start_time;
    }
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : tsoffset, f->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;

    return 0;
}

static int configure_input_filter(FilterGraph *fg, InputFilter *ifilter,
                                  AVFilterInOut *in)
{
    if (!ifilter->ist->dec) {
        av_log(NULL, AV_LOG_ERROR,
               "No decoder for stream #%d:%d, filtering impossible\n",
               ifilter->ist->file_index, ifilter->ist->st->index);
        return AVERROR_DECODER_NOT_FOUND;
    }
    switch (avfilter_pad_get_type(in->filter_ctx->input_pads, in->pad_idx)) {
    case AVMEDIA_TYPE_VIDEO: return configure_input_video_filter(fg, ifilter, in);
    case AVMEDIA_TYPE_AUDIO: return configure_input_audio_filter(fg, ifilter, in);
    default: av_assert0(0);
    }
}

static void cleanup_filtergraph(FilterGraph *fg)
{
    int i;
    for (i = 0; i < fg->nb_outputs; i++)
        fg->outputs[i]->filter = (AVFilterContext *)NULL;
    for (i = 0; i < fg->nb_inputs; i++)
        fg->inputs[i]->filter = (AVFilterContext *)NULL;
    avfilter_graph_free(&fg->graph);
}

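/* (Re)build and configure the whole filtergraph: parse the description,
 * create all input/output endpoints, validate simple graphs, then flush any
 * frames, EOFs and queued subtitles that arrived before configuration. */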
int configure_filtergraph(FilterGraph *fg)
{
    AVFilterInOut *inputs, *outputs, *cur;
    int ret, i, simple = filtergraph_is_simple(fg);
    const char *graph_desc = simple ? fg->outputs[0]->ost->avfilter :
                                      fg->graph_desc;

    cleanup_filtergraph(fg);
    if (!(fg->graph = avfilter_graph_alloc()))
        return AVERROR(ENOMEM);

    if (simple) {
        OutputStream *ost = fg->outputs[0]->ost;
        char args[512];
        AVDictionaryEntry *e = NULL;

        fg->graph->nb_threads = filter_nbthreads;

        args[0] = 0;
        while ((e = av_dict_get(ost->sws_dict, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
        }
        if (strlen(args))
            args[strlen(args)-1] = 0;
        fg->graph->scale_sws_opts = av_strdup(args);

        args[0] = 0;
        while ((e = av_dict_get(ost->swr_opts, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
        }
        if (strlen(args))
            args[strlen(args)-1] = 0;
        av_opt_set(fg->graph, "aresample_swr_opts", args, 0);

        args[0] = '\0';
        while ((e = av_dict_get(fg->outputs[0]->ost->resample_opts, "", e,
                                AV_DICT_IGNORE_SUFFIX))) {
            av_strlcatf(args, sizeof(args), "%s=%s:", e->key, e->value);
        }
        if (strlen(args))
            args[strlen(args) - 1] = '\0';

        e = av_dict_get(ost->encoder_opts, "threads", NULL, 0);
        if (e)
            av_opt_set(fg->graph, "threads", e->value, 0);
    } else {
        fg->graph->nb_threads = filter_complex_nbthreads;
    }

    if ((ret = avfilter_graph_parse2(fg->graph, graph_desc, &inputs, &outputs)) < 0)
        goto fail;

    ret = hw_device_setup_for_filter(fg);
    if (ret < 0)
        goto fail;

    if (simple && (!inputs || inputs->next || !outputs || outputs->next)) {
        const char *num_inputs;
        const char *num_outputs;
        if (!outputs) {
            num_outputs = "0";
        } else if (outputs->next) {
            num_outputs = ">1";
        } else {
            num_outputs = "1";
        }
        if (!inputs) {
            num_inputs = "0";
        } else if (inputs->next) {
            num_inputs = ">1";
        } else {
            num_inputs = "1";
        }
        av_log(NULL, AV_LOG_ERROR, "Simple filtergraph '%s' was expected "
               "to have exactly 1 input and 1 output."
               " However, it had %s input(s) and %s output(s)."
               " Please adjust, or use a complex filtergraph (-filter_complex) instead.\n",
               graph_desc, num_inputs, num_outputs);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (cur = inputs, i = 0; cur; cur = cur->next, i++)
        if ((ret = configure_input_filter(fg, fg->inputs[i], cur)) < 0) {
            avfilter_inout_free(&inputs);
            avfilter_inout_free(&outputs);
            goto fail;
        }
    avfilter_inout_free(&inputs);

    for (cur = outputs, i = 0; cur; cur = cur->next, i++)
        configure_output_filter(fg, fg->outputs[i], cur);
    avfilter_inout_free(&outputs);

    if (!auto_conversion_filters)
        avfilter_graph_set_auto_convert(fg->graph, AVFILTER_AUTO_CONVERT_NONE);
    if ((ret = avfilter_graph_config(fg->graph, NULL)) < 0)
        goto fail;

    /* limit the lists of allowed formats to the ones selected, to
     * make sure they stay the same if the filtergraph is reconfigured later */
    for (i = 0; i < fg->nb_outputs; i++) {
        OutputFilter *ofilter = fg->outputs[i];
        AVFilterContext *sink = ofilter->filter;

        ofilter->format = av_buffersink_get_format(sink);

        ofilter->width  = av_buffersink_get_w(sink);
        ofilter->height = av_buffersink_get_h(sink);

        ofilter->sample_rate    = av_buffersink_get_sample_rate(sink);
        ofilter->channel_layout = av_buffersink_get_channel_layout(sink);
    }

    fg->reconfiguration = 1;

    for (i = 0; i < fg->nb_outputs; i++) {
        OutputStream *ost = fg->outputs[i]->ost;
        if (!ost->enc) {
            /* identical to the same check in ffmpeg.c, needed because
               complex filter graphs are initialized earlier */
            av_log(NULL, AV_LOG_ERROR, "Encoder (codec %s) not found for output stream #%d:%d\n",
                   avcodec_get_name(ost->st->codecpar->codec_id), ost->file_index, ost->index);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            av_buffersink_set_frame_size(ost->filter->filter,
                                         ost->enc_ctx->frame_size);
    }

    for (i = 0; i < fg->nb_inputs; i++) {
        while (av_fifo_size(fg->inputs[i]->frame_queue)) {
            AVFrame *tmp;
            av_fifo_generic_read(fg->inputs[i]->frame_queue, &tmp, sizeof(tmp), NULL);
            ret = av_buffersrc_add_frame(fg->inputs[i]->filter, tmp);
            av_frame_free(&tmp);
            if (ret < 0)
                goto fail;
        }
    }

    /* send the EOFs for the finished inputs */
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->eof) {
            ret = av_buffersrc_add_frame(fg->inputs[i]->filter, NULL);
            if (ret < 0)
                goto fail;
        }
    }

    /* process queued up subtitle packets */
    for (i = 0; i < fg->nb_inputs; i++) {
        InputStream *ist = fg->inputs[i]->ist;
        if (ist->sub2video.sub_queue && ist->sub2video.frame) {
            while (av_fifo_size(ist->sub2video.sub_queue)) {
                AVSubtitle tmp;
                av_fifo_generic_read(ist->sub2video.sub_queue, &tmp, sizeof(tmp), NULL);
                sub2video_update(ist, INT64_MIN, &tmp);
                avsubtitle_free(&tmp);
            }
        }
    }

    return 0;

fail:
    cleanup_filtergraph(fg);
    return ret;
}

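/* Record the properties of a decoded frame on the InputFilter so the
 * buffer source can be (re)configured to match the actual input. */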
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
{
    av_buffer_unref(&ifilter->hw_frames_ctx);

    ifilter->format = frame->format;

    ifilter->width               = frame->width;
    ifilter->height              = frame->height;
    ifilter->sample_aspect_ratio = frame->sample_aspect_ratio;

    ifilter->sample_rate         = frame->sample_rate;
    ifilter->channels            = frame->channels;
    ifilter->channel_layout      = frame->channel_layout;

    if (frame->hw_frames_ctx) {
        ifilter->hw_frames_ctx = av_buffer_ref(frame->hw_frames_ctx);
        if (!ifilter->hw_frames_ctx)
            return AVERROR(ENOMEM);
    }

    return 0;
}

int filtergraph_is_simple(FilterGraph *fg)
{
    return !fg->graph_desc;
}