You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

4552 lines
164KB

  1. /*
  2. * avconv main
  3. * Copyright (c) 2000-2011 The libav developers.
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "config.h"
  22. #include <ctype.h>
  23. #include <string.h>
  24. #include <math.h>
  25. #include <stdlib.h>
  26. #include <errno.h>
  27. #include <signal.h>
  28. #include <limits.h>
  29. #include <unistd.h>
  30. #include "libavformat/avformat.h"
  31. #include "libavdevice/avdevice.h"
  32. #include "libswscale/swscale.h"
  33. #include "libavutil/opt.h"
  34. #include "libavcodec/audioconvert.h"
  35. #include "libavutil/audioconvert.h"
  36. #include "libavutil/parseutils.h"
  37. #include "libavutil/samplefmt.h"
  38. #include "libavutil/colorspace.h"
  39. #include "libavutil/fifo.h"
  40. #include "libavutil/intreadwrite.h"
  41. #include "libavutil/dict.h"
  42. #include "libavutil/mathematics.h"
  43. #include "libavutil/pixdesc.h"
  44. #include "libavutil/avstring.h"
  45. #include "libavutil/libm.h"
  46. #include "libavutil/imgutils.h"
  47. #include "libavformat/os_support.h"
  48. #if CONFIG_AVFILTER
  49. # include "libavfilter/avfilter.h"
  50. # include "libavfilter/avfiltergraph.h"
  51. # include "libavfilter/buffersrc.h"
  52. # include "libavfilter/vsrc_buffer.h"
  53. #endif
  54. #if HAVE_SYS_RESOURCE_H
  55. #include <sys/types.h>
  56. #include <sys/time.h>
  57. #include <sys/resource.h>
  58. #elif HAVE_GETPROCESSTIMES
  59. #include <windows.h>
  60. #endif
  61. #if HAVE_GETPROCESSMEMORYINFO
  62. #include <windows.h>
  63. #include <psapi.h>
  64. #endif
  65. #if HAVE_SYS_SELECT_H
  66. #include <sys/select.h>
  67. #endif
  68. #include <time.h>
  69. #include "cmdutils.h"
  70. #include "libavutil/avassert.h"
/* video sync (-vsync) modes */
#define VSYNC_AUTO -1        /* choose a method automatically */
#define VSYNC_PASSTHROUGH 0  /* pass timestamps through unchanged */
#define VSYNC_CFR 1          /* constant frame rate: duplicate/drop frames */
#define VSYNC_VFR 2          /* variable frame rate: drop duplicate timestamps */

/* program identity used by cmdutils banner/version output */
const char program_name[] = "avconv";
const int program_birth_year = 2000;
  77. /* select an input stream for an output stream */
/* select an input stream for an output stream (-map) */
typedef struct StreamMap {
    int disabled;          /** 1 is this mapping is disabled by a negative map */
    int file_index;        /* input file index */
    int stream_index;      /* stream index within that input file */
    int sync_file_index;   /* file of the stream to sync against */
    int sync_stream_index; /* stream to sync against within that file */
} StreamMap;
/**
 * select an input file for an output file (-map_metadata)
 */
typedef struct MetadataMap {
    int file;   ///< file index
    char type;  ///< type of metadata to copy -- (g)lobal, (s)tream, (c)hapter or (p)rogram
    int index;  ///< stream/chapter/program number
} MetadataMap;
/* forward declaration; the full option table is defined later in this file */
static const OptionDef options[];

/* global settings filled in from the command line */
static int video_discard = 0;
static int same_quant = 0;               /* reuse input quantizers when transcoding */
static int do_deinterlace = 0;
static int intra_dc_precision = 8;
static int qp_hist = 0;                  /* show QP histogram in stats */
static int file_overwrite = 0;           /* -y: overwrite output files without asking */
static int do_benchmark = 0;
static int do_hex_dump = 0;
static int do_pkt_dump = 0;
static int do_pass = 0;                  /* two-pass encoding pass number (0 = off) */
static char *pass_logfilename_prefix = NULL;
static int video_sync_method = VSYNC_AUTO;
static int audio_sync_method = 0;
static float audio_drift_threshold = 0.1;
static int copy_ts = 0;
static int copy_tb = 1;
static int opt_shortest = 0;             /* stop when the shortest input ends */
static char *vstats_filename;
static FILE *vstats_file;                /* per-frame video statistics output */
static int audio_volume = 256;           /* 256 == unity gain */
static int exit_on_error = 0;
static int using_stdin = 0;
static int64_t video_size = 0;           /* byte totals for the final report */
static int64_t audio_size = 0;
static int64_t extra_size = 0;
static int nb_frames_dup = 0;            /* frames duplicated/dropped for sync */
static int nb_frames_drop = 0;
static int input_sync;
static float dts_delta_threshold = 10;
static int print_stats = 1;

/* shared scratch buffer for audio transcoding, grown on demand */
static uint8_t *audio_buf;
static unsigned int allocated_audio_buf_size;

#define DEFAULT_PASS_LOGFILENAME_PREFIX "av2pass"
/* one reference-counted entry in the per-stream pool of decoder picture
 * buffers (see codec_get_buffer() / unref_buffer()) */
typedef struct FrameBuffer {
    uint8_t *base[4];     /* allocated plane pointers (including edge padding) */
    uint8_t *data[4];     /* plane pointers handed to the decoder */
    int linesize[4];
    int h, w;             /* dimensions this buffer was allocated for */
    enum PixelFormat pix_fmt;
    int refcount;         /* 0 means the buffer sits in the free pool */
    struct InputStream *ist;   /* owning input stream */
    struct FrameBuffer *next;  /* next free buffer in the pool */
} FrameBuffer;
/* per-input-stream state */
typedef struct InputStream {
    int file_index;          /* index into input_files[] */
    AVStream *st;
    int discard;             /* true if stream data should be discarded */
    int decoding_needed;     /* true if the packets must be decoded in 'raw_fifo' */
    AVCodec *dec;            /* decoder in use */
    AVFrame *decoded_frame;  /* reused frame for decoded data */
    AVFrame *filtered_frame; /* reused frame for filtered output */
    int64_t start;           /* time when read started */
    /* predicted dts of the next packet read for this stream or (when there are
     * several frames in a packet) of the next frame in current packet */
    int64_t next_dts;
    /* dts of the last packet read for this stream */
    int64_t last_dts;
    PtsCorrectionContext pts_ctx;
    double ts_scale;         /* input timestamp scale factor -- presumably from -itsscale; confirm at option parsing */
    int is_start;            /* is 1 at the start and after a discontinuity */
    int showed_multi_packet_warning;
    AVDictionary *opts;      /* per-stream decoder options */
    /* a pool of free buffers for decoded data */
    FrameBuffer *buffer_pool;
} InputStream;
/* per-input-file state */
typedef struct InputFile {
    AVFormatContext *ctx;
    int eof_reached;  /* true if eof reached */
    int ist_index;    /* index of first stream in ist_table */
    int buffer_size;  /* current total buffer size */
    int64_t ts_offset;
    int nb_streams;   /* number of stream that avconv is aware of; may be different
                         from ctx.nb_streams if new streams appear during av_read_frame() */
    int rate_emu;     /* read input at its native frame rate */
} InputFile;
/* per-output-stream state */
typedef struct OutputStream {
    int file_index;       /* file index */
    int index;            /* stream index in the output file */
    int source_index;     /* InputStream index */
    AVStream *st;         /* stream in the output file */
    int encoding_needed;  /* true if encoding needed for this stream */
    int frame_number;     /* number of frames emitted so far */
    /* input pts and corresponding output pts
       for A/V sync */
    // double sync_ipts; /* dts from the AVPacket of the demuxer in second units */
    struct InputStream *sync_ist; /* input stream to sync against */
    int64_t sync_opts;    /* output frame counter, could be changed to some true timestamp */ // FIXME look at frame_number
    /* pts of the first frame encoded for this stream, used for limiting
     * recording time */
    int64_t first_pts;
    AVBitStreamFilterContext *bitstream_filters; /* chain applied in write_frame() */
    AVCodec *enc;
    int64_t max_frames;   /* -frames: stop after this many frames */
    AVFrame *output_frame; /* reused frame for audio encoding */

    /* video only */
    int video_resample;
    AVFrame pict_tmp;     /* temporary image for resampling */
    struct SwsContext *img_resample_ctx; /* for image resampling */
    int resample_height;  /* input geometry the resampler was set up for */
    int resample_width;
    int resample_pix_fmt;
    AVRational frame_rate;
    int force_fps;
    int top_field_first;
    float frame_aspect_ratio; /* overridden by -aspect, 0 if unset */

    /* forced key frames */
    int64_t *forced_kf_pts;
    int forced_kf_count;
    int forced_kf_index;

    /* audio only */
    int audio_resample;
    ReSampleContext *resample; /* for audio resampling */
    int resample_sample_fmt;   /* input audio parameters the resampler was set up for */
    int resample_channels;
    int resample_sample_rate;
    int reformat_pair;         /* cached (in_fmt, out_fmt) of reformat_ctx */
    AVAudioConvert *reformat_ctx;
    AVFifoBuffer *fifo;        /* for compression: one audio fifo per codec */
    FILE *logfile;             /* two-pass encoding log */
#if CONFIG_AVFILTER
    AVFilterContext *output_video_filter;
    AVFilterContext *input_video_filter;
    AVFilterBufferRef *picref;
    char *avfilter;            /* -vf filter graph description */
    AVFilterGraph *graph;
#endif
    int64_t sws_flags;         /* scaler flags passed to libswscale */
    AVDictionary *opts;        /* per-stream encoder options */
    int is_past_recording_time; /* set once -t limit is reached */
    int stream_copy;           /* remux without re-encoding */
    const char *attachment_filename;
    int copy_initial_nonkeyframes;
} OutputStream;
/* per-output-file state */
typedef struct OutputFile {
    AVFormatContext *ctx;
    AVDictionary *opts;      /* muxer options */
    int ost_index;           /* index of the first stream in output_streams */
    int64_t recording_time;  /* desired length of the resulting file in microseconds */
    int64_t start_time;      /* start time in microseconds */
    uint64_t limit_filesize; /* -fs: maximum output file size in bytes */
} OutputFile;
/* global tables of all streams/files, grown as inputs/outputs are opened;
 * freed in exit_program() */
static InputStream *input_streams = NULL;
static int nb_input_streams = 0;
static InputFile *input_files = NULL;
static int nb_input_files = 0;
static OutputStream *output_streams = NULL;
static int nb_output_streams = 0;
static OutputFile *output_files = NULL;
static int nb_output_files = 0;
/* Per-file accumulator for command-line options, reset between files by
 * reset_options().
 *
 * NOTE: reset_options() relies on the layout convention that every
 * SpecifierOpt* array field is immediately followed by its int counter --
 * keep pairs together when adding fields. */
typedef struct OptionsContext {
    /* input/output options */
    int64_t start_time;
    const char *format;

    SpecifierOpt *codec_names;
    int nb_codec_names;
    SpecifierOpt *audio_channels;
    int nb_audio_channels;
    SpecifierOpt *audio_sample_rate;
    int nb_audio_sample_rate;
    SpecifierOpt *frame_rates;
    int nb_frame_rates;
    SpecifierOpt *frame_sizes;
    int nb_frame_sizes;
    SpecifierOpt *frame_pix_fmts;
    int nb_frame_pix_fmts;

    /* input options */
    int64_t input_ts_offset;
    int rate_emu;

    SpecifierOpt *ts_scale;
    int nb_ts_scale;
    SpecifierOpt *dump_attachment;
    int nb_dump_attachment;

    /* output options */
    StreamMap *stream_maps;
    int nb_stream_maps;
    /* first item specifies output metadata, second is input */
    MetadataMap (*meta_data_maps)[2];
    int nb_meta_data_maps;
    int metadata_global_manual;
    int metadata_streams_manual;
    int metadata_chapters_manual;
    const char **attachments;
    int nb_attachments;

    int chapters_input_file;

    int64_t recording_time;
    uint64_t limit_filesize;
    float mux_preload;
    float mux_max_delay;

    int video_disable;
    int audio_disable;
    int subtitle_disable;
    int data_disable;

    /* indexed by output file stream index */
    int *streamid_map;
    int nb_streamid_map;

    SpecifierOpt *metadata;
    int nb_metadata;
    SpecifierOpt *max_frames;
    int nb_max_frames;
    SpecifierOpt *bitstream_filters;
    int nb_bitstream_filters;
    SpecifierOpt *codec_tags;
    int nb_codec_tags;
    SpecifierOpt *sample_fmts;
    int nb_sample_fmts;
    SpecifierOpt *qscale;
    int nb_qscale;
    SpecifierOpt *forced_key_frames;
    int nb_forced_key_frames;
    SpecifierOpt *force_fps;
    int nb_force_fps;
    SpecifierOpt *frame_aspect_ratios;
    int nb_frame_aspect_ratios;
    SpecifierOpt *rc_overrides;
    int nb_rc_overrides;
    SpecifierOpt *intra_matrices;
    int nb_intra_matrices;
    SpecifierOpt *inter_matrices;
    int nb_inter_matrices;
    SpecifierOpt *top_field_first;
    int nb_top_field_first;
    SpecifierOpt *metadata_map;
    int nb_metadata_map;
    SpecifierOpt *presets;
    int nb_presets;
    SpecifierOpt *copy_initial_nonkeyframes;
    int nb_copy_initial_nonkeyframes;
#if CONFIG_AVFILTER
    SpecifierOpt *filters;
    int nb_filters;
#endif
} OptionsContext;
/**
 * For each occurrence of per-stream option 'name' whose stream specifier
 * matches stream 'st' of 'fmtctx', copy its value (union member 'type')
 * into 'outvar'; later matches override earlier ones.  Exits the program
 * on an invalid specifier.  Expects an OptionsContext *o in scope.
 */
#define MATCH_PER_STREAM_OPT(name, type, outvar, fmtctx, st)\
{\
    int i, ret;\
    for (i = 0; i < o->nb_ ## name; i++) {\
        char *spec = o->name[i].specifier;\
        if ((ret = check_stream_specifier(fmtctx, st, spec)) > 0)\
            outvar = o->name[i].u.type;\
        else if (ret < 0)\
            exit_program(1);\
    }\
}
/* Free all option storage in *o and restore it to the default state,
 * ready for parsing the options of the next input/output file. */
static void reset_options(OptionsContext *o)
{
    const OptionDef *po = options;

    /* all OPT_SPEC and OPT_STRING can be freed in generic way */
    while (po->name) {
        void *dst = (uint8_t*)o + po->u.off;

        if (po->flags & OPT_SPEC) {
            SpecifierOpt **so = dst;
            /* relies on the OptionsContext layout: the int counter sits
             * directly after the SpecifierOpt* field */
            int i, *count = (int*)(so + 1);
            for (i = 0; i < *count; i++) {
                av_freep(&(*so)[i].specifier);
                if (po->flags & OPT_STRING)
                    av_freep(&(*so)[i].u.str);
            }
            av_freep(so);
            *count = 0;
        } else if (po->flags & OPT_OFFSET && po->flags & OPT_STRING)
            av_freep(dst);
        po++;
    }

    /* fields not driven by the option table */
    av_freep(&o->stream_maps);
    av_freep(&o->meta_data_maps);
    av_freep(&o->streamid_map);

    memset(o, 0, sizeof(*o));

    /* non-zero defaults */
    o->mux_max_delay = 0.7;
    o->recording_time = INT64_MAX;
    o->limit_filesize = UINT64_MAX;
    o->chapters_input_file = INT_MAX;

    uninit_opts();
    init_opts();
}
/* Allocate one FrameBuffer matching the current geometry/format of ist's
 * decoder, with edge padding unless CODEC_FLAG_EMU_EDGE is set.
 * On success stores the buffer in *pbuf and returns 0; returns a negative
 * AVERROR on allocation failure. */
static int alloc_buffer(InputStream *ist, FrameBuffer **pbuf)
{
    AVCodecContext *s = ist->st->codec;
    FrameBuffer *buf = av_mallocz(sizeof(*buf));
    int i, ret;
    /* bytes per pixel of the first component, used to step over the
     * horizontal edge */
    const int pixel_size = av_pix_fmt_descriptors[s->pix_fmt].comp[0].step_minus1+1;
    int h_chroma_shift, v_chroma_shift;
    int edge = 32; // XXX should be avcodec_get_edge_width(), but that fails on svq1
    int w = s->width, h = s->height;

    if (!buf)
        return AVERROR(ENOMEM);

    /* reserve room for the decoder's edge area on every side */
    if (!(s->flags & CODEC_FLAG_EMU_EDGE)) {
        w += 2*edge;
        h += 2*edge;
    }

    avcodec_align_dimensions(s, &w, &h);
    if ((ret = av_image_alloc(buf->base, buf->linesize, w, h,
                              s->pix_fmt, 32)) < 0) {
        av_freep(&buf);
        return ret;
    }
    /* XXX this shouldn't be needed, but some tests break without this line
     * those decoders are buggy and need to be fixed.
     * the following tests fail:
     * cdgraphics, ansi, aasc, fraps-v1, qtrle-1bit
     */
    memset(buf->base[0], 128, ret);

    avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);
    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
        const int h_shift = i==0 ? 0 : h_chroma_shift;
        const int v_shift = i==0 ? 0 : v_chroma_shift;

        /* data[] points past the edge padding (32-byte aligned);
         * with EMU_EDGE the decoder handles edges itself, so no offset */
        if (s->flags & CODEC_FLAG_EMU_EDGE)
            buf->data[i] = buf->base[i];
        else
            buf->data[i] = buf->base[i] +
                           FFALIGN((buf->linesize[i]*edge >> v_shift) +
                                   (pixel_size*edge >> h_shift), 32);
    }
    buf->w       = s->width;
    buf->h       = s->height;
    buf->pix_fmt = s->pix_fmt;
    buf->ist     = ist;

    *pbuf = buf;
    return 0;
}
  413. static void free_buffer_pool(InputStream *ist)
  414. {
  415. FrameBuffer *buf = ist->buffer_pool;
  416. while (buf) {
  417. ist->buffer_pool = buf->next;
  418. av_freep(&buf->base[0]);
  419. av_free(buf);
  420. buf = ist->buffer_pool;
  421. }
  422. }
  423. static void unref_buffer(InputStream *ist, FrameBuffer *buf)
  424. {
  425. av_assert0(buf->refcount);
  426. buf->refcount--;
  427. if (!buf->refcount) {
  428. buf->next = ist->buffer_pool;
  429. ist->buffer_pool = buf;
  430. }
  431. }
/* AVCodecContext.get_buffer callback: hand the decoder a picture buffer
 * from the stream's pool, allocating or re-allocating as needed.
 * Returns 0 on success or a negative AVERROR. */
static int codec_get_buffer(AVCodecContext *s, AVFrame *frame)
{
    InputStream *ist = s->opaque;
    FrameBuffer *buf;
    int ret, i;

    /* make sure the pool has at least one buffer */
    if (!ist->buffer_pool && (ret = alloc_buffer(ist, &ist->buffer_pool)) < 0)
        return ret;

    /* pop the head of the free list */
    buf              = ist->buffer_pool;
    ist->buffer_pool = buf->next;
    buf->next        = NULL;
    /* geometry/format changed since this buffer was allocated: replace it */
    if (buf->w != s->width || buf->h != s->height || buf->pix_fmt != s->pix_fmt) {
        av_freep(&buf->base[0]);
        av_free(buf);
        if ((ret = alloc_buffer(ist, &buf)) < 0)
            return ret;
    }
    buf->refcount++;

    frame->opaque        = buf;
    frame->type          = FF_BUFFER_TYPE_USER;
    frame->extended_data = frame->data;
    frame->pkt_pts       = s->pkt ? s->pkt->pts : AV_NOPTS_VALUE;

    for (i = 0; i < FF_ARRAY_ELEMS(buf->data); i++) {
        frame->base[i]     = buf->base[i];  // XXX h264.c uses base though it shouldn't
        frame->data[i]     = buf->data[i];
        frame->linesize[i] = buf->linesize[i];
    }

    return 0;
}
  460. static void codec_release_buffer(AVCodecContext *s, AVFrame *frame)
  461. {
  462. InputStream *ist = s->opaque;
  463. FrameBuffer *buf = frame->opaque;
  464. int i;
  465. for (i = 0; i < FF_ARRAY_ELEMS(frame->data); i++)
  466. frame->data[i] = NULL;
  467. unref_buffer(ist, buf);
  468. }
  469. static void filter_release_buffer(AVFilterBuffer *fb)
  470. {
  471. FrameBuffer *buf = fb->priv;
  472. av_free(fb);
  473. unref_buffer(buf->ist, buf);
  474. }
  475. #if CONFIG_AVFILTER
  476. static int configure_video_filters(InputStream *ist, OutputStream *ost)
  477. {
  478. AVFilterContext *last_filter, *filter;
  479. /** filter graph containing all filters including input & output */
  480. AVCodecContext *codec = ost->st->codec;
  481. AVCodecContext *icodec = ist->st->codec;
  482. SinkContext sink_ctx = { .pix_fmt = codec->pix_fmt };
  483. AVRational sample_aspect_ratio;
  484. char args[255];
  485. int ret;
  486. ost->graph = avfilter_graph_alloc();
  487. if (ist->st->sample_aspect_ratio.num) {
  488. sample_aspect_ratio = ist->st->sample_aspect_ratio;
  489. } else
  490. sample_aspect_ratio = ist->st->codec->sample_aspect_ratio;
  491. snprintf(args, 255, "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
  492. ist->st->codec->height, ist->st->codec->pix_fmt, 1, AV_TIME_BASE,
  493. sample_aspect_ratio.num, sample_aspect_ratio.den);
  494. ret = avfilter_graph_create_filter(&ost->input_video_filter, avfilter_get_by_name("buffer"),
  495. "src", args, NULL, ost->graph);
  496. if (ret < 0)
  497. return ret;
  498. ret = avfilter_graph_create_filter(&ost->output_video_filter, &sink,
  499. "out", NULL, &sink_ctx, ost->graph);
  500. if (ret < 0)
  501. return ret;
  502. last_filter = ost->input_video_filter;
  503. if (codec->width != icodec->width || codec->height != icodec->height) {
  504. snprintf(args, 255, "%d:%d:flags=0x%X",
  505. codec->width,
  506. codec->height,
  507. (unsigned)ost->sws_flags);
  508. if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
  509. NULL, args, NULL, ost->graph)) < 0)
  510. return ret;
  511. if ((ret = avfilter_link(last_filter, 0, filter, 0)) < 0)
  512. return ret;
  513. last_filter = filter;
  514. }
  515. snprintf(args, sizeof(args), "flags=0x%X", (unsigned)ost->sws_flags);
  516. ost->graph->scale_sws_opts = av_strdup(args);
  517. if (ost->avfilter) {
  518. AVFilterInOut *outputs = av_malloc(sizeof(AVFilterInOut));
  519. AVFilterInOut *inputs = av_malloc(sizeof(AVFilterInOut));
  520. outputs->name = av_strdup("in");
  521. outputs->filter_ctx = last_filter;
  522. outputs->pad_idx = 0;
  523. outputs->next = NULL;
  524. inputs->name = av_strdup("out");
  525. inputs->filter_ctx = ost->output_video_filter;
  526. inputs->pad_idx = 0;
  527. inputs->next = NULL;
  528. if ((ret = avfilter_graph_parse(ost->graph, ost->avfilter, inputs, outputs, NULL)) < 0)
  529. return ret;
  530. } else {
  531. if ((ret = avfilter_link(last_filter, 0, ost->output_video_filter, 0)) < 0)
  532. return ret;
  533. }
  534. if ((ret = avfilter_graph_config(ost->graph, NULL)) < 0)
  535. return ret;
  536. codec->width = ost->output_video_filter->inputs[0]->w;
  537. codec->height = ost->output_video_filter->inputs[0]->h;
  538. codec->sample_aspect_ratio = ost->st->sample_aspect_ratio =
  539. ost->frame_aspect_ratio ? // overridden by the -aspect cli option
  540. av_d2q(ost->frame_aspect_ratio * codec->height/codec->width, 255) :
  541. ost->output_video_filter->inputs[0]->sample_aspect_ratio;
  542. return 0;
  543. }
  544. #endif /* CONFIG_AVFILTER */
/* Called at normal exit and from the signal handler. */
static void term_exit(void)
{
    /* NOTE(review): empty AV_LOG_QUIET message -- presumably forces the
     * logger to finish any partially written line; confirm against the
     * av_log implementation */
    av_log(NULL, AV_LOG_QUIET, "");
}
/* set asynchronously from sigterm_handler(), read on the main path */
static volatile int received_sigterm = 0;
static volatile int received_nb_signals = 0;

/* Signal handler: only records the signal; real cleanup runs later on
 * the normal code path (see exit_program() / decode_interrupt_cb()). */
static void
sigterm_handler(int sig)
{
    received_sigterm = sig;
    received_nb_signals++;
    term_exit();
}
/* Install the termination signal handlers. */
static void term_init(void)
{
    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    /* CPU-time limit exceeded; not available on all platforms */
    signal(SIGXCPU, sigterm_handler);
#endif
}
  566. static int decode_interrupt_cb(void *ctx)
  567. {
  568. return received_nb_signals > 1;
  569. }
  570. static const AVIOInterruptCB int_cb = { decode_interrupt_cb, NULL };
/* Free all global state, close all files and terminate the process with
 * the given exit code (255 if a termination signal was received). */
void exit_program(int ret)
{
    int i;

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        AVFormatContext *s = output_files[i].ctx;
        if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
            avio_close(s->pb);
        avformat_free_context(s);
        av_dict_free(&output_files[i].opts);
    }
    for (i = 0; i < nb_output_streams; i++) {
        /* tear down the bitstream filter chain */
        AVBitStreamFilterContext *bsfc = output_streams[i].bitstream_filters;
        while (bsfc) {
            AVBitStreamFilterContext *next = bsfc->next;
            av_bitstream_filter_close(bsfc);
            bsfc = next;
        }
        output_streams[i].bitstream_filters = NULL;

        /* free the reusable audio encoding frame */
        if (output_streams[i].output_frame) {
            AVFrame *frame = output_streams[i].output_frame;
            if (frame->extended_data != frame->data)
                av_freep(&frame->extended_data);
            av_freep(&frame);
        }
#if CONFIG_AVFILTER
        av_freep(&output_streams[i].avfilter);
#endif
    }
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i].ctx);
    }
    for (i = 0; i < nb_input_streams; i++) {
        av_freep(&input_streams[i].decoded_frame);
        av_freep(&input_streams[i].filtered_frame);
        av_dict_free(&input_streams[i].opts);
        free_buffer_pool(&input_streams[i]);
    }

    if (vstats_file)
        fclose(vstats_file);
    av_free(vstats_filename);

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();
    av_free(audio_buf);
    allocated_audio_buf_size = 0;

#if CONFIG_AVFILTER
    avfilter_uninit();
#endif
    avformat_network_deinit();

    /* a signal-triggered exit always reports 255, mirroring a killed process */
    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Received signal %d: terminating.\n",
               (int) received_sigterm);
        exit (255);
    }

    exit(ret);
}
  630. static void assert_avoptions(AVDictionary *m)
  631. {
  632. AVDictionaryEntry *t;
  633. if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
  634. av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
  635. exit_program(1);
  636. }
  637. }
/* Abort with a helpful message when an experimental codec is selected
 * without '-strict experimental'; suggests a non-experimental alternative
 * if one exists. */
static void assert_codec_experimental(AVCodecContext *c, int encoder)
{
    const char *codec_string = encoder ? "encoder" : "decoder";
    AVCodec *codec;
    if (c->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
        c->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(NULL, AV_LOG_FATAL, "%s '%s' is experimental and might produce bad "
                "results.\nAdd '-strict experimental' if you want to use it.\n",
                codec_string, c->codec->name);
        /* the default codec for this id may be a non-experimental one */
        codec = encoder ? avcodec_find_encoder(c->codec->id) : avcodec_find_decoder(c->codec->id);
        if (!(codec->capabilities & CODEC_CAP_EXPERIMENTAL))
            av_log(NULL, AV_LOG_FATAL, "Or use the non experimental %s '%s'.\n",
                   codec_string, codec->name);
        exit_program(1);
    }
}
  654. static void choose_sample_fmt(AVStream *st, AVCodec *codec)
  655. {
  656. if (codec && codec->sample_fmts) {
  657. const enum AVSampleFormat *p = codec->sample_fmts;
  658. for (; *p != -1; p++) {
  659. if (*p == st->codec->sample_fmt)
  660. break;
  661. }
  662. if (*p == -1) {
  663. av_log(NULL, AV_LOG_WARNING,
  664. "Incompatible sample format '%s' for codec '%s', auto-selecting format '%s'\n",
  665. av_get_sample_fmt_name(st->codec->sample_fmt),
  666. codec->name,
  667. av_get_sample_fmt_name(codec->sample_fmts[0]));
  668. st->codec->sample_fmt = codec->sample_fmts[0];
  669. }
  670. }
  671. }
  672. /**
  673. * Update the requested input sample format based on the output sample format.
  674. * This is currently only used to request float output from decoders which
  675. * support multiple sample formats, one of which is AV_SAMPLE_FMT_FLT.
  676. * Ideally this will be removed in the future when decoders do not do format
  677. * conversion and only output in their native format.
  678. */
  679. static void update_sample_fmt(AVCodecContext *dec, AVCodec *dec_codec,
  680. AVCodecContext *enc)
  681. {
  682. /* if sample formats match or a decoder sample format has already been
  683. requested, just return */
  684. if (enc->sample_fmt == dec->sample_fmt ||
  685. dec->request_sample_fmt > AV_SAMPLE_FMT_NONE)
  686. return;
  687. /* if decoder supports more than one output format */
  688. if (dec_codec && dec_codec->sample_fmts &&
  689. dec_codec->sample_fmts[0] != AV_SAMPLE_FMT_NONE &&
  690. dec_codec->sample_fmts[1] != AV_SAMPLE_FMT_NONE) {
  691. const enum AVSampleFormat *p;
  692. int min_dec = -1, min_inc = -1;
  693. /* find a matching sample format in the encoder */
  694. for (p = dec_codec->sample_fmts; *p != AV_SAMPLE_FMT_NONE; p++) {
  695. if (*p == enc->sample_fmt) {
  696. dec->request_sample_fmt = *p;
  697. return;
  698. } else if (*p > enc->sample_fmt) {
  699. min_inc = FFMIN(min_inc, *p - enc->sample_fmt);
  700. } else
  701. min_dec = FFMIN(min_dec, enc->sample_fmt - *p);
  702. }
  703. /* if none match, provide the one that matches quality closest */
  704. dec->request_sample_fmt = min_inc > 0 ? enc->sample_fmt + min_inc :
  705. enc->sample_fmt - min_dec;
  706. }
  707. }
  708. static void choose_sample_rate(AVStream *st, AVCodec *codec)
  709. {
  710. if (codec && codec->supported_samplerates) {
  711. const int *p = codec->supported_samplerates;
  712. int best = 0;
  713. int best_dist = INT_MAX;
  714. for (; *p; p++) {
  715. int dist = abs(st->codec->sample_rate - *p);
  716. if (dist < best_dist) {
  717. best_dist = dist;
  718. best = *p;
  719. }
  720. }
  721. if (best_dist) {
  722. av_log(st->codec, AV_LOG_WARNING, "Requested sampling rate unsupported using closest supported (%d)\n", best);
  723. }
  724. st->codec->sample_rate = best;
  725. }
  726. }
/* If the stream's pixel format is not supported by the encoder, switch to
 * the encoder's first supported format (with a warning).  For (L)JPEG in
 * non-strict mode, widen the allowed list with the full-range YUVJ formats. */
static void choose_pixel_fmt(AVStream *st, AVCodec *codec)
{
    if (codec && codec->pix_fmts) {
        const enum PixelFormat *p = codec->pix_fmts;
        if (st->codec->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL) {
            if (st->codec->codec_id == CODEC_ID_MJPEG) {
                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE };
            } else if (st->codec->codec_id == CODEC_ID_LJPEG) {
                p = (const enum PixelFormat[]) { PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ444P, PIX_FMT_YUV420P,
                                                 PIX_FMT_YUV422P, PIX_FMT_YUV444P, PIX_FMT_BGRA, PIX_FMT_NONE };
            }
        }
        for (; *p != PIX_FMT_NONE; p++) {
            if (*p == st->codec->pix_fmt)
                break;
        }
        if (*p == PIX_FMT_NONE) {
            /* stay quiet when no format was requested at all */
            if (st->codec->pix_fmt != PIX_FMT_NONE)
                av_log(NULL, AV_LOG_WARNING,
                       "Incompatible pixel format '%s' for codec '%s', auto-selecting format '%s'\n",
                       av_pix_fmt_descriptors[st->codec->pix_fmt].name,
                       codec->name,
                       av_pix_fmt_descriptors[codec->pix_fmts[0]].name);
            st->codec->pix_fmt = codec->pix_fmts[0];
        }
    }
}
  754. static double
  755. get_sync_ipts(const OutputStream *ost, int64_t pts)
  756. {
  757. OutputFile *of = &output_files[ost->file_index];
  758. return (double)(pts - of->start_time) / AV_TIME_BASE;
  759. }
/* Run the stream's bitstream filters over pkt and hand it to the muxer.
 * Also enforces the per-stream -frames limit for non-video streams.
 * Exits the program on a write error. */
static void write_frame(AVFormatContext *s, AVPacket *pkt, OutputStream *ost)
{
    AVBitStreamFilterContext *bsfc = ost->bitstream_filters;
    AVCodecContext          *avctx = ost->st->codec;
    int ret;

    /*
     * Audio encoders may split the packets --  #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out()
     */
    if (!(avctx->codec_type == AVMEDIA_TYPE_VIDEO && avctx->codec)) {
        if (ost->frame_number >= ost->max_frames)
            return;
        ost->frame_number++;
    }

    while (bsfc) {
        AVPacket new_pkt = *pkt;
        int a = av_bitstream_filter_filter(bsfc, avctx, NULL,
                                           &new_pkt.data, &new_pkt.size,
                                           pkt->data, pkt->size,
                                           pkt->flags & AV_PKT_FLAG_KEY);
        if (a > 0) {
            /* the filter allocated a new payload: free the old one and
             * make new_pkt own its buffer */
            av_free_packet(pkt);
            new_pkt.destruct = av_destruct_packet;
        } else if (a < 0) {
            /* filter failure is fatal only with -xerror */
            av_log(NULL, AV_LOG_ERROR, "%s failed for stream %d, codec %s",
                   bsfc->filter->name, pkt->stream_index,
                   avctx->codec ? avctx->codec->name : "copy");
            print_error("", a);
            if (exit_on_error)
                exit_program(1);
        }
        *pkt = new_pkt;

        bsfc = bsfc->next;
    }

    pkt->stream_index = ost->index;
    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        exit_program(1);
    }
}
  804. static int check_recording_time(OutputStream *ost)
  805. {
  806. OutputFile *of = &output_files[ost->file_index];
  807. if (of->recording_time != INT64_MAX &&
  808. av_compare_ts(ost->sync_opts - ost->first_pts, ost->st->codec->time_base, of->recording_time,
  809. AV_TIME_BASE_Q) >= 0) {
  810. ost->is_past_recording_time = 1;
  811. return 0;
  812. }
  813. return 1;
  814. }
  815. static void generate_silence(uint8_t* buf, enum AVSampleFormat sample_fmt, size_t size)
  816. {
  817. int fill_char = 0x00;
  818. if (sample_fmt == AV_SAMPLE_FMT_U8)
  819. fill_char = 0x80;
  820. memset(buf, fill_char, size);
  821. }
/* Encode one chunk of interleaved audio (or flush the encoder when buf is
 * NULL) and mux the resulting packet, rescaling its timestamps to the
 * stream time base.  Returns the encoded packet size in bytes. */
static int encode_audio_frame(AVFormatContext *s, OutputStream *ost,
                              const uint8_t *buf, int buf_size)
{
    AVCodecContext *enc = ost->st->codec;
    AVFrame *frame = NULL;
    AVPacket pkt;
    int ret, got_packet;

    av_init_packet(&pkt);
    /* let the encoder allocate the packet payload */
    pkt.data = NULL;
    pkt.size = 0;

    if (buf) {
        /* lazily allocate a reusable frame for this stream */
        if (!ost->output_frame) {
            ost->output_frame = avcodec_alloc_frame();
            if (!ost->output_frame) {
                av_log(NULL, AV_LOG_FATAL, "out-of-memory in encode_audio_frame()\n");
                exit_program(1);
            }
        }
        frame = ost->output_frame;
        /* drop any extended_data left over from a previous call */
        if (frame->extended_data != frame->data)
            av_freep(&frame->extended_data);
        avcodec_get_frame_defaults(frame);

        frame->nb_samples  = buf_size /
                             (enc->channels * av_get_bytes_per_sample(enc->sample_fmt));
        if ((ret = avcodec_fill_audio_frame(frame, enc->channels, enc->sample_fmt,
                                            buf, buf_size, 1)) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
            exit_program(1);
        }

        if (!check_recording_time(ost))
            return 0;

        ost->sync_opts += frame->nb_samples;
    }

    got_packet = 0;
    if (avcodec_encode_audio2(enc, &pkt, frame, &got_packet) < 0) {
        av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
        exit_program(1);
    }

    if (got_packet) {
        /* convert encoder time base -> muxer time base */
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts      = av_rescale_q(pkt.pts,      enc->time_base, ost->st->time_base);
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts      = av_rescale_q(pkt.dts,      enc->time_base, ost->st->time_base);
        if (pkt.duration > 0)
            pkt.duration = av_rescale_q(pkt.duration, enc->time_base, ost->st->time_base);

        write_frame(s, &pkt, ost);

        audio_size += pkt.size;
    }

    return pkt.size;
}
/*
 * Process one decoded audio frame for an output stream: resize the shared
 * audio buffer, (re)configure the resampler / sample-format converter when
 * the input parameters change, apply A/V sync drift correction, then feed
 * fixed-size chunks (or one variable-size chunk) to encode_audio_frame().
 *
 * Uses the file-level globals audio_buf / allocated_audio_buf_size as the
 * scratch output buffer.
 */
static void do_audio_out(AVFormatContext *s, OutputStream *ost,
                         InputStream *ist, AVFrame *decoded_frame)
{
    uint8_t *buftmp;
    int64_t audio_buf_size;

    int size_out, frame_bytes, resample_changed;
    AVCodecContext *enc = ost->st->codec;
    AVCodecContext *dec = ist->st->codec;
    int osize = av_get_bytes_per_sample(enc->sample_fmt);
    int isize = av_get_bytes_per_sample(dec->sample_fmt);
    uint8_t *buf = decoded_frame->data[0];
    int size     = decoded_frame->nb_samples * dec->channels * isize;
    int64_t allocated_for_size = size;

need_realloc:
    /* estimate the worst-case output size in samples, then in bytes */
    audio_buf_size  = (allocated_for_size + isize * dec->channels - 1) / (isize * dec->channels);
    audio_buf_size  = (audio_buf_size * enc->sample_rate + dec->sample_rate) / dec->sample_rate;
    audio_buf_size  = audio_buf_size * 2 + 10000; // safety factors for the deprecated resampling API
    audio_buf_size  = FFMAX(audio_buf_size, enc->frame_size);
    audio_buf_size *= osize * enc->channels;

    if (audio_buf_size > INT_MAX) {
        av_log(NULL, AV_LOG_FATAL, "Buffer sizes too large\n");
        exit_program(1);
    }

    av_fast_malloc(&audio_buf, &allocated_audio_buf_size, audio_buf_size);
    if (!audio_buf) {
        av_log(NULL, AV_LOG_FATAL, "Out of memory in do_audio_out\n");
        exit_program(1);
    }

    if (enc->channels != dec->channels || enc->sample_rate != dec->sample_rate)
        ost->audio_resample = 1;

    /* detect a mid-stream change of the decoded audio parameters */
    resample_changed = ost->resample_sample_fmt  != dec->sample_fmt ||
                       ost->resample_channels    != dec->channels   ||
                       ost->resample_sample_rate != dec->sample_rate;

    if ((ost->audio_resample && !ost->resample) || resample_changed) {
        if (resample_changed) {
            av_log(NULL, AV_LOG_INFO, "Input stream #%d:%d frame changed from rate:%d fmt:%s ch:%d to rate:%d fmt:%s ch:%d\n",
                   ist->file_index, ist->st->index,
                   ost->resample_sample_rate, av_get_sample_fmt_name(ost->resample_sample_fmt), ost->resample_channels,
                   dec->sample_rate, av_get_sample_fmt_name(dec->sample_fmt), dec->channels);
            ost->resample_sample_fmt  = dec->sample_fmt;
            ost->resample_channels    = dec->channels;
            ost->resample_sample_rate = dec->sample_rate;
            if (ost->resample)
                audio_resample_close(ost->resample);
        }
        /* if audio_sync_method is >1 the resampler is needed for audio drift compensation */
        if (audio_sync_method <= 1 &&
            ost->resample_sample_fmt  == enc->sample_fmt &&
            ost->resample_channels    == enc->channels   &&
            ost->resample_sample_rate == enc->sample_rate) {
            ost->resample       = NULL;
            ost->audio_resample = 0;
        } else if (ost->audio_resample) {
            /* the legacy resampler converts through s16 internally */
            if (dec->sample_fmt != AV_SAMPLE_FMT_S16)
                av_log(NULL, AV_LOG_WARNING, "Using s16 intermediate sample format for resampling\n");
            ost->resample = av_audio_resample_init(enc->channels,    dec->channels,
                                                   enc->sample_rate, dec->sample_rate,
                                                   enc->sample_fmt,  dec->sample_fmt,
                                                   16, 10, 0, 0.8);
            if (!ost->resample) {
                av_log(NULL, AV_LOG_FATAL, "Can not resample %d channels @ %d Hz to %d channels @ %d Hz\n",
                       dec->channels, dec->sample_rate,
                       enc->channels, enc->sample_rate);
                exit_program(1);
            }
        }
    }

#define MAKE_SFMT_PAIR(a,b) ((a)+AV_SAMPLE_FMT_NB*(b))
    /* pure sample-format conversion path (no rate/channel change):
     * (re)create the converter only when the fmt pair changed */
    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt &&
        MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt) != ost->reformat_pair) {
        if (ost->reformat_ctx)
            av_audio_convert_free(ost->reformat_ctx);
        ost->reformat_ctx = av_audio_convert_alloc(enc->sample_fmt, 1,
                                                   dec->sample_fmt, 1, NULL, 0);
        if (!ost->reformat_ctx) {
            av_log(NULL, AV_LOG_FATAL, "Cannot convert %s sample format to %s sample format\n",
                   av_get_sample_fmt_name(dec->sample_fmt),
                   av_get_sample_fmt_name(enc->sample_fmt));
            exit_program(1);
        }
        ost->reformat_pair = MAKE_SFMT_PAIR(enc->sample_fmt,dec->sample_fmt);
    }

    if (audio_sync_method) {
        /* delta: difference, in output samples, between where the stream
         * should be (input pts) and where it is (sync_opts + fifo backlog) */
        double delta = get_sync_ipts(ost, ist->last_dts) * enc->sample_rate - ost->sync_opts -
                       av_fifo_size(ost->fifo) / (enc->channels * osize);
        int idelta = delta * dec->sample_rate / enc->sample_rate;
        int byte_delta = idelta * isize * dec->channels;

        // FIXME resample delay
        if (fabs(delta) > 50) {
            if (ist->is_start || fabs(delta) > audio_drift_threshold * enc->sample_rate) {
                if (byte_delta < 0) {
                    /* output is ahead: drop samples from the front of buf */
                    byte_delta = FFMAX(byte_delta, -size);
                    size += byte_delta;
                    buf  -= byte_delta;
                    av_log(NULL, AV_LOG_VERBOSE, "discarding %d audio samples\n",
                           -byte_delta / (isize * dec->channels));
                    if (!size)
                        return;
                    ist->is_start = 0;
                } else {
                    /* output is behind: prepend silence.
                     * NOTE(review): av_realloc() result is not checked and
                     * input_tmp is a static that is never freed — presumably
                     * tolerated as a one-off scratch buffer. */
                    static uint8_t *input_tmp = NULL;
                    input_tmp = av_realloc(input_tmp, byte_delta + size);

                    if (byte_delta > allocated_for_size - size) {
                        /* audio_buf is too small for the padded data; grow it */
                        allocated_for_size = byte_delta + (int64_t)size;
                        goto need_realloc;
                    }
                    ist->is_start = 0;

                    generate_silence(input_tmp, dec->sample_fmt, byte_delta);
                    memcpy(input_tmp + byte_delta, buf, size);
                    buf = input_tmp;
                    size += byte_delta;
                    av_log(NULL, AV_LOG_VERBOSE, "adding %d audio samples of silence\n", idelta);
                }
            } else if (audio_sync_method > 1) {
                /* small drift: let the resampler stretch/squeeze the audio */
                int comp = av_clip(delta, -audio_sync_method, audio_sync_method);
                av_assert0(ost->audio_resample);
                av_log(NULL, AV_LOG_VERBOSE, "compensating audio timestamp drift:%f compensation:%d in:%d\n",
                       delta, comp, enc->sample_rate);
                av_resample_compensate(*(struct AVResampleContext**)ost->resample, comp, enc->sample_rate);
            }
        }
    } else
        /* no sync: derive the output sample clock directly from the input pts */
        ost->sync_opts = lrintf(get_sync_ipts(ost, ist->last_dts) * enc->sample_rate) -
                         av_fifo_size(ost->fifo) / (enc->channels * osize); // FIXME wrong

    if (ost->audio_resample) {
        buftmp = audio_buf;
        size_out = audio_resample(ost->resample,
                                  (short *)buftmp, (short *)buf,
                                  size / (dec->channels * isize));
        size_out = size_out * enc->channels * osize;
    } else {
        buftmp = buf;
        size_out = size;
    }

    if (!ost->audio_resample && dec->sample_fmt != enc->sample_fmt) {
        /* convert sample format in place into audio_buf */
        const void *ibuf[6] = { buftmp };
        void *obuf[6]  = { audio_buf };
        int istride[6] = { isize };
        int ostride[6] = { osize };
        int len = size_out / istride[0];
        if (av_audio_convert(ost->reformat_ctx, obuf, ostride, ibuf, istride, len) < 0) {
            printf("av_audio_convert() failed\n");
            if (exit_on_error)
                exit_program(1);
            return;
        }
        buftmp = audio_buf;
        size_out = len * osize;
    }

    /* now encode as many frames as possible */
    if (!(enc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
        /* output resampled raw samples */
        if (av_fifo_realloc2(ost->fifo, av_fifo_size(ost->fifo) + size_out) < 0) {
            av_log(NULL, AV_LOG_FATAL, "av_fifo_realloc2() failed\n");
            exit_program(1);
        }
        av_fifo_generic_write(ost->fifo, buftmp, size_out, NULL);

        frame_bytes = enc->frame_size * osize * enc->channels;

        /* drain the fifo in full encoder-frame chunks; the remainder stays
         * buffered until the next call or the final flush */
        while (av_fifo_size(ost->fifo) >= frame_bytes) {
            av_fifo_generic_read(ost->fifo, audio_buf, frame_bytes, NULL);
            encode_audio_frame(s, ost, audio_buf, frame_bytes);
        }
    } else {
        encode_audio_frame(s, ost, buftmp, size_out);
    }
}
  1039. static void pre_process_video_frame(InputStream *ist, AVPicture *picture, void **bufp)
  1040. {
  1041. AVCodecContext *dec;
  1042. AVPicture *picture2;
  1043. AVPicture picture_tmp;
  1044. uint8_t *buf = 0;
  1045. dec = ist->st->codec;
  1046. /* deinterlace : must be done before any resize */
  1047. if (do_deinterlace) {
  1048. int size;
  1049. /* create temporary picture */
  1050. size = avpicture_get_size(dec->pix_fmt, dec->width, dec->height);
  1051. buf = av_malloc(size);
  1052. if (!buf)
  1053. return;
  1054. picture2 = &picture_tmp;
  1055. avpicture_fill(picture2, buf, dec->pix_fmt, dec->width, dec->height);
  1056. if (avpicture_deinterlace(picture2, picture,
  1057. dec->pix_fmt, dec->width, dec->height) < 0) {
  1058. /* if error, do not deinterlace */
  1059. av_log(NULL, AV_LOG_WARNING, "Deinterlacing failed\n");
  1060. av_free(buf);
  1061. buf = NULL;
  1062. picture2 = picture;
  1063. }
  1064. } else {
  1065. picture2 = picture;
  1066. }
  1067. if (picture != picture2)
  1068. *picture = *picture2;
  1069. *bufp = buf;
  1070. }
  1071. static void do_subtitle_out(AVFormatContext *s,
  1072. OutputStream *ost,
  1073. InputStream *ist,
  1074. AVSubtitle *sub,
  1075. int64_t pts)
  1076. {
  1077. static uint8_t *subtitle_out = NULL;
  1078. int subtitle_out_max_size = 1024 * 1024;
  1079. int subtitle_out_size, nb, i;
  1080. AVCodecContext *enc;
  1081. AVPacket pkt;
  1082. if (pts == AV_NOPTS_VALUE) {
  1083. av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
  1084. if (exit_on_error)
  1085. exit_program(1);
  1086. return;
  1087. }
  1088. enc = ost->st->codec;
  1089. if (!subtitle_out) {
  1090. subtitle_out = av_malloc(subtitle_out_max_size);
  1091. }
  1092. /* Note: DVB subtitle need one packet to draw them and one other
  1093. packet to clear them */
  1094. /* XXX: signal it in the codec context ? */
  1095. if (enc->codec_id == CODEC_ID_DVB_SUBTITLE)
  1096. nb = 2;
  1097. else
  1098. nb = 1;
  1099. for (i = 0; i < nb; i++) {
  1100. ost->sync_opts = av_rescale_q(pts, ist->st->time_base, enc->time_base);
  1101. if (!check_recording_time(ost))
  1102. return;
  1103. sub->pts = av_rescale_q(pts, ist->st->time_base, AV_TIME_BASE_Q);
  1104. // start_display_time is required to be 0
  1105. sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
  1106. sub->end_display_time -= sub->start_display_time;
  1107. sub->start_display_time = 0;
  1108. subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
  1109. subtitle_out_max_size, sub);
  1110. if (subtitle_out_size < 0) {
  1111. av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
  1112. exit_program(1);
  1113. }
  1114. av_init_packet(&pkt);
  1115. pkt.data = subtitle_out;
  1116. pkt.size = subtitle_out_size;
  1117. pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->st->time_base);
  1118. if (enc->codec_id == CODEC_ID_DVB_SUBTITLE) {
  1119. /* XXX: the pts correction is handled here. Maybe handling
  1120. it in the codec would be better */
  1121. if (i == 0)
  1122. pkt.pts += 90 * sub->start_display_time;
  1123. else
  1124. pkt.pts += 90 * sub->end_display_time;
  1125. }
  1126. write_frame(s, &pkt, ost);
  1127. }
  1128. }
  1129. #if !CONFIG_AVFILTER
/*
 * Software-scaler fallback used when libavfilter is not compiled in:
 * converts the decoded picture to the output codec's size/pixel format.
 * *out_picture points either at in_picture (no conversion needed) or at
 * ost->pict_tmp, which receives the scaled frame.
 */
static void do_video_resample(OutputStream *ost,
                              InputStream *ist,
                              AVFrame *in_picture,
                              AVFrame **out_picture)
{
    int resample_changed = 0;

    *out_picture = in_picture;

    /* detect a mid-stream change of the decoded frame geometry/format */
    resample_changed = ost->resample_width   != in_picture->width  ||
                       ost->resample_height  != in_picture->height ||
                       ost->resample_pix_fmt != in_picture->format;

    if (resample_changed) {
        av_log(NULL, AV_LOG_INFO,
               "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               ist->file_index, ist->st->index,
               ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
               in_picture->width, in_picture->height, av_get_pix_fmt_name(in_picture->format));
        if (!ost->video_resample)
            ost->video_resample = 1;
    }

    if (ost->video_resample) {
        *out_picture = &ost->pict_tmp;
        if (resample_changed) {
            /* initialize a new scaler context.
             * NOTE(review): the context is built from the codec-context
             * dimensions, not from in_picture's — presumably they agree by
             * this point; confirm if frames can change size mid-stream. */
            sws_freeContext(ost->img_resample_ctx);
            ost->img_resample_ctx = sws_getContext(
                ist->st->codec->width,
                ist->st->codec->height,
                ist->st->codec->pix_fmt,
                ost->st->codec->width,
                ost->st->codec->height,
                ost->st->codec->pix_fmt,
                ost->sws_flags, NULL, NULL, NULL);
            if (ost->img_resample_ctx == NULL) {
                av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
                exit_program(1);
            }
        }
        sws_scale(ost->img_resample_ctx, in_picture->data, in_picture->linesize,
                  0, ost->resample_height, (*out_picture)->data, (*out_picture)->linesize);
    }

    /* remember the current input geometry for the next change check */
    if (resample_changed) {
        ost->resample_width   = in_picture->width;
        ost->resample_height  = in_picture->height;
        ost->resample_pix_fmt = in_picture->format;
    }
}
  1176. #endif
/*
 * Emit one decoded video frame to an output stream, applying the selected
 * frame-rate conversion policy (passthrough / CFR dup-drop / VFR), then
 * encode and mux it.  With CFR the frame may be duplicated or dropped to
 * keep the output clock (ost->sync_opts) aligned with the input pts.
 * *frame_size receives the encoded size of the last packet (for -vstats).
 */
static void do_video_out(AVFormatContext *s,
                         OutputStream *ost,
                         InputStream *ist,
                         AVFrame *in_picture,
                         int *frame_size, float quality)
{
    int nb_frames, i, ret, format_video_sync;
    AVFrame *final_picture;
    AVCodecContext *enc;
    double sync_ipts;

    enc = ost->st->codec;

    /* input pts expressed in output frame periods */
    sync_ipts = get_sync_ipts(ost, in_picture->pts) / av_q2d(enc->time_base);

    /* by default, we output a single frame */
    nb_frames = 1;

    *frame_size = 0;

    format_video_sync = video_sync_method;
    if (format_video_sync == VSYNC_AUTO)
        format_video_sync = (s->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH :
                            (s->oformat->flags & AVFMT_VARIABLE_FPS) ? VSYNC_VFR : VSYNC_CFR;

    if (format_video_sync != VSYNC_PASSTHROUGH) {
        /* vdelta > 0: output clock lags the input -> duplicate frames;
         * vdelta < 0: output clock is ahead    -> drop this frame */
        double vdelta = sync_ipts - ost->sync_opts;
        // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
        if (vdelta < -1.1)
            nb_frames = 0;
        else if (format_video_sync == VSYNC_VFR) {
            if (vdelta <= -0.6) {
                nb_frames = 0;
            } else if (vdelta > 0.6)
                ost->sync_opts = lrintf(sync_ipts);
        } else if (vdelta > 1.1)
            nb_frames = lrintf(vdelta);
        if (nb_frames == 0) {
            ++nb_frames_drop;
            av_log(NULL, AV_LOG_VERBOSE, "*** drop!\n");
        } else if (nb_frames > 1) {
            nb_frames_dup += nb_frames - 1;
            av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
        }
    } else
        ost->sync_opts = lrintf(sync_ipts);

    /* never exceed -frames */
    nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
    if (nb_frames <= 0)
        return;

#if !CONFIG_AVFILTER
    do_video_resample(ost, ist, in_picture, &final_picture);
#else
    final_picture = in_picture;
#endif

    if (!ost->frame_number)
        ost->first_pts = ost->sync_opts;

    /* duplicates frame if needed */
    for (i = 0; i < nb_frames; i++) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;

        if (!check_recording_time(ost))
            return;

        if (s->oformat->flags & AVFMT_RAWPICTURE &&
            enc->codec->id == CODEC_ID_RAWVIDEO) {
            /* raw pictures are written as AVPicture structure to
               avoid any copies. We support temporarily the older
               method. */
            enc->coded_frame->interlaced_frame = in_picture->interlaced_frame;
            enc->coded_frame->top_field_first  = in_picture->top_field_first;
            pkt.data   = (uint8_t *)final_picture;
            pkt.size   = sizeof(AVPicture);
            pkt.pts    = av_rescale_q(ost->sync_opts, enc->time_base, ost->st->time_base);
            pkt.flags |= AV_PKT_FLAG_KEY;

            write_frame(s, &pkt, ost);
        } else {
            int got_packet;
            AVFrame big_picture;

            /* shallow copy: big_picture shares data planes with final_picture */
            big_picture = *final_picture;
            /* better than nothing: use input picture interlaced
               settings */
            big_picture.interlaced_frame = in_picture->interlaced_frame;
            if (ost->st->codec->flags & (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME)) {
                if (ost->top_field_first == -1)
                    big_picture.top_field_first = in_picture->top_field_first;
                else
                    big_picture.top_field_first = !!ost->top_field_first;
            }

            /* handles same_quant here. This is not correct because it may
               not be a global option */
            big_picture.quality = quality;
            if (!enc->me_threshold)
                big_picture.pict_type = 0;
            big_picture.pts = ost->sync_opts;
            /* honor -force_key_frames timestamps */
            if (ost->forced_kf_index < ost->forced_kf_count &&
                big_picture.pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
                big_picture.pict_type = AV_PICTURE_TYPE_I;
                ost->forced_kf_index++;
            }
            ret = avcodec_encode_video2(enc, &pkt, &big_picture, &got_packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                exit_program(1);
            }

            if (got_packet) {
                /* rescale packet timestamps from codec to muxer time base */
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);

                write_frame(s, &pkt, ost);
                *frame_size = ret;
                video_size += ret;

                /* if two pass, output log */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
            }
        }
        ost->sync_opts++;
        /*
         * For video, number of frames in == number of packets out.
         * But there may be reordering, so we can't throw away frames on encoder
         * flush, we need to limit them here, before they go into encoder.
         */
        ost->frame_number++;
    }
}
  1299. static double psnr(double d)
  1300. {
  1301. return -10.0 * log(d) / log(10.0);
  1302. }
  1303. static void do_video_stats(AVFormatContext *os, OutputStream *ost,
  1304. int frame_size)
  1305. {
  1306. AVCodecContext *enc;
  1307. int frame_number;
  1308. double ti1, bitrate, avg_bitrate;
  1309. /* this is executed just the first time do_video_stats is called */
  1310. if (!vstats_file) {
  1311. vstats_file = fopen(vstats_filename, "w");
  1312. if (!vstats_file) {
  1313. perror("fopen");
  1314. exit_program(1);
  1315. }
  1316. }
  1317. enc = ost->st->codec;
  1318. if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
  1319. frame_number = ost->frame_number;
  1320. fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number, enc->coded_frame->quality / (float)FF_QP2LAMBDA);
  1321. if (enc->flags&CODEC_FLAG_PSNR)
  1322. fprintf(vstats_file, "PSNR= %6.2f ", psnr(enc->coded_frame->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
  1323. fprintf(vstats_file,"f_size= %6d ", frame_size);
  1324. /* compute pts value */
  1325. ti1 = ost->sync_opts * av_q2d(enc->time_base);
  1326. if (ti1 < 0.01)
  1327. ti1 = 0.01;
  1328. bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
  1329. avg_bitrate = (double)(video_size * 8) / ti1 / 1000.0;
  1330. fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
  1331. (double)video_size / 1024, ti1, bitrate, avg_bitrate);
  1332. fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(enc->coded_frame->pict_type));
  1333. }
  1334. }
/*
 * Print the periodic one-line progress report (and, when is_last_report is
 * set, the final size summary).  Intermediate reports are rate limited to
 * one every 0.5 seconds via the static last_time.  The report line shows,
 * for the first video stream only: frame count, fps, quantizer, optional
 * qp histogram and PSNR, then global size/time/bitrate and dup/drop counts.
 */
static void print_report(OutputFile *output_files,
                         OutputStream *ost_table, int nb_ostreams,
                         int is_last_report, int64_t timer_start)
{
    char buf[1024];
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate, ti1, pts;
    static int64_t last_time = -1;
    static int qp_histogram[52];

    if (!print_stats && !is_last_report)
        return;

    if (!is_last_report) {
        int64_t cur_time;
        /* display the report every 0.5 seconds */
        cur_time = av_gettime();
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    oc = output_files[0].ctx;

    total_size = avio_size(oc->pb);
    if (total_size < 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    buf[0] = '\0';
    ti1 = 1e10;
    vid = 0;
    for (i = 0; i < nb_ostreams; i++) {
        float q = -1;
        ost = &ost_table[i];
        enc = ost->st->codec;
        if (!ost->stream_copy && enc->coded_frame)
            q = enc->coded_frame->quality / (float)FF_QP2LAMBDA;
        /* additional video streams only contribute their quantizer */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "q=%2.1f ", q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float t = (av_gettime() - timer_start) / 1000000.0;

            frame_number = ost->frame_number;
            snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "frame=%5d fps=%3d q=%3.1f ",
                     frame_number, (t > 1) ? (int)(frame_number / t + 0.5) : 0, q);
            if (is_last_report)
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* one hex digit per qp bucket, log2-compressed count */
                for (j = 0; j < 32; j++)
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%X", (int)lrintf(log(qp_histogram[j] + 1) / log(2)));
            }
            if (enc->flags & CODEC_FLAG_PSNR) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                char type[3] = { 'Y', 'U', 'V' };
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        /* accumulated error over the whole stream */
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        /* error of the last coded frame only */
                        error = enc->coded_frame->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* chroma planes are subsampled to a quarter of the area */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "%c:%2.2f ", type[j], psnr(error / scale));
                }
                snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), "*:%2.2f ", psnr(error_sum / scale_sum));
            }
            vid = 1;
        }
        /* compute min output value */
        pts = (double)ost->st->pts.val * av_q2d(ost->st->time_base);
        if ((pts < ti1) && (pts > 0))
            ti1 = pts;
    }
    if (ti1 < 0.01)
        ti1 = 0.01;

    bitrate = (double)(total_size * 8) / ti1 / 1000.0;

    snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
             "size=%8.0fkB time=%0.2f bitrate=%6.1fkbits/s",
             (double)total_size / 1024, ti1, bitrate);

    if (nb_frames_dup || nb_frames_drop)
        snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), " dup=%d drop=%d",
                 nb_frames_dup, nb_frames_drop);

    /* '\r' keeps the report on one console line between updates */
    av_log(NULL, AV_LOG_INFO, "%s    \r", buf);

    fflush(stderr);

    if (is_last_report) {
        int64_t raw = audio_size + video_size + extra_size;
        av_log(NULL, AV_LOG_INFO, "\n");
        av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB global headers:%1.0fkB muxing overhead %f%%\n",
               video_size / 1024.0,
               audio_size / 1024.0,
               extra_size / 1024.0,
               100.0 * (total_size - raw) / raw
        );
    }
}
/*
 * Drain every encoder at end of input: encode any samples still buffered
 * in the audio fifos, then repeatedly feed NULL frames until the codecs
 * stop producing packets.  Streams that are stream-copied, PCM-style
 * audio (frame_size <= 1), or rawvideo-to-rawpicture are skipped because
 * they buffer nothing.
 */
static void flush_encoders(OutputStream *ost_table, int nb_ostreams)
{
    int i, ret;

    for (i = 0; i < nb_ostreams; i++) {
        OutputStream   *ost = &ost_table[i];
        AVCodecContext *enc = ost->st->codec;
        AVFormatContext *os = output_files[ost->file_index].ctx;
        int stop_encoding = 0;

        if (!ost->encoding_needed)
            continue;

        if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;
        if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO && (os->oformat->flags & AVFMT_RAWPICTURE) && enc->codec->id == CODEC_ID_RAWVIDEO)
            continue;

        for (;;) {
            AVPacket pkt;
            int fifo_bytes, got_packet;
            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;

            switch (ost->st->codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                fifo_bytes = av_fifo_size(ost->fifo);
                if (fifo_bytes > 0) {
                    /* encode any samples remaining in fifo */
                    int frame_bytes = fifo_bytes;

                    av_fifo_generic_read(ost->fifo, audio_buf, fifo_bytes, NULL);

                    /* pad last frame with silence if needed */
                    if (!(enc->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME)) {
                        frame_bytes = enc->frame_size * enc->channels *
                                      av_get_bytes_per_sample(enc->sample_fmt);
                        if (allocated_audio_buf_size < frame_bytes)
                            exit_program(1);
                        generate_silence(audio_buf + fifo_bytes, enc->sample_fmt, frame_bytes - fifo_bytes);
                    }
                    encode_audio_frame(os, ost, audio_buf, frame_bytes);
                } else {
                    /* flush encoder with NULL frames until it is done
                       returning packets */
                    if (encode_audio_frame(os, ost, NULL, 0) == 0) {
                        stop_encoding = 1;
                        break;
                    }
                }
                break;
            case AVMEDIA_TYPE_VIDEO:
                ret = avcodec_encode_video2(enc, &pkt, NULL, &got_packet);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
                    exit_program(1);
                }
                video_size += ret;
                /* two-pass log for the delayed frames as well */
                if (ost->logfile && enc->stats_out) {
                    fprintf(ost->logfile, "%s", enc->stats_out);
                }
                if (!got_packet) {
                    stop_encoding = 1;
                    break;
                }
                /* rescale packet timestamps from codec to muxer time base */
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts = av_rescale_q(pkt.pts, enc->time_base, ost->st->time_base);
                if (pkt.dts != AV_NOPTS_VALUE)
                    pkt.dts = av_rescale_q(pkt.dts, enc->time_base, ost->st->time_base);
                write_frame(os, &pkt, ost);
                break;
            default:
                stop_encoding = 1;
            }
            if (stop_encoding)
                break;
        }
    }
}
  1517. /*
  1518. * Check whether a packet from ist should be written into ost at this time
  1519. */
  1520. static int check_output_constraints(InputStream *ist, OutputStream *ost)
  1521. {
  1522. OutputFile *of = &output_files[ost->file_index];
  1523. int ist_index = ist - input_streams;
  1524. if (ost->source_index != ist_index)
  1525. return 0;
  1526. if (of->start_time && ist->last_dts < of->start_time)
  1527. return 0;
  1528. return 1;
  1529. }
/*
 * Copy one input packet to an output stream without re-encoding: rescale
 * its timestamps into the output time base (offset by the output file's
 * start time), optionally run the stream parser's bitstream adaptation,
 * and mux it.  Honors -copyinkf and the output recording time (-t).
 */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = &output_files[ost->file_index];
    int64_t ost_tb_start_time = av_rescale_q(of->start_time, AV_TIME_BASE_Q, ost->st->time_base);
    AVPacket opkt;

    av_init_packet(&opkt);

    /* drop non-key leading packets unless -copyinkf was given */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
        !ost->copy_initial_nonkeyframes)
        return;

    if (of->recording_time != INT64_MAX &&
        ist->last_dts >= of->recording_time + of->start_time) {
        ost->is_past_recording_time = 1;
        return;
    }

    /* force the input stream PTS */
    if (ost->st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        audio_size += pkt->size;
    else if (ost->st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        video_size += pkt->size;
        ost->sync_opts++;
    }

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->st->time_base) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    /* packets must carry a dts; synthesize one from last_dts if missing */
    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->last_dts, AV_TIME_BASE_Q, ost->st->time_base);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->st->time_base);
    opkt.dts -= ost_tb_start_time;

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->st->time_base);
    opkt.flags    = pkt->flags;

    // FIXME remove the following 2 lines they shall be replaced by the bitstream filters
    if (  ost->st->codec->codec_id != CODEC_ID_H264
       && ost->st->codec->codec_id != CODEC_ID_MPEG1VIDEO
       && ost->st->codec->codec_id != CODEC_ID_MPEG2VIDEO
       && ost->st->codec->codec_id != CODEC_ID_VC1
       ) {
        /* av_parser_change may allocate a new buffer; mark it for freeing */
        if (av_parser_change(ist->st->parser, ost->st->codec, &opkt.data, &opkt.size, pkt->data, pkt->size, pkt->flags & AV_PKT_FLAG_KEY))
            opkt.destruct = av_destruct_packet;
    } else {
        opkt.data = pkt->data;
        opkt.size = pkt->size;
    }

    write_frame(of->ctx, &opkt, ost);
    ost->st->codec->frame_number++;
    av_free_packet(&opkt);
}
  1578. static void rate_emu_sleep(InputStream *ist)
  1579. {
  1580. if (input_files[ist->file_index].rate_emu) {
  1581. int64_t pts = av_rescale(ist->last_dts, 1000000, AV_TIME_BASE);
  1582. int64_t now = av_gettime() - ist->start;
  1583. if (pts > now)
  1584. usleep(pts - now);
  1585. }
  1586. }
/*
 * Decode one audio packet from ist, update the stream's next_dts estimate,
 * apply the -vol volume adjustment in-place on the decoded samples, and
 * hand the frame to every output stream that encodes from this input.
 *
 * Returns the number of bytes consumed by the decoder, or a negative
 * AVERROR code.  *got_output is set when a frame was produced.
 */
static int transcode_audio(InputStream *ist, AVPacket *pkt, int *got_output)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->st->codec;
    int bps = av_get_bytes_per_sample(ist->st->codec->sample_fmt);
    int i, ret;

    /* reuse one AVFrame per input stream across calls */
    if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
        return AVERROR(ENOMEM);
    else
        avcodec_get_frame_defaults(ist->decoded_frame);
    decoded_frame = ist->decoded_frame;

    ret = avcodec_decode_audio4(avctx, decoded_frame, got_output, pkt);
    if (ret < 0) {
        return ret;
    }

    if (!*got_output) {
        /* no audio frame */
        return ret;
    }

    /* if the decoder provides a pts, use it instead of the last packet pts.
       the decoder could be delaying output by a packet or more. */
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        ist->next_dts = decoded_frame->pts;

    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;

    // preprocess audio (volume)
    if (audio_volume != 256) {
        int decoded_data_size = decoded_frame->nb_samples * avctx->channels * bps;
        void *samples = decoded_frame->data[0];
        /* scale every sample by audio_volume/256, saturating to the
         * format's range; each case handles one sample format */
        switch (avctx->sample_fmt) {
        case AV_SAMPLE_FMT_U8:
        {
            uint8_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                /* u8 is offset-binary: recenter around 0 before scaling */
                int v = (((*volp - 128) * audio_volume + 128) >> 8) + 128;
                *volp++ = av_clip_uint8(v);
            }
            break;
        }
        case AV_SAMPLE_FMT_S16:
        {
            int16_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                int v = ((*volp) * audio_volume + 128) >> 8;
                *volp++ = av_clip_int16(v);
            }
            break;
        }
        case AV_SAMPLE_FMT_S32:
        {
            int32_t *volp = samples;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                /* widen to 64 bits so the multiply cannot overflow */
                int64_t v = (((int64_t)*volp * audio_volume + 128) >> 8);
                *volp++ = av_clipl_int32(v);
            }
            break;
        }
        case AV_SAMPLE_FMT_FLT:
        {
            float *volp = samples;
            float scale = audio_volume / 256.f;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                *volp++ *= scale;
            }
            break;
        }
        case AV_SAMPLE_FMT_DBL:
        {
            double *volp = samples;
            double scale = audio_volume / 256.;
            for (i = 0; i < (decoded_data_size / sizeof(*volp)); i++) {
                *volp++ *= scale;
            }
            break;
        }
        default:
            av_log(NULL, AV_LOG_FATAL,
                   "Audio volume adjustment on sample format %s is not supported.\n",
                   av_get_sample_fmt_name(ist->st->codec->sample_fmt));
            exit_program(1);
        }
    }

    rate_emu_sleep(ist);

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = &output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
            continue;
        do_audio_out(output_files[ost->file_index].ctx, ost, ist, decoded_frame);
    }

    return ret;
}
  1680. static int transcode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *pkt_pts)
  1681. {
  1682. AVFrame *decoded_frame, *filtered_frame = NULL;
  1683. void *buffer_to_free = NULL;
  1684. int i, ret = 0;
  1685. float quality;
  1686. #if CONFIG_AVFILTER
  1687. int frame_available = 1;
  1688. #endif
  1689. if (!ist->decoded_frame && !(ist->decoded_frame = avcodec_alloc_frame()))
  1690. return AVERROR(ENOMEM);
  1691. else
  1692. avcodec_get_frame_defaults(ist->decoded_frame);
  1693. decoded_frame = ist->decoded_frame;
  1694. pkt->pts = *pkt_pts;
  1695. pkt->dts = ist->last_dts;
  1696. *pkt_pts = AV_NOPTS_VALUE;
  1697. ret = avcodec_decode_video2(ist->st->codec,
  1698. decoded_frame, got_output, pkt);
  1699. if (ret < 0)
  1700. return ret;
  1701. quality = same_quant ? decoded_frame->quality : 0;
  1702. if (!*got_output) {
  1703. /* no picture yet */
  1704. return ret;
  1705. }
  1706. decoded_frame->pts = guess_correct_pts(&ist->pts_ctx, decoded_frame->pkt_pts,
  1707. decoded_frame->pkt_dts);
  1708. pkt->size = 0;
  1709. pre_process_video_frame(ist, (AVPicture *)decoded_frame, &buffer_to_free);
  1710. rate_emu_sleep(ist);
  1711. for (i = 0; i < nb_output_streams; i++) {
  1712. OutputStream *ost = &output_streams[i];
  1713. int frame_size, resample_changed;
  1714. if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
  1715. continue;
  1716. #if CONFIG_AVFILTER
  1717. resample_changed = ost->resample_width != decoded_frame->width ||
  1718. ost->resample_height != decoded_frame->height ||
  1719. ost->resample_pix_fmt != decoded_frame->format;
  1720. if (resample_changed) {
  1721. av_log(NULL, AV_LOG_INFO,
  1722. "Input stream #%d:%d frame changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
  1723. ist->file_index, ist->st->index,
  1724. ost->resample_width, ost->resample_height, av_get_pix_fmt_name(ost->resample_pix_fmt),
  1725. decoded_frame->width, decoded_frame->height, av_get_pix_fmt_name(decoded_frame->format));
  1726. avfilter_graph_free(&ost->graph);
  1727. if (configure_video_filters(ist, ost)) {
  1728. av_log(NULL, AV_LOG_FATAL, "Error reinitializing filters!\n");
  1729. exit_program(1);
  1730. }
  1731. ost->resample_width = decoded_frame->width;
  1732. ost->resample_height = decoded_frame->height;
  1733. ost->resample_pix_fmt = decoded_frame->format;
  1734. }
  1735. if (ist->st->sample_aspect_ratio.num)
  1736. decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
  1737. if (ist->st->codec->codec->capabilities & CODEC_CAP_DR1) {
  1738. FrameBuffer *buf = decoded_frame->opaque;
  1739. AVFilterBufferRef *fb = avfilter_get_video_buffer_ref_from_arrays(
  1740. decoded_frame->data, decoded_frame->linesize,
  1741. AV_PERM_READ | AV_PERM_PRESERVE,
  1742. ist->st->codec->width, ist->st->codec->height,
  1743. ist->st->codec->pix_fmt);
  1744. avfilter_copy_frame_props(fb, decoded_frame);
  1745. fb->buf->priv = buf;
  1746. fb->buf->free = filter_release_buffer;
  1747. buf->refcount++;
  1748. av_buffersrc_buffer(ost->input_video_filter, fb);
  1749. } else
  1750. av_vsrc_buffer_add_frame(ost->input_video_filter, decoded_frame,
  1751. decoded_frame->pts, decoded_frame->sample_aspect_ratio);
  1752. if (!ist->filtered_frame && !(ist->filtered_frame = avcodec_alloc_frame())) {
  1753. av_free(buffer_to_free);
  1754. return AVERROR(ENOMEM);
  1755. } else
  1756. avcodec_get_frame_defaults(ist->filtered_frame);
  1757. filtered_frame = ist->filtered_frame;
  1758. frame_available = avfilter_poll_frame(ost->output_video_filter->inputs[0]);
  1759. while (frame_available) {
  1760. AVRational ist_pts_tb;
  1761. if (ost->output_video_filter)
  1762. get_filtered_video_frame(ost->output_video_filter, filtered_frame, &ost->picref, &ist_pts_tb);
  1763. if (ost->picref)
  1764. filtered_frame->pts = av_rescale_q(ost->picref->pts, ist_pts_tb, AV_TIME_BASE_Q);
  1765. if (ost->picref->video && !ost->frame_aspect_ratio)
  1766. ost->st->codec->sample_aspect_ratio = ost->picref->video->pixel_aspect;
  1767. #else
  1768. filtered_frame = decoded_frame;
  1769. #endif
  1770. do_video_out(output_files[ost->file_index].ctx, ost, ist, filtered_frame, &frame_size,
  1771. same_quant ? quality : ost->st->codec->global_quality);
  1772. if (vstats_filename && frame_size)
  1773. do_video_stats(output_files[ost->file_index].ctx, ost, frame_size);
  1774. #if CONFIG_AVFILTER
  1775. frame_available = ost->output_video_filter && avfilter_poll_frame(ost->output_video_filter->inputs[0]);
  1776. if (ost->picref)
  1777. avfilter_unref_buffer(ost->picref);
  1778. }
  1779. #endif
  1780. }
  1781. av_free(buffer_to_free);
  1782. return ret;
  1783. }
  1784. static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output)
  1785. {
  1786. AVSubtitle subtitle;
  1787. int i, ret = avcodec_decode_subtitle2(ist->st->codec,
  1788. &subtitle, got_output, pkt);
  1789. if (ret < 0)
  1790. return ret;
  1791. if (!*got_output)
  1792. return ret;
  1793. rate_emu_sleep(ist);
  1794. for (i = 0; i < nb_output_streams; i++) {
  1795. OutputStream *ost = &output_streams[i];
  1796. if (!check_output_constraints(ist, ost) || !ost->encoding_needed)
  1797. continue;
  1798. do_subtitle_out(output_files[ost->file_index].ctx, ost, ist, &subtitle, pkt->pts);
  1799. }
  1800. avsubtitle_free(&subtitle);
  1801. return ret;
  1802. }
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/*
 * Feed one demuxed packet through this input stream: decode it (possibly
 * several frames per packet) and/or stream-copy it to matching outputs,
 * while maintaining last_dts/next_dts in AV_TIME_BASE units.
 *
 * Control flow is deliberately unusual: on EOF a synthetic empty packet is
 * built and `goto handle_eof` jumps straight INTO the decode loop body,
 * bypassing the while condition for the first iteration.
 */
static int output_packet(InputStream *ist,
                         OutputStream *ost_table, int nb_ostreams,
                         const AVPacket *pkt)
{
    int i;
    int got_output;
    int64_t pkt_pts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->last_dts;

    if (pkt == NULL) {
        /* EOF handling: empty packet makes the decoder emit buffered frames */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
        goto handle_eof;
    } else {
        avpkt = *pkt;
    }

    if (pkt->dts != AV_NOPTS_VALUE)
        ist->next_dts = ist->last_dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt_pts = av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);

    // while we have more to decode or while the decoder did output something on EOF
    /* NOTE: got_output is uninitialized on entry; it is only read in the
       condition when !pkt, and in that case the goto above guarantees one
       decode call has run (setting it) before the condition is evaluated. */
    while (ist->decoding_needed && (avpkt.size > 0 || (!pkt && got_output))) {
        int ret = 0;
    handle_eof:
        ist->last_dts = ist->next_dts;

        if (avpkt.size && avpkt.size != pkt->size) {
            /* a decoder consuming less than the whole packet means several
               frames share one packet; warn once per stream */
            av_log(NULL, ist->showed_multi_packet_warning ? AV_LOG_VERBOSE : AV_LOG_WARNING,
                   "Multiple frames in a packet from stream %d\n", pkt->stream_index);
            ist->showed_multi_packet_warning = 1;
        }

        switch (ist->st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = transcode_audio    (ist, &avpkt, &got_output);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = transcode_video    (ist, &avpkt, &got_output, &pkt_pts);
            /* advance next_dts by the best available frame-duration estimate:
               packet duration, then r_frame_rate, then codec time base */
            if (avpkt.duration)
                ist->next_dts += av_rescale_q(avpkt.duration, ist->st->time_base, AV_TIME_BASE_Q);
            else if (ist->st->r_frame_rate.num)
                ist->next_dts += av_rescale_q(1, (AVRational){ist->st->r_frame_rate.den,
                                                              ist->st->r_frame_rate.num},
                                              AV_TIME_BASE_Q);
            else if (ist->st->codec->time_base.num != 0) {
                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 :
                                              ist->st->codec->ticks_per_frame;
                ist->next_dts += av_rescale_q(ticks, ist->st->codec->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ret = transcode_subtitles(ist, &avpkt, &got_output);
            break;
        default:
            return -1;
        }

        if (ret < 0)
            return ret;
        // touch data and size only if not EOF
        if (pkt) {
            avpkt.data += ret;
            avpkt.size -= ret;
        }
        if (!got_output) {
            continue;
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed) {
        rate_emu_sleep(ist);
        ist->last_dts = ist->next_dts;
        switch (ist->st->codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ist->next_dts += ((int64_t)AV_TIME_BASE * ist->st->codec->frame_size) /
                             ist->st->codec->sample_rate;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->st->codec->time_base.num != 0) {
                int ticks = ist->st->parser ? ist->st->parser->repeat_pict + 1 : ist->st->codec->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->st->codec->time_base.num * ticks) /
                                  ist->st->codec->time_base.den;
            }
            break;
        }
    }

    /* forward the original packet to all stream-copy outputs (skipped on EOF) */
    for (i = 0; pkt && i < nb_ostreams; i++) {
        OutputStream *ost = &ost_table[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return 0;
}
  1899. static void print_sdp(OutputFile *output_files, int n)
  1900. {
  1901. char sdp[2048];
  1902. int i;
  1903. AVFormatContext **avc = av_malloc(sizeof(*avc) * n);
  1904. if (!avc)
  1905. exit_program(1);
  1906. for (i = 0; i < n; i++)
  1907. avc[i] = output_files[i].ctx;
  1908. av_sdp_create(avc, n, sdp, sizeof(sdp));
  1909. printf("SDP:\n%s\n", sdp);
  1910. fflush(stdout);
  1911. av_freep(&avc);
  1912. }
  1913. static int init_input_stream(int ist_index, OutputStream *output_streams, int nb_output_streams,
  1914. char *error, int error_len)
  1915. {
  1916. int i;
  1917. InputStream *ist = &input_streams[ist_index];
  1918. if (ist->decoding_needed) {
  1919. AVCodec *codec = ist->dec;
  1920. if (!codec) {
  1921. snprintf(error, error_len, "Decoder (codec id %d) not found for input stream #%d:%d",
  1922. ist->st->codec->codec_id, ist->file_index, ist->st->index);
  1923. return AVERROR(EINVAL);
  1924. }
  1925. /* update requested sample format for the decoder based on the
  1926. corresponding encoder sample format */
  1927. for (i = 0; i < nb_output_streams; i++) {
  1928. OutputStream *ost = &output_streams[i];
  1929. if (ost->source_index == ist_index) {
  1930. update_sample_fmt(ist->st->codec, codec, ost->st->codec);
  1931. break;
  1932. }
  1933. }
  1934. if (codec->type == AVMEDIA_TYPE_VIDEO && codec->capabilities & CODEC_CAP_DR1) {
  1935. ist->st->codec->get_buffer = codec_get_buffer;
  1936. ist->st->codec->release_buffer = codec_release_buffer;
  1937. ist->st->codec->opaque = ist;
  1938. }
  1939. if (!av_dict_get(ist->opts, "threads", NULL, 0))
  1940. av_dict_set(&ist->opts, "threads", "auto", 0);
  1941. if (avcodec_open2(ist->st->codec, codec, &ist->opts) < 0) {
  1942. snprintf(error, error_len, "Error while opening decoder for input stream #%d:%d",
  1943. ist->file_index, ist->st->index);
  1944. return AVERROR(EINVAL);
  1945. }
  1946. assert_codec_experimental(ist->st->codec, 0);
  1947. assert_avoptions(ist->opts);
  1948. }
  1949. ist->last_dts = ist->st->avg_frame_rate.num ? - ist->st->codec->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
  1950. ist->next_dts = AV_NOPTS_VALUE;
  1951. init_pts_correction(&ist->pts_ctx);
  1952. ist->is_start = 1;
  1953. return 0;
  1954. }
/*
 * One-time setup before the main transcode loop: derive encoder parameters
 * from each source stream (or copy them verbatim for stream copy), open all
 * encoders and decoders, write output file headers, and print the stream
 * mapping.  On failure it still falls through `dump_format` so the user sees
 * the negotiated parameters before the error message.
 */
static int transcode_init(OutputFile *output_files,
                          int nb_output_files,
                          InputFile *input_files,
                          int nb_input_files)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    AVCodecContext *codec, *icodec;
    OutputStream *ost;
    InputStream *ist;
    char error[1024];
    int want_sdp = 1;

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = &input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index].start = av_gettime();
    }

    /* output stream init */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i].ctx;
        if (!oc->nb_streams && !(oc->oformat->flags & AVFMT_NOSTREAMS)) {
            av_dump_format(oc, i, oc->filename, 1);
            av_log(NULL, AV_LOG_ERROR, "Output file #%d does not contain any stream\n", i);
            return AVERROR(EINVAL);
        }
    }

    /* for each output stream, we compute the right encoding parameters */
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];
        oc  = output_files[ost->file_index].ctx;
        ist = &input_streams[ost->source_index];

        if (ost->attachment_filename)
            continue;

        codec  = ost->st->codec;
        icodec = ist->st->codec;

        ost->st->disposition          = ist->st->disposition;
        codec->bits_per_raw_sample    = icodec->bits_per_raw_sample;
        codec->chroma_sample_location = icodec->chroma_sample_location;

        if (ost->stream_copy) {
            /* extradata is copied with padding; 64-bit sum guards overflow */
            uint64_t extra_size = (uint64_t)icodec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE;

            if (extra_size > INT_MAX) {
                return AVERROR(EINVAL);
            }

            /* if stream_copy is selected, no need to decode or encode */
            codec->codec_id   = icodec->codec_id;
            codec->codec_type = icodec->codec_type;

            if (!codec->codec_tag) {
                /* keep the source tag only when the target container maps it
                   to the same codec id (or knows no tag for this codec) */
                if (!oc->oformat->codec_tag ||
                     av_codec_get_id (oc->oformat->codec_tag, icodec->codec_tag) == codec->codec_id ||
                     av_codec_get_tag(oc->oformat->codec_tag, icodec->codec_id) <= 0)
                    codec->codec_tag = icodec->codec_tag;
            }

            codec->bit_rate       = icodec->bit_rate;
            codec->rc_max_rate    = icodec->rc_max_rate;
            codec->rc_buffer_size = icodec->rc_buffer_size;
            codec->field_order    = icodec->field_order;
            codec->extradata      = av_mallocz(extra_size);
            if (!codec->extradata) {
                return AVERROR(ENOMEM);
            }
            memcpy(codec->extradata, icodec->extradata, icodec->extradata_size);
            codec->extradata_size = icodec->extradata_size;
            if (!copy_tb) {
                /* derive the time base from the codec, folding in
                   ticks_per_frame (e.g. interlaced/field-coded input) */
                codec->time_base      = icodec->time_base;
                codec->time_base.num *= icodec->ticks_per_frame;
                av_reduce(&codec->time_base.num, &codec->time_base.den,
                          codec->time_base.num, codec->time_base.den, INT_MAX);
            } else
                codec->time_base = ist->st->time_base;

            switch (codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                if (audio_volume != 256) {
                    av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
                    exit_program(1);
                }
                codec->channel_layout     = icodec->channel_layout;
                codec->sample_rate        = icodec->sample_rate;
                codec->channels           = icodec->channels;
                codec->frame_size         = icodec->frame_size;
                codec->audio_service_type = icodec->audio_service_type;
                codec->block_align        = icodec->block_align;
                break;
            case AVMEDIA_TYPE_VIDEO:
                codec->pix_fmt      = icodec->pix_fmt;
                codec->width        = icodec->width;
                codec->height       = icodec->height;
                codec->has_b_frames = icodec->has_b_frames;
                if (!codec->sample_aspect_ratio.num) {
                    /* prefer the container's SAR, then the codec's, else 0/1 */
                    codec->sample_aspect_ratio   =
                    ost->st->sample_aspect_ratio =
                        ist->st->sample_aspect_ratio.num ? ist->st->sample_aspect_ratio :
                        ist->st->codec->sample_aspect_ratio.num ?
                        ist->st->codec->sample_aspect_ratio : (AVRational){0, 1};
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                codec->width  = icodec->width;
                codec->height = icodec->height;
                break;
            case AVMEDIA_TYPE_DATA:
            case AVMEDIA_TYPE_ATTACHMENT:
                break;
            default:
                abort();
            }
        } else {
            if (!ost->enc)
                ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);

            ist->decoding_needed = 1;
            ost->encoding_needed = 1;

            switch (codec->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                ost->fifo = av_fifo_alloc(1024);
                if (!ost->fifo) {
                    return AVERROR(ENOMEM);
                }
                ost->reformat_pair = MAKE_SFMT_PAIR(AV_SAMPLE_FMT_NONE, AV_SAMPLE_FMT_NONE);
                if (!codec->sample_rate)
                    codec->sample_rate = icodec->sample_rate;
                choose_sample_rate(ost->st, ost->enc);
                codec->time_base = (AVRational){ 1, codec->sample_rate };
                if (codec->sample_fmt == AV_SAMPLE_FMT_NONE)
                    codec->sample_fmt = icodec->sample_fmt;
                choose_sample_fmt(ost->st, ost->enc);
                if (!codec->channels)
                    codec->channels = icodec->channels;
                codec->channel_layout = icodec->channel_layout;
                /* drop an input layout inconsistent with the channel count */
                if (av_get_channel_layout_nb_channels(codec->channel_layout) != codec->channels)
                    codec->channel_layout = 0;
                ost->audio_resample       = codec->sample_rate != icodec->sample_rate || audio_sync_method > 1;
                icodec->request_channels  = codec->channels;
                ost->resample_sample_fmt  = icodec->sample_fmt;
                ost->resample_sample_rate = icodec->sample_rate;
                ost->resample_channels    = icodec->channels;
                break;
            case AVMEDIA_TYPE_VIDEO:
                if (codec->pix_fmt == PIX_FMT_NONE)
                    codec->pix_fmt = icodec->pix_fmt;
                choose_pixel_fmt(ost->st, ost->enc);

                if (ost->st->codec->pix_fmt == PIX_FMT_NONE) {
                    av_log(NULL, AV_LOG_FATAL, "Video pixel format is unknown, stream cannot be encoded\n");
                    exit_program(1);
                }

                if (!codec->width || !codec->height) {
                    codec->width  = icodec->width;
                    codec->height = icodec->height;
                }

                ost->video_resample = codec->width   != icodec->width  ||
                                      codec->height  != icodec->height ||
                                      codec->pix_fmt != icodec->pix_fmt;
                if (ost->video_resample) {
#if !CONFIG_AVFILTER
                    /* no libavfilter: set up a manual swscale conversion */
                    avcodec_get_frame_defaults(&ost->pict_tmp);
                    if (avpicture_alloc((AVPicture *)&ost->pict_tmp, codec->pix_fmt,
                                        codec->width, codec->height)) {
                        av_log(NULL, AV_LOG_FATAL, "Cannot allocate temp picture, check pix fmt\n");
                        exit_program(1);
                    }
                    ost->img_resample_ctx = sws_getContext(
                        icodec->width,
                        icodec->height,
                        icodec->pix_fmt,
                        codec->width,
                        codec->height,
                        codec->pix_fmt,
                        ost->sws_flags, NULL, NULL, NULL);
                    if (ost->img_resample_ctx == NULL) {
                        av_log(NULL, AV_LOG_FATAL, "Cannot get resampling context\n");
                        exit_program(1);
                    }
#endif
                    codec->bits_per_raw_sample = 0;
                }

                ost->resample_height  = icodec->height;
                ost->resample_width   = icodec->width;
                ost->resample_pix_fmt = icodec->pix_fmt;

                if (!ost->frame_rate.num)
                    ost->frame_rate = ist->st->r_frame_rate.num ? ist->st->r_frame_rate : (AVRational) { 25, 1 };
                /* snap to the nearest rate the encoder supports, unless forced */
                if (ost->enc && ost->enc->supported_framerates && !ost->force_fps) {
                    int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
                    ost->frame_rate = ost->enc->supported_framerates[idx];
                }
                codec->time_base = (AVRational){ost->frame_rate.den, ost->frame_rate.num};

#if CONFIG_AVFILTER
                if (configure_video_filters(ist, ost)) {
                    av_log(NULL, AV_LOG_FATAL, "Error opening filters!\n");
                    /* NOTE(review): exit(1) here, exit_program(1) elsewhere —
                       looks inconsistent; confirm cleanup expectations */
                    exit(1);
                }
#endif
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                codec->time_base = (AVRational){1, 1000};
                break;
            default:
                abort();
                break;
            }
            /* two pass mode */
            if ((codec->flags & (CODEC_FLAG_PASS1 | CODEC_FLAG_PASS2))) {
                char logfilename[1024];
                FILE *f;

                snprintf(logfilename, sizeof(logfilename), "%s-%d.log",
                         pass_logfilename_prefix ? pass_logfilename_prefix : DEFAULT_PASS_LOGFILENAME_PREFIX,
                         i);
                if (codec->flags & CODEC_FLAG_PASS1) {
                    f = fopen(logfilename, "wb");
                    if (!f) {
                        av_log(NULL, AV_LOG_FATAL, "Cannot write log file '%s' for pass-1 encoding: %s\n",
                               logfilename, strerror(errno));
                        exit_program(1);
                    }
                    ost->logfile = f;
                } else {
                    char *logbuffer;
                    size_t logbuffer_size;
                    if (cmdutils_read_file(logfilename, &logbuffer, &logbuffer_size) < 0) {
                        av_log(NULL, AV_LOG_FATAL, "Error reading log file '%s' for pass-2 encoding\n",
                               logfilename);
                        exit_program(1);
                    }
                    /* stats_in is freed at shutdown via av_freep(&...->stats_in) */
                    codec->stats_in = logbuffer;
                }
            }
        }
    }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];
        if (ost->encoding_needed) {
            AVCodec      *codec = ost->enc;
            AVCodecContext *dec = input_streams[ost->source_index].st->codec;
            if (!codec) {
                snprintf(error, sizeof(error), "Encoder (codec id %d) not found for output stream #%d:%d",
                         ost->st->codec->codec_id, ost->file_index, ost->index);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }
            if (dec->subtitle_header) {
                ost->st->codec->subtitle_header = av_malloc(dec->subtitle_header_size);
                if (!ost->st->codec->subtitle_header) {
                    ret = AVERROR(ENOMEM);
                    goto dump_format;
                }
                memcpy(ost->st->codec->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
                ost->st->codec->subtitle_header_size = dec->subtitle_header_size;
            }
            if (!av_dict_get(ost->opts, "threads", NULL, 0))
                av_dict_set(&ost->opts, "threads", "auto", 0);
            if (avcodec_open2(ost->st->codec, codec, &ost->opts) < 0) {
                snprintf(error, sizeof(error), "Error while opening encoder for output stream #%d:%d - maybe incorrect parameters such as bit_rate, rate, width or height",
                         ost->file_index, ost->index);
                ret = AVERROR(EINVAL);
                goto dump_format;
            }
            assert_codec_experimental(ost->st->codec, 1);
            assert_avoptions(ost->opts);
            if (ost->st->codec->bit_rate && ost->st->codec->bit_rate < 1000)
                av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                             "It takes bits/s as argument, not kbits/s\n");
            /* NOTE(review): 'extra_size' here is presumably a file-scope
               statistics accumulator declared outside this view — confirm */
            extra_size += ost->st->codec->extradata_size;

            if (ost->st->codec->me_threshold)
                input_streams[ost->source_index].st->codec->debug |= FF_DEBUG_MV;
        }
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, output_streams, nb_output_streams, error, sizeof(error))) < 0)
            goto dump_format;

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = &input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            /* keep a program if at least one of its streams is mapped */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]].discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* open files and write file headers */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i].ctx;
        oc->interrupt_callback = int_cb;
        if (avformat_write_header(oc, &output_files[i].opts) < 0) {
            snprintf(error, sizeof(error), "Could not write header for output file #%d (incorrect codec parameters ?)", i);
            ret = AVERROR(EINVAL);
            goto dump_format;
        }
        assert_avoptions(output_files[i].opts);
        /* SDP output only makes sense when every output is RTP */
        if (strcmp(oc->oformat->name, "rtp")) {
            want_sdp = 0;
        }
    }

 dump_format:
    /* dump the file output parameters - cannot be done before in case
       of stream copy */
    for (i = 0; i < nb_output_files; i++) {
        av_dump_format(output_files[i].ctx, i, output_files[i].ctx->filename, 1);
    }

    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }
        av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index].file_index,
               input_streams[ost->source_index].st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != &input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else
            av_log(NULL, AV_LOG_INFO, " (%s -> %s)", input_streams[ost->source_index].dec ?
                   input_streams[ost->source_index].dec->name : "?",
                   ost->enc ? ost->enc->name : "?");
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    /* a nonzero ret means one of the goto dump_format paths was taken */
    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    if (want_sdp) {
        print_sdp(output_files, nb_output_files);
    }

    return 0;
}
/*
 * The following code is the main loop of the file converter
 */
/*
 * Main transcoding loop: repeatedly pick the input file whose output is
 * furthest behind, read a packet from it, and push it through
 * output_packet(); then flush decoders/encoders, write trailers, and free
 * everything.  The `no_packet` byte-per-input-file array marks demuxers that
 * returned EAGAIN so they are skipped until every candidate stalls.
 */
static int transcode(OutputFile *output_files,
                     int nb_output_files,
                     InputFile *input_files,
                     int nb_input_files)
{
    int ret, i;
    AVFormatContext *is, *os;
    OutputStream *ost;
    InputStream *ist;
    uint8_t *no_packet;
    int no_packet_count = 0;
    int64_t timer_start;

    if (!(no_packet = av_mallocz(nb_input_files)))
        exit_program(1);

    ret = transcode_init(output_files, nb_output_files, input_files, nb_input_files);
    if (ret < 0)
        goto fail;

    av_log(NULL, AV_LOG_INFO, "Press ctrl-c to stop encoding\n");
    term_init();

    timer_start = av_gettime();

    /* loop until a termination signal is received or all inputs finish */
    for (; received_sigterm == 0;) {
        int file_index, ist_index;
        AVPacket pkt;
        int64_t ipts_min;
        double opts_min;

        ipts_min = INT64_MAX;
        opts_min = 1e100;

        /* select the stream that we must read now by looking at the
           smallest output pts */
        file_index = -1;
        for (i = 0; i < nb_output_streams; i++) {
            OutputFile *of;
            int64_t ipts;
            double  opts;
            ost = &output_streams[i];
            of  = &output_files[ost->file_index];
            os  = output_files[ost->file_index].ctx;
            ist = &input_streams[ost->source_index];
            if (ost->is_past_recording_time || no_packet[ist->file_index] ||
                (os->pb && avio_tell(os->pb) >= of->limit_filesize))
                continue;
            opts = ost->st->pts.val * av_q2d(ost->st->time_base);
            ipts = ist->last_dts;
            if (!input_files[ist->file_index].eof_reached) {
                /* with -isync the choice is based on input dts, otherwise
                   on output pts */
                if (ipts < ipts_min) {
                    ipts_min = ipts;
                    if (input_sync)
                        file_index = ist->file_index;
                }
                if (opts < opts_min) {
                    opts_min = opts;
                    if (!input_sync)
                        file_index = ist->file_index;
                }
            }
            /* frame-count limit reached: retire every stream of this file */
            if (ost->frame_number >= ost->max_frames) {
                int j;
                for (j = 0; j < of->ctx->nb_streams; j++)
                    output_streams[of->ost_index + j].is_past_recording_time = 1;
                continue;
            }
        }
        /* if none, if is finished */
        if (file_index < 0) {
            if (no_packet_count) {
                /* every input was EAGAIN: clear the marks and retry shortly */
                no_packet_count = 0;
                memset(no_packet, 0, nb_input_files);
                usleep(10000);
                continue;
            }
            break;
        }

        /* read a frame from it and output it in the fifo */
        is  = input_files[file_index].ctx;
        ret = av_read_frame(is, &pkt);
        if (ret == AVERROR(EAGAIN)) {
            no_packet[file_index] = 1;
            no_packet_count++;
            continue;
        }
        if (ret < 0) {
            input_files[file_index].eof_reached = 1;
            if (opt_shortest)
                break;
            else
                continue;
        }

        no_packet_count = 0;
        memset(no_packet, 0, nb_input_files);

        if (do_pkt_dump) {
            av_pkt_dump_log2(NULL, AV_LOG_DEBUG, &pkt, do_hex_dump,
                             is->streams[pkt.stream_index]);
        }
        /* the following test is needed in case new streams appear
           dynamically in stream : we ignore them */
        if (pkt.stream_index >= input_files[file_index].nb_streams)
            goto discard_packet;
        ist_index = input_files[file_index].ist_index + pkt.stream_index;
        ist = &input_streams[ist_index];
        if (ist->discard)
            goto discard_packet;

        /* apply the per-file timestamp offset, then the per-stream scale */
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts += av_rescale_q(input_files[ist->file_index].ts_offset, AV_TIME_BASE_Q, ist->st->time_base);

        if (pkt.pts != AV_NOPTS_VALUE)
            pkt.pts *= ist->ts_scale;
        if (pkt.dts != AV_NOPTS_VALUE)
            pkt.dts *= ist->ts_scale;

        //fprintf(stderr, "next:%"PRId64" dts:%"PRId64" off:%"PRId64" %d\n",
        //        ist->next_dts,
        //        pkt.dts, input_files[ist->file_index].ts_offset,
        //        ist->st->codec->codec_type);
        /* for containers prone to timestamp jumps (e.g. MPEG-TS), fold large
           discontinuities into ts_offset so downstream timing stays smooth */
        if (pkt.dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE
            && (is->iformat->flags & AVFMT_TS_DISCONT)) {
            int64_t pkt_dts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
            int64_t delta   = pkt_dts - ist->next_dts;
            if ((FFABS(delta) > 1LL * dts_delta_threshold * AV_TIME_BASE || pkt_dts + 1 < ist->last_dts) && !copy_ts) {
                input_files[ist->file_index].ts_offset -= delta;
                av_log(NULL, AV_LOG_DEBUG,
                       "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
                       delta, input_files[ist->file_index].ts_offset);
                pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
                if (pkt.pts != AV_NOPTS_VALUE)
                    pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
            }
        }

        // fprintf(stderr,"read #%d.%d size=%d\n", ist->file_index, ist->st->index, pkt.size);
        if (output_packet(ist, output_streams, nb_output_streams, &pkt) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d\n",
                   ist->file_index, ist->st->index);
            if (exit_on_error)
                exit_program(1);
            av_free_packet(&pkt);
            continue;
        }

    discard_packet:
        av_free_packet(&pkt);

        /* dump report by using the output first video and audio streams */
        print_report(output_files, output_streams, nb_output_streams, 0, timer_start);
    }

    /* at the end of stream, we must flush the decoder buffers */
    for (i = 0; i < nb_input_streams; i++) {
        ist = &input_streams[i];
        if (ist->decoding_needed) {
            output_packet(ist, output_streams, nb_output_streams, NULL);
        }
    }
    flush_encoders(output_streams, nb_output_streams);

    term_exit();

    /* write the trailer if needed and close file */
    for (i = 0; i < nb_output_files; i++) {
        os = output_files[i].ctx;
        av_write_trailer(os);
    }

    /* dump report by using the first video and audio streams */
    print_report(output_files, output_streams, nb_output_streams, 1, timer_start);

    /* close each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        ost = &output_streams[i];
        if (ost->encoding_needed) {
            av_freep(&ost->st->codec->stats_in);
            avcodec_close(ost->st->codec);
        }
#if CONFIG_AVFILTER
        avfilter_graph_free(&ost->graph);
#endif
    }

    /* close each decoder */
    for (i = 0; i < nb_input_streams; i++) {
        ist = &input_streams[i];
        if (ist->decoding_needed) {
            avcodec_close(ist->st->codec);
        }
    }

    /* finished ! */
    ret = 0;

 fail:
    /* shared cleanup path; also reached when transcode_init() fails */
    av_freep(&no_packet);

    if (output_streams) {
        for (i = 0; i < nb_output_streams; i++) {
            ost = &output_streams[i];
            if (ost) {
                if (ost->stream_copy)
                    av_freep(&ost->st->codec->extradata);
                if (ost->logfile) {
                    fclose(ost->logfile);
                    ost->logfile = NULL;
                }
                av_fifo_free(ost->fifo); /* works even if fifo is not
                                            initialized but set to zero */
                av_freep(&ost->st->codec->subtitle_header);
                av_free(ost->pict_tmp.data[0]);
                av_free(ost->forced_kf_pts);
                if (ost->video_resample)
                    sws_freeContext(ost->img_resample_ctx);
                if (ost->resample)
                    audio_resample_close(ost->resample);
                if (ost->reformat_ctx)
                    av_audio_convert_free(ost->reformat_ctx);
                av_dict_free(&ost->opts);
            }
        }
    }
    return ret;
}
  2503. static double parse_frame_aspect_ratio(const char *arg)
  2504. {
  2505. int x = 0, y = 0;
  2506. double ar = 0;
  2507. const char *p;
  2508. char *end;
  2509. p = strchr(arg, ':');
  2510. if (p) {
  2511. x = strtol(arg, &end, 10);
  2512. if (end == p)
  2513. y = strtol(end + 1, &end, 10);
  2514. if (x > 0 && y > 0)
  2515. ar = (double)x / (double)y;
  2516. } else
  2517. ar = strtod(arg, NULL);
  2518. if (!ar) {
  2519. av_log(NULL, AV_LOG_FATAL, "Incorrect aspect ratio specification.\n");
  2520. exit_program(1);
  2521. }
  2522. return ar;
  2523. }
/* "-acodec foo" is a shorthand for "-codec:a foo". */
static int opt_audio_codec(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "codec:a", arg, options);
}
/* "-vcodec foo" is a shorthand for "-codec:v foo". */
static int opt_video_codec(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "codec:v", arg, options);
}
/* "-scodec foo" is a shorthand for "-codec:s foo". */
static int opt_subtitle_codec(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "codec:s", arg, options);
}
/* "-dcodec foo" is a shorthand for "-codec:d foo". */
static int opt_data_codec(OptionsContext *o, const char *opt, const char *arg)
{
    return parse_option(o, "codec:d", arg, options);
}
/*
 * Parse one -map argument of the form
 * "[-]input_file_idx[:stream_spec][,sync_spec]".
 * Matching streams are appended to o->stream_maps; a leading '-'
 * instead marks matching, already-created maps as disabled.
 */
static int opt_map(OptionsContext *o, const char *opt, const char *arg)
{
    StreamMap *m = NULL;
    int i, negative = 0, file_idx;
    int sync_file_idx = -1, sync_stream_idx;
    char *p, *sync;
    char *map;

    /* a leading '-' makes this a "negative" (disabling) map */
    if (*arg == '-') {
        negative = 1;
        arg++;
    }
    map = av_strdup(arg);

    /* parse sync stream first, just pick first matching stream */
    if (sync = strchr(map, ',')) {
        *sync = 0;
        sync_file_idx = strtol(sync + 1, &sync, 0);
        if (sync_file_idx >= nb_input_files || sync_file_idx < 0) {
            av_log(NULL, AV_LOG_FATAL, "Invalid sync file index: %d.\n", sync_file_idx);
            exit_program(1);
        }
        if (*sync)
            sync++;
        for (i = 0; i < input_files[sync_file_idx].nb_streams; i++)
            if (check_stream_specifier(input_files[sync_file_idx].ctx,
                                       input_files[sync_file_idx].ctx->streams[i], sync) == 1) {
                sync_stream_idx = i;
                break;
            }
        if (i == input_files[sync_file_idx].nb_streams) {
            av_log(NULL, AV_LOG_FATAL, "Sync stream specification in map %s does not "
                                       "match any streams.\n", arg);
            exit_program(1);
        }
    }

    file_idx = strtol(map, &p, 0);
    if (file_idx >= nb_input_files || file_idx < 0) {
        av_log(NULL, AV_LOG_FATAL, "Invalid input file index: %d.\n", file_idx);
        exit_program(1);
    }
    if (negative)
        /* disable some already defined maps */
        for (i = 0; i < o->nb_stream_maps; i++) {
            m = &o->stream_maps[i];
            if (file_idx == m->file_index &&
                check_stream_specifier(input_files[m->file_index].ctx,
                                       input_files[m->file_index].ctx->streams[m->stream_index],
                                       *p == ':' ? p + 1 : p) > 0)
                m->disabled = 1;
        }
    else
        /* add one map per stream in the file that matches the specifier */
        for (i = 0; i < input_files[file_idx].nb_streams; i++) {
            if (check_stream_specifier(input_files[file_idx].ctx, input_files[file_idx].ctx->streams[i],
                                       *p == ':' ? p + 1 : p) <= 0)
                continue;
            o->stream_maps = grow_array(o->stream_maps, sizeof(*o->stream_maps),
                                        &o->nb_stream_maps, o->nb_stream_maps + 1);
            m = &o->stream_maps[o->nb_stream_maps - 1];

            m->file_index   = file_idx;
            m->stream_index = i;

            /* without an explicit sync spec the stream syncs to itself */
            if (sync_file_idx >= 0) {
                m->sync_file_index   = sync_file_idx;
                m->sync_stream_index = sync_stream_idx;
            } else {
                m->sync_file_index   = file_idx;
                m->sync_stream_index = i;
            }
        }

    /* m is still NULL when no stream matched (and no map was disabled) */
    if (!m) {
        av_log(NULL, AV_LOG_FATAL, "Stream map '%s' matches no streams.\n", arg);
        exit_program(1);
    }

    av_freep(&map);
    return 0;
}
  2614. static int opt_attach(OptionsContext *o, const char *opt, const char *arg)
  2615. {
  2616. o->attachments = grow_array(o->attachments, sizeof(*o->attachments),
  2617. &o->nb_attachments, o->nb_attachments + 1);
  2618. o->attachments[o->nb_attachments - 1] = arg;
  2619. return 0;
  2620. }
  2621. /**
  2622. * Parse a metadata specifier in arg.
  2623. * @param type metadata type is written here -- g(lobal)/s(tream)/c(hapter)/p(rogram)
  2624. * @param index for type c/p, chapter/program index is written here
  2625. * @param stream_spec for type s, the stream specifier is written here
  2626. */
  2627. static void parse_meta_type(char *arg, char *type, int *index, const char **stream_spec)
  2628. {
  2629. if (*arg) {
  2630. *type = *arg;
  2631. switch (*arg) {
  2632. case 'g':
  2633. break;
  2634. case 's':
  2635. if (*(++arg) && *arg != ':') {
  2636. av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", arg);
  2637. exit_program(1);
  2638. }
  2639. *stream_spec = *arg == ':' ? arg + 1 : "";
  2640. break;
  2641. case 'c':
  2642. case 'p':
  2643. if (*(++arg) == ':')
  2644. *index = strtol(++arg, NULL, 0);
  2645. break;
  2646. default:
  2647. av_log(NULL, AV_LOG_FATAL, "Invalid metadata type %c.\n", *arg);
  2648. exit_program(1);
  2649. }
  2650. } else
  2651. *type = 'g';
  2652. }
/*
 * Copy metadata between the dictionaries selected by the two
 * -map_metadata specifiers (global, stream, chapter or program scope).
 * Existing keys in the destination are preserved (AV_DICT_DONT_OVERWRITE).
 */
static int copy_metadata(char *outspec, char *inspec, AVFormatContext *oc, AVFormatContext *ic, OptionsContext *o)
{
    AVDictionary **meta_in = NULL;
    AVDictionary **meta_out;
    int i, ret = 0;
    char type_in, type_out;
    const char *istream_spec = NULL, *ostream_spec = NULL;
    int idx_in = 0, idx_out = 0;

    parse_meta_type(inspec,  &type_in,  &idx_in,  &istream_spec);
    parse_meta_type(outspec, &type_out, &idx_out, &ostream_spec);

    /* a manual mapping disables the corresponding automatic copy */
    if (type_in == 'g' || type_out == 'g')
        o->metadata_global_manual = 1;
    if (type_in == 's' || type_out == 's')
        o->metadata_streams_manual = 1;
    if (type_in == 'c' || type_out == 'c')
        o->metadata_chapters_manual = 1;

#define METADATA_CHECK_INDEX(index, nb_elems, desc)\
    if ((index) < 0 || (index) >= (nb_elems)) {\
        av_log(NULL, AV_LOG_FATAL, "Invalid %s index %d while processing metadata maps.\n",\
               (desc), (index));\
        exit_program(1);\
    }

/* resolve 'g'/'c'/'p' scopes to a dictionary; 's' is handled below */
#define SET_DICT(type, meta, context, index)\
        switch (type) {\
        case 'g':\
            meta = &context->metadata;\
            break;\
        case 'c':\
            METADATA_CHECK_INDEX(index, context->nb_chapters, "chapter")\
            meta = &context->chapters[index]->metadata;\
            break;\
        case 'p':\
            METADATA_CHECK_INDEX(index, context->nb_programs, "program")\
            meta = &context->programs[index]->metadata;\
            break;\
        }\

    SET_DICT(type_in, meta_in, ic, idx_in);
    SET_DICT(type_out, meta_out, oc, idx_out);

    /* for input streams choose first matching stream */
    if (type_in == 's') {
        for (i = 0; i < ic->nb_streams; i++) {
            if ((ret = check_stream_specifier(ic, ic->streams[i], istream_spec)) > 0) {
                meta_in = &ic->streams[i]->metadata;
                break;
            } else if (ret < 0)
                exit_program(1);
        }
        if (!meta_in) {
            av_log(NULL, AV_LOG_FATAL, "Stream specifier %s does not match  any streams.\n", istream_spec);
            exit_program(1);
        }
    }

    /* for output streams copy into every matching stream */
    if (type_out == 's') {
        for (i = 0; i < oc->nb_streams; i++) {
            if ((ret = check_stream_specifier(oc, oc->streams[i], ostream_spec)) > 0) {
                meta_out = &oc->streams[i]->metadata;
                av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);
            } else if (ret < 0)
                exit_program(1);
        }
    } else
        av_dict_copy(meta_out, *meta_in, AV_DICT_DONT_OVERWRITE);

    return 0;
}
  2717. static AVCodec *find_codec_or_die(const char *name, enum AVMediaType type, int encoder)
  2718. {
  2719. const char *codec_string = encoder ? "encoder" : "decoder";
  2720. AVCodec *codec;
  2721. codec = encoder ?
  2722. avcodec_find_encoder_by_name(name) :
  2723. avcodec_find_decoder_by_name(name);
  2724. if (!codec) {
  2725. av_log(NULL, AV_LOG_FATAL, "Unknown %s '%s'\n", codec_string, name);
  2726. exit_program(1);
  2727. }
  2728. if (codec->type != type) {
  2729. av_log(NULL, AV_LOG_FATAL, "Invalid %s type '%s'\n", codec_string, name);
  2730. exit_program(1);
  2731. }
  2732. return codec;
  2733. }
  2734. static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream *st)
  2735. {
  2736. char *codec_name = NULL;
  2737. MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, st);
  2738. if (codec_name) {
  2739. AVCodec *codec = find_codec_or_die(codec_name, st->codec->codec_type, 0);
  2740. st->codec->codec_id = codec->id;
  2741. return codec;
  2742. } else
  2743. return avcodec_find_decoder(st->codec->codec_id);
  2744. }
/**
 * Add all the streams from the given input file to the global
 * list of input streams.
 */
static void add_input_streams(OptionsContext *o, AVFormatContext *ic)
{
    int i;

    for (i = 0; i < ic->nb_streams; i++) {
        AVStream *st = ic->streams[i];
        AVCodecContext *dec = st->codec;
        InputStream *ist;

        input_streams = grow_array(input_streams, sizeof(*input_streams), &nb_input_streams, nb_input_streams + 1);
        ist = &input_streams[nb_input_streams - 1];

        ist->st = st;
        /* the file currently being opened is appended to input_files only
         * after this function returns, so its index is nb_input_files */
        ist->file_index = nb_input_files;
        ist->discard = 1;
        ist->opts = filter_codec_opts(codec_opts, ist->st->codec->codec_id, ic, st);

        ist->ts_scale = 1.0;
        MATCH_PER_STREAM_OPT(ts_scale, dbl, ist->ts_scale, ic, st);

        ist->dec = choose_decoder(o, ic, st);

        switch (dec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (o->audio_disable)
                st->discard = AVDISCARD_ALL;
            break;
        case AVMEDIA_TYPE_VIDEO:
            /* -lowres: the decoder emits frames scaled down by 2^lowres */
            if (dec->lowres) {
                dec->flags |= CODEC_FLAG_EMU_EDGE;
                dec->height >>= dec->lowres;
                dec->width  >>= dec->lowres;
            }
            if (o->video_disable)
                st->discard = AVDISCARD_ALL;
            else if (video_discard)
                st->discard = video_discard;
            break;
        case AVMEDIA_TYPE_DATA:
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (o->subtitle_disable)
                st->discard = AVDISCARD_ALL;
            break;
        case AVMEDIA_TYPE_ATTACHMENT:
        case AVMEDIA_TYPE_UNKNOWN:
            break;
        default:
            abort();
        }
    }
}
  2795. static void assert_file_overwrite(const char *filename)
  2796. {
  2797. if (!file_overwrite &&
  2798. (strchr(filename, ':') == NULL || filename[1] == ':' ||
  2799. av_strstart(filename, "file:", NULL))) {
  2800. if (avio_check(filename, 0) == 0) {
  2801. if (!using_stdin) {
  2802. fprintf(stderr,"File '%s' already exists. Overwrite ? [y/N] ", filename);
  2803. fflush(stderr);
  2804. if (!read_yesno()) {
  2805. fprintf(stderr, "Not overwriting - exiting\n");
  2806. exit_program(1);
  2807. }
  2808. }
  2809. else {
  2810. fprintf(stderr,"File '%s' already exists. Exiting.\n", filename);
  2811. exit_program(1);
  2812. }
  2813. }
  2814. }
  2815. }
  2816. static void dump_attachment(AVStream *st, const char *filename)
  2817. {
  2818. int ret;
  2819. AVIOContext *out = NULL;
  2820. AVDictionaryEntry *e;
  2821. if (!st->codec->extradata_size) {
  2822. av_log(NULL, AV_LOG_WARNING, "No extradata to dump in stream #%d:%d.\n",
  2823. nb_input_files - 1, st->index);
  2824. return;
  2825. }
  2826. if (!*filename && (e = av_dict_get(st->metadata, "filename", NULL, 0)))
  2827. filename = e->value;
  2828. if (!*filename) {
  2829. av_log(NULL, AV_LOG_FATAL, "No filename specified and no 'filename' tag"
  2830. "in stream #%d:%d.\n", nb_input_files - 1, st->index);
  2831. exit_program(1);
  2832. }
  2833. assert_file_overwrite(filename);
  2834. if ((ret = avio_open2(&out, filename, AVIO_FLAG_WRITE, &int_cb, NULL)) < 0) {
  2835. av_log(NULL, AV_LOG_FATAL, "Could not open file %s for writing.\n",
  2836. filename);
  2837. exit_program(1);
  2838. }
  2839. avio_write(out, st->codec->extradata, st->codec->extradata_size);
  2840. avio_flush(out);
  2841. avio_close(out);
  2842. }
/*
 * Open one input file given by -i: apply the per-file options collected
 * in *o, probe the streams, register them in the global input lists and
 * reset *o so the next file starts from defaults.
 */
static int opt_input_file(OptionsContext *o, const char *opt, const char *filename)
{
    AVFormatContext *ic;
    AVInputFormat *file_iformat = NULL;
    int err, i, ret;
    int64_t timestamp;
    uint8_t buf[128];
    AVDictionary **opts;
    int orig_nb_streams; // number of streams before avformat_find_stream_info

    if (o->format) {
        if (!(file_iformat = av_find_input_format(o->format))) {
            av_log(NULL, AV_LOG_FATAL, "Unknown input format: '%s'\n", o->format);
            exit_program(1);
        }
    }

    if (!strcmp(filename, "-"))
        filename = "pipe:";

    /* remember that stdin is in use so we cannot prompt interactively */
    using_stdin |= !strncmp(filename, "pipe:", 5) ||
                   !strcmp(filename, "/dev/stdin");

    /* get default parameters from command line */
    ic = avformat_alloc_context();
    if (!ic) {
        print_error(filename, AVERROR(ENOMEM));
        exit_program(1);
    }
    /* forward per-file demuxer parameters through format_opts; only the
     * last value of each repeated option is used */
    if (o->nb_audio_sample_rate) {
        snprintf(buf, sizeof(buf), "%d", o->audio_sample_rate[o->nb_audio_sample_rate - 1].u.i);
        av_dict_set(&format_opts, "sample_rate", buf, 0);
    }
    if (o->nb_audio_channels) {
        snprintf(buf, sizeof(buf), "%d", o->audio_channels[o->nb_audio_channels - 1].u.i);
        av_dict_set(&format_opts, "channels", buf, 0);
    }
    if (o->nb_frame_rates) {
        av_dict_set(&format_opts, "framerate", o->frame_rates[o->nb_frame_rates - 1].u.str, 0);
    }
    if (o->nb_frame_sizes) {
        av_dict_set(&format_opts, "video_size", o->frame_sizes[o->nb_frame_sizes - 1].u.str, 0);
    }
    if (o->nb_frame_pix_fmts)
        av_dict_set(&format_opts, "pixel_format", o->frame_pix_fmts[o->nb_frame_pix_fmts - 1].u.str, 0);

    ic->flags |= AVFMT_FLAG_NONBLOCK;
    ic->interrupt_callback = int_cb;

    /* open the input file with generic libav function */
    err = avformat_open_input(&ic, filename, file_iformat, &format_opts);
    if (err < 0) {
        print_error(filename, err);
        exit_program(1);
    }
    assert_avoptions(format_opts);

    /* apply forced codec ids */
    for (i = 0; i < ic->nb_streams; i++)
        choose_decoder(o, ic, ic->streams[i]);

    /* Set AVCodecContext options for avformat_find_stream_info */
    opts = setup_find_stream_info_opts(ic, codec_opts);
    orig_nb_streams = ic->nb_streams;

    /* If not enough info to get the stream parameters, we decode the
       first frames to get it. (used in mpeg case for example) */
    ret = avformat_find_stream_info(ic, opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", filename);
        avformat_close_input(&ic);
        exit_program(1);
    }

    timestamp = o->start_time;
    /* add the stream start time */
    if (ic->start_time != AV_NOPTS_VALUE)
        timestamp += ic->start_time;

    /* if seeking requested, we execute it */
    if (o->start_time != 0) {
        ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
        if (ret < 0) {
            /* seeking failure is not fatal; decoding starts from the top */
            av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
                   filename, (double)timestamp / AV_TIME_BASE);
        }
    }

    /* update the current parameters so that they match the one of the input stream */
    add_input_streams(o, ic);

    /* dump the file content */
    av_dump_format(ic, nb_input_files, filename, 0);

    input_files = grow_array(input_files, sizeof(*input_files), &nb_input_files, nb_input_files + 1);
    input_files[nb_input_files - 1].ctx        = ic;
    input_files[nb_input_files - 1].ist_index  = nb_input_streams - ic->nb_streams;
    input_files[nb_input_files - 1].ts_offset  = o->input_ts_offset - (copy_ts ? 0 : timestamp);
    input_files[nb_input_files - 1].nb_streams = ic->nb_streams;
    input_files[nb_input_files - 1].rate_emu   = o->rate_emu;

    /* honor any -dump_attachment options given for this file */
    for (i = 0; i < o->nb_dump_attachment; i++) {
        int j;

        for (j = 0; j < ic->nb_streams; j++) {
            AVStream *st = ic->streams[j];

            if (check_stream_specifier(ic, st, o->dump_attachment[i].specifier) == 1)
                dump_attachment(st, o->dump_attachment[i].u.str);
        }
    }

    for (i = 0; i < orig_nb_streams; i++)
        av_dict_free(&opts[i]);
    av_freep(&opts);

    reset_options(o);
    return 0;
}
  2943. static void parse_forced_key_frames(char *kf, OutputStream *ost,
  2944. AVCodecContext *avctx)
  2945. {
  2946. char *p;
  2947. int n = 1, i;
  2948. int64_t t;
  2949. for (p = kf; *p; p++)
  2950. if (*p == ',')
  2951. n++;
  2952. ost->forced_kf_count = n;
  2953. ost->forced_kf_pts = av_malloc(sizeof(*ost->forced_kf_pts) * n);
  2954. if (!ost->forced_kf_pts) {
  2955. av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
  2956. exit_program(1);
  2957. }
  2958. for (i = 0; i < n; i++) {
  2959. p = i ? strchr(p, ',') + 1 : kf;
  2960. t = parse_time_or_die("force_key_frames", p, 1);
  2961. ost->forced_kf_pts[i] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
  2962. }
  2963. }
  2964. static uint8_t *get_line(AVIOContext *s)
  2965. {
  2966. AVIOContext *line;
  2967. uint8_t *buf;
  2968. char c;
  2969. if (avio_open_dyn_buf(&line) < 0) {
  2970. av_log(NULL, AV_LOG_FATAL, "Could not alloc buffer for reading preset.\n");
  2971. exit_program(1);
  2972. }
  2973. while ((c = avio_r8(s)) && c != '\n')
  2974. avio_w8(line, c);
  2975. avio_w8(line, 0);
  2976. avio_close_dyn_buf(line, &buf);
  2977. return buf;
  2978. }
  2979. static int get_preset_file_2(const char *preset_name, const char *codec_name, AVIOContext **s)
  2980. {
  2981. int i, ret = 1;
  2982. char filename[1000];
  2983. const char *base[3] = { getenv("AVCONV_DATADIR"),
  2984. getenv("HOME"),
  2985. AVCONV_DATADIR,
  2986. };
  2987. for (i = 0; i < FF_ARRAY_ELEMS(base) && ret; i++) {
  2988. if (!base[i])
  2989. continue;
  2990. if (codec_name) {
  2991. snprintf(filename, sizeof(filename), "%s%s/%s-%s.avpreset", base[i],
  2992. i != 1 ? "" : "/.avconv", codec_name, preset_name);
  2993. ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
  2994. }
  2995. if (ret) {
  2996. snprintf(filename, sizeof(filename), "%s%s/%s.avpreset", base[i],
  2997. i != 1 ? "" : "/.avconv", preset_name);
  2998. ret = avio_open2(s, filename, AVIO_FLAG_READ, &int_cb, NULL);
  2999. }
  3000. }
  3001. return ret;
  3002. }
  3003. static void choose_encoder(OptionsContext *o, AVFormatContext *s, OutputStream *ost)
  3004. {
  3005. char *codec_name = NULL;
  3006. MATCH_PER_STREAM_OPT(codec_names, str, codec_name, s, ost->st);
  3007. if (!codec_name) {
  3008. ost->st->codec->codec_id = av_guess_codec(s->oformat, NULL, s->filename,
  3009. NULL, ost->st->codec->codec_type);
  3010. ost->enc = avcodec_find_encoder(ost->st->codec->codec_id);
  3011. } else if (!strcmp(codec_name, "copy"))
  3012. ost->stream_copy = 1;
  3013. else {
  3014. ost->enc = find_codec_or_die(codec_name, ost->st->codec->codec_type, 1);
  3015. ost->st->codec->codec_id = ost->enc->id;
  3016. }
  3017. }
  3018. static OutputStream *new_output_stream(OptionsContext *o, AVFormatContext *oc, enum AVMediaType type)
  3019. {
  3020. OutputStream *ost;
  3021. AVStream *st = avformat_new_stream(oc, NULL);
  3022. int idx = oc->nb_streams - 1, ret = 0;
  3023. char *bsf = NULL, *next, *codec_tag = NULL;
  3024. AVBitStreamFilterContext *bsfc, *bsfc_prev = NULL;
  3025. double qscale = -1;
  3026. char *buf = NULL, *arg = NULL, *preset = NULL;
  3027. AVIOContext *s = NULL;
  3028. if (!st) {
  3029. av_log(NULL, AV_LOG_FATAL, "Could not alloc stream.\n");
  3030. exit_program(1);
  3031. }
  3032. if (oc->nb_streams - 1 < o->nb_streamid_map)
  3033. st->id = o->streamid_map[oc->nb_streams - 1];
  3034. output_streams = grow_array(output_streams, sizeof(*output_streams), &nb_output_streams,
  3035. nb_output_streams + 1);
  3036. ost = &output_streams[nb_output_streams - 1];
  3037. ost->file_index = nb_output_files;
  3038. ost->index = idx;
  3039. ost->st = st;
  3040. st->codec->codec_type = type;
  3041. choose_encoder(o, oc, ost);
  3042. if (ost->enc) {
  3043. ost->opts = filter_codec_opts(codec_opts, ost->enc->id, oc, st);
  3044. }
  3045. avcodec_get_context_defaults3(st->codec, ost->enc);
  3046. st->codec->codec_type = type; // XXX hack, avcodec_get_context_defaults2() sets type to unknown for stream copy
  3047. MATCH_PER_STREAM_OPT(presets, str, preset, oc, st);
  3048. if (preset && (!(ret = get_preset_file_2(preset, ost->enc->name, &s)))) {
  3049. do {
  3050. buf = get_line(s);
  3051. if (!buf[0] || buf[0] == '#') {
  3052. av_free(buf);
  3053. continue;
  3054. }
  3055. if (!(arg = strchr(buf, '='))) {
  3056. av_log(NULL, AV_LOG_FATAL, "Invalid line found in the preset file.\n");
  3057. exit_program(1);
  3058. }
  3059. *arg++ = 0;
  3060. av_dict_set(&ost->opts, buf, arg, AV_DICT_DONT_OVERWRITE);
  3061. av_free(buf);
  3062. } while (!s->eof_reached);
  3063. avio_close(s);
  3064. }
  3065. if (ret) {
  3066. av_log(NULL, AV_LOG_FATAL,
  3067. "Preset %s specified for stream %d:%d, but could not be opened.\n",
  3068. preset, ost->file_index, ost->index);
  3069. exit_program(1);
  3070. }
  3071. ost->max_frames = INT64_MAX;
  3072. MATCH_PER_STREAM_OPT(max_frames, i64, ost->max_frames, oc, st);
  3073. MATCH_PER_STREAM_OPT(bitstream_filters, str, bsf, oc, st);
  3074. while (bsf) {
  3075. if (next = strchr(bsf, ','))
  3076. *next++ = 0;
  3077. if (!(bsfc = av_bitstream_filter_init(bsf))) {
  3078. av_log(NULL, AV_LOG_FATAL, "Unknown bitstream filter %s\n", bsf);
  3079. exit_program(1);
  3080. }
  3081. if (bsfc_prev)
  3082. bsfc_prev->next = bsfc;
  3083. else
  3084. ost->bitstream_filters = bsfc;
  3085. bsfc_prev = bsfc;
  3086. bsf = next;
  3087. }
  3088. MATCH_PER_STREAM_OPT(codec_tags, str, codec_tag, oc, st);
  3089. if (codec_tag) {
  3090. uint32_t tag = strtol(codec_tag, &next, 0);
  3091. if (*next)
  3092. tag = AV_RL32(codec_tag);
  3093. st->codec->codec_tag = tag;
  3094. }
  3095. MATCH_PER_STREAM_OPT(qscale, dbl, qscale, oc, st);
  3096. if (qscale >= 0 || same_quant) {
  3097. st->codec->flags |= CODEC_FLAG_QSCALE;
  3098. st->codec->global_quality = FF_QP2LAMBDA * qscale;
  3099. }
  3100. if (oc->oformat->flags & AVFMT_GLOBALHEADER)
  3101. st->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
  3102. av_opt_get_int(sws_opts, "sws_flags", 0, &ost->sws_flags);
  3103. return ost;
  3104. }
  3105. static void parse_matrix_coeffs(uint16_t *dest, const char *str)
  3106. {
  3107. int i;
  3108. const char *p = str;
  3109. for (i = 0;; i++) {
  3110. dest[i] = atoi(p);
  3111. if (i == 63)
  3112. break;
  3113. p = strchr(p, ',');
  3114. if (!p) {
  3115. av_log(NULL, AV_LOG_FATAL, "Syntax error in matrix \"%s\" at coeff %d\n", str, i);
  3116. exit_program(1);
  3117. }
  3118. p++;
  3119. }
  3120. }
/*
 * Create a video output stream and, unless it is stream-copied, apply
 * all video-specific per-stream options (frame rate/size/aspect, pixel
 * format, quant matrices, rc overrides, two-pass flags, forced key
 * frames and filters).
 */
static OutputStream *new_video_stream(OptionsContext *o, AVFormatContext *oc)
{
    AVStream *st;
    OutputStream *ost;
    AVCodecContext *video_enc;

    ost = new_output_stream(o, oc, AVMEDIA_TYPE_VIDEO);
    st  = ost->st;
    video_enc = st->codec;

    if (!ost->stream_copy) {
        const char *p = NULL;
        char *forced_key_frames = NULL, *frame_rate = NULL, *frame_size = NULL;
        char *frame_aspect_ratio = NULL, *frame_pix_fmt = NULL;
        char *intra_matrix = NULL, *inter_matrix = NULL, *filters = NULL;
        int i;

        MATCH_PER_STREAM_OPT(frame_rates, str, frame_rate, oc, st);
        if (frame_rate && av_parse_video_rate(&ost->frame_rate, frame_rate) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Invalid framerate value: %s\n", frame_rate);
            exit_program(1);
        }

        MATCH_PER_STREAM_OPT(frame_sizes, str, frame_size, oc, st);
        if (frame_size && av_parse_video_size(&video_enc->width, &video_enc->height, frame_size) < 0) {
            av_log(NULL, AV_LOG_FATAL, "Invalid frame size: %s.\n", frame_size);
            exit_program(1);
        }

        MATCH_PER_STREAM_OPT(frame_aspect_ratios, str, frame_aspect_ratio, oc, st);
        if (frame_aspect_ratio)
            ost->frame_aspect_ratio = parse_frame_aspect_ratio(frame_aspect_ratio);

        MATCH_PER_STREAM_OPT(frame_pix_fmts, str, frame_pix_fmt, oc, st);
        if (frame_pix_fmt && (video_enc->pix_fmt = av_get_pix_fmt(frame_pix_fmt)) == PIX_FMT_NONE) {
            av_log(NULL, AV_LOG_FATAL, "Unknown pixel format requested: %s.\n", frame_pix_fmt);
            exit_program(1);
        }
        st->sample_aspect_ratio = video_enc->sample_aspect_ratio;

        /* -intra_matrix / -inter_matrix: 64 comma-separated coefficients */
        MATCH_PER_STREAM_OPT(intra_matrices, str, intra_matrix, oc, st);
        if (intra_matrix) {
            if (!(video_enc->intra_matrix = av_mallocz(sizeof(*video_enc->intra_matrix) * 64))) {
                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for intra matrix.\n");
                exit_program(1);
            }
            parse_matrix_coeffs(video_enc->intra_matrix, intra_matrix);
        }
        MATCH_PER_STREAM_OPT(inter_matrices, str, inter_matrix, oc, st);
        if (inter_matrix) {
            if (!(video_enc->inter_matrix = av_mallocz(sizeof(*video_enc->inter_matrix) * 64))) {
                av_log(NULL, AV_LOG_FATAL, "Could not allocate memory for inter matrix.\n");
                exit_program(1);
            }
            parse_matrix_coeffs(video_enc->inter_matrix, inter_matrix);
        }

        /* -rc_override: "start,end,q[/start,end,q...]" entries */
        MATCH_PER_STREAM_OPT(rc_overrides, str, p, oc, st);
        for (i = 0; p; i++) {
            int start, end, q;
            int e = sscanf(p, "%d,%d,%d", &start, &end, &q);
            if (e != 3) {
                av_log(NULL, AV_LOG_FATAL, "error parsing rc_override\n");
                exit_program(1);
            }
            video_enc->rc_override =
                av_realloc(video_enc->rc_override,
                           sizeof(RcOverride) * (i + 1));
            video_enc->rc_override[i].start_frame = start;
            video_enc->rc_override[i].end_frame   = end;
            if (q > 0) {
                video_enc->rc_override[i].qscale         = q;
                video_enc->rc_override[i].quality_factor = 1.0;
            }
            else {
                /* a non-positive q encodes a quality factor of -q percent */
                video_enc->rc_override[i].qscale         = 0;
                video_enc->rc_override[i].quality_factor = -q/100.0;
            }
            p = strchr(p, '/');
            if (p) p++;
        }
        video_enc->rc_override_count = i;
        if (!video_enc->rc_initial_buffer_occupancy)
            video_enc->rc_initial_buffer_occupancy = video_enc->rc_buffer_size * 3 / 4;
        video_enc->intra_dc_precision = intra_dc_precision - 8;

        /* two pass mode */
        if (do_pass) {
            if (do_pass == 1) {
                video_enc->flags |= CODEC_FLAG_PASS1;
            } else {
                video_enc->flags |= CODEC_FLAG_PASS2;
            }
        }

        MATCH_PER_STREAM_OPT(forced_key_frames, str, forced_key_frames, oc, st);
        if (forced_key_frames)
            parse_forced_key_frames(forced_key_frames, ost, video_enc);

        MATCH_PER_STREAM_OPT(force_fps, i, ost->force_fps, oc, st);

        ost->top_field_first = -1;
        MATCH_PER_STREAM_OPT(top_field_first, i, ost->top_field_first, oc, st);

#if CONFIG_AVFILTER
        MATCH_PER_STREAM_OPT(filters, str, filters, oc, st);
        if (filters)
            ost->avfilter = av_strdup(filters);
#endif
    } else {
        MATCH_PER_STREAM_OPT(copy_initial_nonkeyframes, i, ost->copy_initial_nonkeyframes, oc, st);
    }

    return ost;
}
  3222. static OutputStream *new_audio_stream(OptionsContext *o, AVFormatContext *oc)
  3223. {
  3224. AVStream *st;
  3225. OutputStream *ost;
  3226. AVCodecContext *audio_enc;
  3227. ost = new_output_stream(o, oc, AVMEDIA_TYPE_AUDIO);
  3228. st = ost->st;
  3229. audio_enc = st->codec;
  3230. audio_enc->codec_type = AVMEDIA_TYPE_AUDIO;
  3231. if (!ost->stream_copy) {
  3232. char *sample_fmt = NULL;
  3233. MATCH_PER_STREAM_OPT(audio_channels, i, audio_enc->channels, oc, st);
  3234. MATCH_PER_STREAM_OPT(sample_fmts, str, sample_fmt, oc, st);
  3235. if (sample_fmt &&
  3236. (audio_enc->sample_fmt = av_get_sample_fmt(sample_fmt)) == AV_SAMPLE_FMT_NONE) {
  3237. av_log(NULL, AV_LOG_FATAL, "Invalid sample format '%s'\n", sample_fmt);
  3238. exit_program(1);
  3239. }
  3240. MATCH_PER_STREAM_OPT(audio_sample_rate, i, audio_enc->sample_rate, oc, st);
  3241. }
  3242. return ost;
  3243. }
  3244. static OutputStream *new_data_stream(OptionsContext *o, AVFormatContext *oc)
  3245. {
  3246. OutputStream *ost;
  3247. ost = new_output_stream(o, oc, AVMEDIA_TYPE_DATA);
  3248. if (!ost->stream_copy) {
  3249. av_log(NULL, AV_LOG_FATAL, "Data stream encoding not supported yet (only streamcopy)\n");
  3250. exit_program(1);
  3251. }
  3252. return ost;
  3253. }
/* Create an attachment output stream; attachments are always copied. */
static OutputStream *new_attachment_stream(OptionsContext *o, AVFormatContext *oc)
{
    OutputStream *ost = new_output_stream(o, oc, AVMEDIA_TYPE_ATTACHMENT);
    ost->stream_copy = 1;
    return ost;
}
  3260. static OutputStream *new_subtitle_stream(OptionsContext *o, AVFormatContext *oc)
  3261. {
  3262. AVStream *st;
  3263. OutputStream *ost;
  3264. AVCodecContext *subtitle_enc;
  3265. ost = new_output_stream(o, oc, AVMEDIA_TYPE_SUBTITLE);
  3266. st = ost->st;
  3267. subtitle_enc = st->codec;
  3268. subtitle_enc->codec_type = AVMEDIA_TYPE_SUBTITLE;
  3269. return ost;
  3270. }
/* arg format is "output-stream-index:streamid-value". */
static int opt_streamid(OptionsContext *o, const char *opt, const char *arg)
{
    int idx;
    char *p;
    char idx_str[16];

    /* NOTE(review): av_strlcpy silently truncates args longer than 15
     * chars; the "index:value" pair must fit in idx_str */
    av_strlcpy(idx_str, arg, sizeof(idx_str));
    p = strchr(idx_str, ':');
    if (!p) {
        av_log(NULL, AV_LOG_FATAL,
               "Invalid value '%s' for option '%s', required syntax is 'index:value'\n",
               arg, opt);
        exit_program(1);
    }
    *p++ = '\0';
    idx = parse_number_or_die(opt, idx_str, OPT_INT, 0, INT_MAX);
    /* grow the map so that entry idx exists, then record the stream id */
    o->streamid_map = grow_array(o->streamid_map, sizeof(*o->streamid_map), &o->nb_streamid_map, idx+1);
    o->streamid_map[idx] = parse_number_or_die(opt, p, OPT_INT, 0, INT_MAX);
    return 0;
}
  3291. static int copy_chapters(InputFile *ifile, OutputFile *ofile, int copy_metadata)
  3292. {
  3293. AVFormatContext *is = ifile->ctx;
  3294. AVFormatContext *os = ofile->ctx;
  3295. int i;
  3296. for (i = 0; i < is->nb_chapters; i++) {
  3297. AVChapter *in_ch = is->chapters[i], *out_ch;
  3298. int64_t ts_off = av_rescale_q(ofile->start_time - ifile->ts_offset,
  3299. AV_TIME_BASE_Q, in_ch->time_base);
  3300. int64_t rt = (ofile->recording_time == INT64_MAX) ? INT64_MAX :
  3301. av_rescale_q(ofile->recording_time, AV_TIME_BASE_Q, in_ch->time_base);
  3302. if (in_ch->end < ts_off)
  3303. continue;
  3304. if (rt != INT64_MAX && in_ch->start > rt + ts_off)
  3305. break;
  3306. out_ch = av_mallocz(sizeof(AVChapter));
  3307. if (!out_ch)
  3308. return AVERROR(ENOMEM);
  3309. out_ch->id = in_ch->id;
  3310. out_ch->time_base = in_ch->time_base;
  3311. out_ch->start = FFMAX(0, in_ch->start - ts_off);
  3312. out_ch->end = FFMIN(rt, in_ch->end - ts_off);
  3313. if (copy_metadata)
  3314. av_dict_copy(&out_ch->metadata, in_ch->metadata, 0);
  3315. os->nb_chapters++;
  3316. os->chapters = av_realloc(os->chapters, sizeof(AVChapter) * os->nb_chapters);
  3317. if (!os->chapters)
  3318. return AVERROR(ENOMEM);
  3319. os->chapters[os->nb_chapters - 1] = out_ch;
  3320. }
  3321. return 0;
  3322. }
  3323. static void opt_output_file(void *optctx, const char *filename)
  3324. {
  3325. OptionsContext *o = optctx;
  3326. AVFormatContext *oc;
  3327. int i, err;
  3328. AVOutputFormat *file_oformat;
  3329. OutputStream *ost;
  3330. InputStream *ist;
  3331. if (!strcmp(filename, "-"))
  3332. filename = "pipe:";
  3333. oc = avformat_alloc_context();
  3334. if (!oc) {
  3335. print_error(filename, AVERROR(ENOMEM));
  3336. exit_program(1);
  3337. }
  3338. if (o->format) {
  3339. file_oformat = av_guess_format(o->format, NULL, NULL);
  3340. if (!file_oformat) {
  3341. av_log(NULL, AV_LOG_FATAL, "Requested output format '%s' is not a suitable output format\n", o->format);
  3342. exit_program(1);
  3343. }
  3344. } else {
  3345. file_oformat = av_guess_format(NULL, filename, NULL);
  3346. if (!file_oformat) {
  3347. av_log(NULL, AV_LOG_FATAL, "Unable to find a suitable output format for '%s'\n",
  3348. filename);
  3349. exit_program(1);
  3350. }
  3351. }
  3352. oc->oformat = file_oformat;
  3353. oc->interrupt_callback = int_cb;
  3354. av_strlcpy(oc->filename, filename, sizeof(oc->filename));
  3355. if (!o->nb_stream_maps) {
  3356. /* pick the "best" stream of each type */
  3357. #define NEW_STREAM(type, index)\
  3358. if (index >= 0) {\
  3359. ost = new_ ## type ## _stream(o, oc);\
  3360. ost->source_index = index;\
  3361. ost->sync_ist = &input_streams[index];\
  3362. input_streams[index].discard = 0;\
  3363. }
  3364. /* video: highest resolution */
  3365. if (!o->video_disable && oc->oformat->video_codec != CODEC_ID_NONE) {
  3366. int area = 0, idx = -1;
  3367. for (i = 0; i < nb_input_streams; i++) {
  3368. ist = &input_streams[i];
  3369. if (ist->st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
  3370. ist->st->codec->width * ist->st->codec->height > area) {
  3371. area = ist->st->codec->width * ist->st->codec->height;
  3372. idx = i;
  3373. }
  3374. }
  3375. NEW_STREAM(video, idx);
  3376. }
  3377. /* audio: most channels */
  3378. if (!o->audio_disable && oc->oformat->audio_codec != CODEC_ID_NONE) {
  3379. int channels = 0, idx = -1;
  3380. for (i = 0; i < nb_input_streams; i++) {
  3381. ist = &input_streams[i];
  3382. if (ist->st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
  3383. ist->st->codec->channels > channels) {
  3384. channels = ist->st->codec->channels;
  3385. idx = i;
  3386. }
  3387. }
  3388. NEW_STREAM(audio, idx);
  3389. }
  3390. /* subtitles: pick first */
  3391. if (!o->subtitle_disable && oc->oformat->subtitle_codec != CODEC_ID_NONE) {
  3392. for (i = 0; i < nb_input_streams; i++)
  3393. if (input_streams[i].st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
  3394. NEW_STREAM(subtitle, i);
  3395. break;
  3396. }
  3397. }
  3398. /* do something with data? */
  3399. } else {
  3400. for (i = 0; i < o->nb_stream_maps; i++) {
  3401. StreamMap *map = &o->stream_maps[i];
  3402. if (map->disabled)
  3403. continue;
  3404. ist = &input_streams[input_files[map->file_index].ist_index + map->stream_index];
  3405. switch (ist->st->codec->codec_type) {
  3406. case AVMEDIA_TYPE_VIDEO: ost = new_video_stream(o, oc); break;
  3407. case AVMEDIA_TYPE_AUDIO: ost = new_audio_stream(o, oc); break;
  3408. case AVMEDIA_TYPE_SUBTITLE: ost = new_subtitle_stream(o, oc); break;
  3409. case AVMEDIA_TYPE_DATA: ost = new_data_stream(o, oc); break;
  3410. case AVMEDIA_TYPE_ATTACHMENT: ost = new_attachment_stream(o, oc); break;
  3411. default:
  3412. av_log(NULL, AV_LOG_FATAL, "Cannot map stream #%d:%d - unsupported type.\n",
  3413. map->file_index, map->stream_index);
  3414. exit_program(1);
  3415. }
  3416. ost->source_index = input_files[map->file_index].ist_index + map->stream_index;
  3417. ost->sync_ist = &input_streams[input_files[map->sync_file_index].ist_index +
  3418. map->sync_stream_index];
  3419. ist->discard = 0;
  3420. }
  3421. }
  3422. /* handle attached files */
  3423. for (i = 0; i < o->nb_attachments; i++) {
  3424. AVIOContext *pb;
  3425. uint8_t *attachment;
  3426. const char *p;
  3427. int64_t len;
  3428. if ((err = avio_open2(&pb, o->attachments[i], AVIO_FLAG_READ, &int_cb, NULL)) < 0) {
  3429. av_log(NULL, AV_LOG_FATAL, "Could not open attachment file %s.\n",
  3430. o->attachments[i]);
  3431. exit_program(1);
  3432. }
  3433. if ((len = avio_size(pb)) <= 0) {
  3434. av_log(NULL, AV_LOG_FATAL, "Could not get size of the attachment %s.\n",
  3435. o->attachments[i]);
  3436. exit_program(1);
  3437. }
  3438. if (!(attachment = av_malloc(len))) {
  3439. av_log(NULL, AV_LOG_FATAL, "Attachment %s too large to fit into memory.\n",
  3440. o->attachments[i]);
  3441. exit_program(1);
  3442. }
  3443. avio_read(pb, attachment, len);
  3444. ost = new_attachment_stream(o, oc);
  3445. ost->stream_copy = 0;
  3446. ost->source_index = -1;
  3447. ost->attachment_filename = o->attachments[i];
  3448. ost->st->codec->extradata = attachment;
  3449. ost->st->codec->extradata_size = len;
  3450. p = strrchr(o->attachments[i], '/');
  3451. av_dict_set(&ost->st->metadata, "filename", (p && *p) ? p + 1 : o->attachments[i], AV_DICT_DONT_OVERWRITE);
  3452. avio_close(pb);
  3453. }
  3454. output_files = grow_array(output_files, sizeof(*output_files), &nb_output_files, nb_output_files + 1);
  3455. output_files[nb_output_files - 1].ctx = oc;
  3456. output_files[nb_output_files - 1].ost_index = nb_output_streams - oc->nb_streams;
  3457. output_files[nb_output_files - 1].recording_time = o->recording_time;
  3458. if (o->recording_time != INT64_MAX)
  3459. oc->duration = o->recording_time;
  3460. output_files[nb_output_files - 1].start_time = o->start_time;
  3461. output_files[nb_output_files - 1].limit_filesize = o->limit_filesize;
  3462. av_dict_copy(&output_files[nb_output_files - 1].opts, format_opts, 0);
  3463. /* check filename in case of an image number is expected */
  3464. if (oc->oformat->flags & AVFMT_NEEDNUMBER) {
  3465. if (!av_filename_number_test(oc->filename)) {
  3466. print_error(oc->filename, AVERROR(EINVAL));
  3467. exit_program(1);
  3468. }
  3469. }
  3470. if (!(oc->oformat->flags & AVFMT_NOFILE)) {
  3471. /* test if it already exists to avoid losing precious files */
  3472. assert_file_overwrite(filename);
  3473. /* open the file */
  3474. if ((err = avio_open2(&oc->pb, filename, AVIO_FLAG_WRITE,
  3475. &oc->interrupt_callback,
  3476. &output_files[nb_output_files - 1].opts)) < 0) {
  3477. print_error(filename, err);
  3478. exit_program(1);
  3479. }
  3480. }
  3481. if (o->mux_preload) {
  3482. uint8_t buf[64];
  3483. snprintf(buf, sizeof(buf), "%d", (int)(o->mux_preload*AV_TIME_BASE));
  3484. av_dict_set(&output_files[nb_output_files - 1].opts, "preload", buf, 0);
  3485. }
  3486. oc->max_delay = (int)(o->mux_max_delay * AV_TIME_BASE);
  3487. oc->flags |= AVFMT_FLAG_NONBLOCK;
  3488. /* copy metadata */
  3489. for (i = 0; i < o->nb_metadata_map; i++) {
  3490. char *p;
  3491. int in_file_index = strtol(o->metadata_map[i].u.str, &p, 0);
  3492. if (in_file_index < 0)
  3493. continue;
  3494. if (in_file_index >= nb_input_files) {
  3495. av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d while processing metadata maps\n", in_file_index);
  3496. exit_program(1);
  3497. }
  3498. copy_metadata(o->metadata_map[i].specifier, *p ? p + 1 : p, oc, input_files[in_file_index].ctx, o);
  3499. }
  3500. /* copy chapters */
  3501. if (o->chapters_input_file >= nb_input_files) {
  3502. if (o->chapters_input_file == INT_MAX) {
  3503. /* copy chapters from the first input file that has them*/
  3504. o->chapters_input_file = -1;
  3505. for (i = 0; i < nb_input_files; i++)
  3506. if (input_files[i].ctx->nb_chapters) {
  3507. o->chapters_input_file = i;
  3508. break;
  3509. }
  3510. } else {
  3511. av_log(NULL, AV_LOG_FATAL, "Invalid input file index %d in chapter mapping.\n",
  3512. o->chapters_input_file);
  3513. exit_program(1);
  3514. }
  3515. }
  3516. if (o->chapters_input_file >= 0)
  3517. copy_chapters(&input_files[o->chapters_input_file], &output_files[nb_output_files - 1],
  3518. !o->metadata_chapters_manual);
  3519. /* copy global metadata by default */
  3520. if (!o->metadata_global_manual && nb_input_files)
  3521. av_dict_copy(&oc->metadata, input_files[0].ctx->metadata,
  3522. AV_DICT_DONT_OVERWRITE);
  3523. if (!o->metadata_streams_manual)
  3524. for (i = output_files[nb_output_files - 1].ost_index; i < nb_output_streams; i++) {
  3525. InputStream *ist;
  3526. if (output_streams[i].source_index < 0) /* this is true e.g. for attached files */
  3527. continue;
  3528. ist = &input_streams[output_streams[i].source_index];
  3529. av_dict_copy(&output_streams[i].st->metadata, ist->st->metadata, AV_DICT_DONT_OVERWRITE);
  3530. }
  3531. /* process manually set metadata */
  3532. for (i = 0; i < o->nb_metadata; i++) {
  3533. AVDictionary **m;
  3534. char type, *val;
  3535. const char *stream_spec;
  3536. int index = 0, j, ret;
  3537. val = strchr(o->metadata[i].u.str, '=');
  3538. if (!val) {
  3539. av_log(NULL, AV_LOG_FATAL, "No '=' character in metadata string %s.\n",
  3540. o->metadata[i].u.str);
  3541. exit_program(1);
  3542. }
  3543. *val++ = 0;
  3544. parse_meta_type(o->metadata[i].specifier, &type, &index, &stream_spec);
  3545. if (type == 's') {
  3546. for (j = 0; j < oc->nb_streams; j++) {
  3547. if ((ret = check_stream_specifier(oc, oc->streams[j], stream_spec)) > 0) {
  3548. av_dict_set(&oc->streams[j]->metadata, o->metadata[i].u.str, *val ? val : NULL, 0);
  3549. } else if (ret < 0)
  3550. exit_program(1);
  3551. }
  3552. printf("ret %d, stream_spec %s\n", ret, stream_spec);
  3553. }
  3554. else {
  3555. switch (type) {
  3556. case 'g':
  3557. m = &oc->metadata;
  3558. break;
  3559. case 'c':
  3560. if (index < 0 || index >= oc->nb_chapters) {
  3561. av_log(NULL, AV_LOG_FATAL, "Invalid chapter index %d in metadata specifier.\n", index);
  3562. exit_program(1);
  3563. }
  3564. m = &oc->chapters[index]->metadata;
  3565. break;
  3566. default:
  3567. av_log(NULL, AV_LOG_FATAL, "Invalid metadata specifier %s.\n", o->metadata[i].specifier);
  3568. exit_program(1);
  3569. }
  3570. av_dict_set(m, o->metadata[i].u.str, *val ? val : NULL, 0);
  3571. }
  3572. }
  3573. reset_options(o);
  3574. }
  3575. /* same option as mencoder */
  3576. static int opt_pass(const char *opt, const char *arg)
  3577. {
  3578. do_pass = parse_number_or_die(opt, arg, OPT_INT, 1, 2);
  3579. return 0;
  3580. }
/* Return the user CPU time consumed by this process, in microseconds.
 * Falls back to wall-clock time on platforms without per-process
 * CPU-time counters. */
static int64_t getutime(void)
{
#if HAVE_GETRUSAGE
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    return (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
#elif HAVE_GETPROCESSTIMES
    HANDLE proc;
    FILETIME c, e, k, u;
    proc = GetCurrentProcess();
    GetProcessTimes(proc, &c, &e, &k, &u);
    /* FILETIME counts 100 ns units; divide by 10 for microseconds */
    return ((int64_t) u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
#else
    return av_gettime();
#endif
}
/* Return the peak memory usage of this process in bytes, or 0 when the
 * platform provides no way to query it. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage rusage;
    getrusage(RUSAGE_SELF, &rusage);
    /* ru_maxrss is reported in kilobytes */
    return (int64_t)rusage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    HANDLE proc;
    PROCESS_MEMORY_COUNTERS memcounters;
    proc = GetCurrentProcess();
    memcounters.cb = sizeof(memcounters);
    GetProcessMemoryInfo(proc, &memcounters, sizeof(memcounters));
    return memcounters.PeakPagefileUsage;
#else
    return 0;
#endif
}
  3614. static int opt_audio_qscale(OptionsContext *o, const char *opt, const char *arg)
  3615. {
  3616. return parse_option(o, "q:a", arg, options);
  3617. }
/* Print the one-line command synopsis to stdout. */
static void show_usage(void)
{
    printf("Hyper fast Audio and Video encoder\n");
    printf("usage: %s [options] [[infile options] -i infile]... {[outfile options] outfile}...\n", program_name);
    printf("\n");
}
/* Print the full option help: each show_help_options() call selects a
 * section by masking the option flags, then the libavcodec/libavformat/
 * libswscale AVOption help is appended. */
static void show_help(void)
{
    int flags = AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_ENCODING_PARAM;
    av_log_set_callback(log_callback_help);
    show_usage();
    show_help_options(options, "Main options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB, 0);
    show_help_options(options, "\nAdvanced options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_SUBTITLE | OPT_GRAB,
                      OPT_EXPERT);
    show_help_options(options, "\nVideo options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_VIDEO);
    show_help_options(options, "\nAdvanced Video options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_VIDEO | OPT_EXPERT);
    show_help_options(options, "\nAudio options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_AUDIO);
    show_help_options(options, "\nAdvanced Audio options:\n",
                      OPT_EXPERT | OPT_AUDIO | OPT_VIDEO | OPT_GRAB,
                      OPT_AUDIO | OPT_EXPERT);
    show_help_options(options, "\nSubtitle options:\n",
                      OPT_SUBTITLE | OPT_GRAB,
                      OPT_SUBTITLE);
    show_help_options(options, "\nAudio/Video grab options:\n",
                      OPT_GRAB,
                      OPT_GRAB);
    printf("\n");
    show_help_children(avcodec_get_class(), flags);
    show_help_children(avformat_get_class(), flags);
    show_help_children(sws_get_class(), flags);
}
/* Apply a canned parameter set for a target device ("vcd", "svcd", "dvd",
 * "dv"/"dv50"), optionally prefixed with "pal-"/"ntsc-"/"film-" to force
 * the TV norm. Without a prefix the norm is guessed from the frame rates
 * of the already-opened input files. */
static int opt_target(OptionsContext *o, const char *opt, const char *arg)
{
    /* norm also indexes frame_rates[] below */
    enum { PAL, NTSC, FILM, UNKNOWN } norm = UNKNOWN;
    static const char *const frame_rates[] = { "25", "30000/1001", "24000/1001" };
    if (!strncmp(arg, "pal-", 4)) {
        norm = PAL;
        arg += 4;
    } else if (!strncmp(arg, "ntsc-", 5)) {
        norm = NTSC;
        arg += 5;
    } else if (!strncmp(arg, "film-", 5)) {
        norm = FILM;
        arg += 5;
    } else {
        /* Try to determine PAL/NTSC by peeking in the input files */
        if (nb_input_files) {
            int i, j, fr;
            for (j = 0; j < nb_input_files; j++) {
                for (i = 0; i < input_files[j].nb_streams; i++) {
                    AVCodecContext *c = input_files[j].ctx->streams[i]->codec;
                    if (c->codec_type != AVMEDIA_TYPE_VIDEO)
                        continue;
                    /* frame rate in millihertz, to compare integer values */
                    fr = c->time_base.den * 1000 / c->time_base.num;
                    if (fr == 25000) {
                        norm = PAL;
                        break;
                    } else if ((fr == 29970) || (fr == 23976)) {
                        norm = NTSC;
                        break;
                    }
                }
                if (norm != UNKNOWN)
                    break;
            }
        }
        if (norm != UNKNOWN)
            av_log(NULL, AV_LOG_INFO, "Assuming %s for target.\n", norm == PAL ? "PAL" : "NTSC");
    }
    if (norm == UNKNOWN) {
        av_log(NULL, AV_LOG_FATAL, "Could not determine norm (PAL/NTSC/NTSC-Film) for target.\n");
        av_log(NULL, AV_LOG_FATAL, "Please prefix target with \"pal-\", \"ntsc-\" or \"film-\",\n");
        av_log(NULL, AV_LOG_FATAL, "or set a framerate with \"-r xxx\".\n");
        exit_program(1);
    }
    if (!strcmp(arg, "vcd")) {
        opt_video_codec(o, "c:v", "mpeg1video");
        opt_audio_codec(o, "c:a", "mp2");
        parse_option(o, "f", "vcd", options);
        parse_option(o, "s", norm == PAL ? "352x288" : "352x240", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default("g", norm == PAL ? "15" : "18");
        opt_default("b", "1150000");
        opt_default("maxrate", "1150000");
        opt_default("minrate", "1150000");
        opt_default("bufsize", "327680"); // 40*1024*8;
        opt_default("b:a", "224000");
        parse_option(o, "ar", "44100", options);
        parse_option(o, "ac", "2", options);
        opt_default("packetsize", "2324");
        opt_default("muxrate", "1411200"); // 2352 * 75 * 8;
        /* We have to offset the PTS, so that it is consistent with the SCR.
           SCR starts at 36000, but the first two packs contain only padding
           and the first pack from the other stream, respectively, may also have
           been written before.
           So the real data starts at SCR 36000+3*1200. */
        o->mux_preload = (36000 + 3 * 1200) / 90000.0; // 0.44
    } else if (!strcmp(arg, "svcd")) {
        opt_video_codec(o, "c:v", "mpeg2video");
        opt_audio_codec(o, "c:a", "mp2");
        parse_option(o, "f", "svcd", options);
        parse_option(o, "s", norm == PAL ? "480x576" : "480x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default("g", norm == PAL ? "15" : "18");
        opt_default("b", "2040000");
        opt_default("maxrate", "2516000");
        opt_default("minrate", "0"); // 1145000;
        opt_default("bufsize", "1835008"); // 224*1024*8;
        opt_default("flags", "+scan_offset");
        opt_default("b:a", "224000");
        parse_option(o, "ar", "44100", options);
        opt_default("packetsize", "2324");
    } else if (!strcmp(arg, "dvd")) {
        opt_video_codec(o, "c:v", "mpeg2video");
        opt_audio_codec(o, "c:a", "ac3");
        parse_option(o, "f", "dvd", options);
        parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
        parse_option(o, "r", frame_rates[norm], options);
        opt_default("g", norm == PAL ? "15" : "18");
        opt_default("b", "6000000");
        opt_default("maxrate", "9000000");
        opt_default("minrate", "0"); // 1500000;
        opt_default("bufsize", "1835008"); // 224*1024*8;
        opt_default("packetsize", "2048"); // from www.mpucoder.com: DVD sectors contain 2048 bytes of data, this is also the size of one pack.
        opt_default("muxrate", "10080000"); // from mplex project: data_rate = 1260000. mux_rate = data_rate * 8
        opt_default("b:a", "448000");
        parse_option(o, "ar", "48000", options);
    } else if (!strncmp(arg, "dv", 2)) {
        /* matches both "dv" and "dv50" */
        parse_option(o, "f", "dv", options);
        parse_option(o, "s", norm == PAL ? "720x576" : "720x480", options);
        parse_option(o, "pix_fmt", !strncmp(arg, "dv50", 4) ? "yuv422p" :
                     norm == PAL ? "yuv420p" : "yuv411p", options);
        parse_option(o, "r", frame_rates[norm], options);
        parse_option(o, "ar", "48000", options);
        parse_option(o, "ac", "2", options);
    } else {
        av_log(NULL, AV_LOG_ERROR, "Unknown target: %s\n", arg);
        return AVERROR(EINVAL);
    }
    return 0;
}
  3767. static int opt_vstats_file(const char *opt, const char *arg)
  3768. {
  3769. av_free (vstats_filename);
  3770. vstats_filename = av_strdup (arg);
  3771. return 0;
  3772. }
  3773. static int opt_vstats(const char *opt, const char *arg)
  3774. {
  3775. char filename[40];
  3776. time_t today2 = time(NULL);
  3777. struct tm *today = localtime(&today2);
  3778. snprintf(filename, sizeof(filename), "vstats_%02d%02d%02d.log", today->tm_hour, today->tm_min,
  3779. today->tm_sec);
  3780. return opt_vstats_file(opt, filename);
  3781. }
  3782. static int opt_video_frames(OptionsContext *o, const char *opt, const char *arg)
  3783. {
  3784. return parse_option(o, "frames:v", arg, options);
  3785. }
  3786. static int opt_audio_frames(OptionsContext *o, const char *opt, const char *arg)
  3787. {
  3788. return parse_option(o, "frames:a", arg, options);
  3789. }
  3790. static int opt_data_frames(OptionsContext *o, const char *opt, const char *arg)
  3791. {
  3792. return parse_option(o, "frames:d", arg, options);
  3793. }
  3794. static int opt_video_tag(OptionsContext *o, const char *opt, const char *arg)
  3795. {
  3796. return parse_option(o, "tag:v", arg, options);
  3797. }
  3798. static int opt_audio_tag(OptionsContext *o, const char *opt, const char *arg)
  3799. {
  3800. return parse_option(o, "tag:a", arg, options);
  3801. }
  3802. static int opt_subtitle_tag(OptionsContext *o, const char *opt, const char *arg)
  3803. {
  3804. return parse_option(o, "tag:s", arg, options);
  3805. }
  3806. static int opt_video_filters(OptionsContext *o, const char *opt, const char *arg)
  3807. {
  3808. return parse_option(o, "filter:v", arg, options);
  3809. }
/* Set the global video sync method from a symbolic -vsync name,
 * falling back to a numeric value. */
static int opt_vsync(const char *opt, const char *arg)
{
    if (!av_strcasecmp(arg, "cfr")) video_sync_method = VSYNC_CFR;
    else if (!av_strcasecmp(arg, "vfr")) video_sync_method = VSYNC_VFR;
    else if (!av_strcasecmp(arg, "passthrough")) video_sync_method = VSYNC_PASSTHROUGH;

    /* NOTE(review): when arg matches no symbolic name, the numeric parse
     * only runs while video_sync_method is still VSYNC_AUTO — a second
     * non-symbolic -vsync appears to be silently ignored; confirm intended. */
    if (video_sync_method == VSYNC_AUTO)
        video_sync_method = parse_number_or_die("vsync", arg, OPT_INT, VSYNC_AUTO, VSYNC_VFR);
    return 0;
}
  3819. static int opt_deinterlace(const char *opt, const char *arg)
  3820. {
  3821. av_log(NULL, AV_LOG_WARNING, "-%s is deprecated, use -filter:v yadif instead\n", opt);
  3822. do_deinterlace = 1;
  3823. return 0;
  3824. }
/* Shorthand for the offset of an OptionsContext field, used by the
 * OPT_OFFSET/OPT_SPEC entries in the option table below. */
#define OFFSET(x) offsetof(OptionsContext, x)
/* The command-line option table: name, flags, value/offset/handler,
 * help text, and (optionally) the argument name shown in -h output. */
static const OptionDef options[] = {
    /* main options */
#include "cmdutils_common_opts.h"
    { "f", HAS_ARG | OPT_STRING | OPT_OFFSET, {.off = OFFSET(format)}, "force format", "fmt" },
    { "i", HAS_ARG | OPT_FUNC2, {(void*)opt_input_file}, "input file name", "filename" },
    { "y", OPT_BOOL, {(void*)&file_overwrite}, "overwrite output files" },
    { "c", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
    { "codec", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(codec_names)}, "codec name", "codec" },
    { "pre", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(presets)}, "preset name", "preset" },
    { "map", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_map}, "set input stream mapping", "[-]input_file_id[:stream_specifier][,sync_file_id[:stream_specifier]]" },
    { "map_metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata_map)}, "set metadata information of outfile from infile",
      "outfile[,metadata]:infile[,metadata]" },
    { "map_chapters", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(chapters_input_file)}, "set chapters mapping", "input_file_index" },
    { "t", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(recording_time)}, "record or transcode \"duration\" seconds of audio/video", "duration" },
    { "fs", HAS_ARG | OPT_INT64 | OPT_OFFSET, {.off = OFFSET(limit_filesize)}, "set the limit file size in bytes", "limit_size" }, //
    { "ss", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(start_time)}, "set the start time offset", "time_off" },
    { "itsoffset", HAS_ARG | OPT_TIME | OPT_OFFSET, {.off = OFFSET(input_ts_offset)}, "set the input ts offset", "time_off" },
    { "itsscale", HAS_ARG | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(ts_scale)}, "set the input ts scale", "scale" },
    { "metadata", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(metadata)}, "add metadata", "string=string" },
    { "dframes", HAS_ARG | OPT_FUNC2, {(void*)opt_data_frames}, "set the number of data frames to record", "number" },
    { "benchmark", OPT_BOOL | OPT_EXPERT, {(void*)&do_benchmark},
      "add timings for benchmarking" },
    { "timelimit", HAS_ARG, {(void*)opt_timelimit}, "set max runtime in seconds", "limit" },
    { "dump", OPT_BOOL | OPT_EXPERT, {(void*)&do_pkt_dump},
      "dump each input packet" },
    { "hex", OPT_BOOL | OPT_EXPERT, {(void*)&do_hex_dump},
      "when dumping packets, also dump the payload" },
    { "re", OPT_BOOL | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(rate_emu)}, "read input at native frame rate", "" },
    { "target", HAS_ARG | OPT_FUNC2, {(void*)opt_target}, "specify target file type (\"vcd\", \"svcd\", \"dvd\", \"dv\", \"dv50\", \"pal-vcd\", \"ntsc-svcd\", ...)", "type" },
    { "vsync", HAS_ARG | OPT_EXPERT, {(void*)opt_vsync}, "video sync method", "" },
    { "async", HAS_ARG | OPT_INT | OPT_EXPERT, {(void*)&audio_sync_method}, "audio sync method", "" },
    { "adrift_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&audio_drift_threshold}, "audio drift threshold", "threshold" },
    { "copyts", OPT_BOOL | OPT_EXPERT, {(void*)&copy_ts}, "copy timestamps" },
    { "copytb", OPT_BOOL | OPT_EXPERT, {(void*)&copy_tb}, "copy input stream time base when stream copying" },
    { "shortest", OPT_BOOL | OPT_EXPERT, {(void*)&opt_shortest}, "finish encoding within shortest input" }, //
    { "dts_delta_threshold", HAS_ARG | OPT_FLOAT | OPT_EXPERT, {(void*)&dts_delta_threshold}, "timestamp discontinuity delta threshold", "threshold" },
    { "xerror", OPT_BOOL, {(void*)&exit_on_error}, "exit on error", "error" },
    { "copyinkf", OPT_BOOL | OPT_EXPERT | OPT_SPEC, {.off = OFFSET(copy_initial_nonkeyframes)}, "copy initial non-keyframes" },
    { "frames", OPT_INT64 | HAS_ARG | OPT_SPEC, {.off = OFFSET(max_frames)}, "set the number of frames to record", "number" },
    { "tag", OPT_STRING | HAS_ARG | OPT_SPEC, {.off = OFFSET(codec_tags)}, "force codec tag/fourcc", "fourcc/tag" },
    { "q", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
    { "qscale", HAS_ARG | OPT_EXPERT | OPT_DOUBLE | OPT_SPEC, {.off = OFFSET(qscale)}, "use fixed quality scale (VBR)", "q" },
#if CONFIG_AVFILTER
    { "filter", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(filters)}, "set stream filterchain", "filter_list" },
#endif
    { "stats", OPT_BOOL, {&print_stats}, "print progress report during encoding", },
    { "attach", HAS_ARG | OPT_FUNC2, {(void*)opt_attach}, "add an attachment to the output file", "filename" },
    { "dump_attachment", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(dump_attachment)}, "extract an attachment into a file", "filename" },
    /* video options */
    { "vframes", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_frames}, "set the number of video frames to record", "number" },
    { "r", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_rates)}, "set frame rate (Hz value, fraction or abbreviation)", "rate" },
    { "s", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_sizes)}, "set frame size (WxH or abbreviation)", "size" },
    { "aspect", HAS_ARG | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_aspect_ratios)}, "set aspect ratio (4:3, 16:9 or 1.3333, 1.7777)", "aspect" },
    { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(frame_pix_fmts)}, "set pixel format", "format" },
    { "vn", OPT_BOOL | OPT_VIDEO | OPT_OFFSET, {.off = OFFSET(video_disable)}, "disable video" },
    { "vdt", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&video_discard}, "discard threshold", "n" },
    { "rc_override", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(rc_overrides)}, "rate control override for specific intervals", "override" },
    { "vcodec", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_codec}, "force video codec ('copy' to copy stream)", "codec" },
    { "same_quant", OPT_BOOL | OPT_VIDEO, {(void*)&same_quant},
      "use same quantizer as source (implies VBR)" },
    { "pass", HAS_ARG | OPT_VIDEO, {(void*)opt_pass}, "select the pass number (1 or 2)", "n" },
    { "passlogfile", HAS_ARG | OPT_STRING | OPT_VIDEO, {(void*)&pass_logfilename_prefix}, "select two pass log file name prefix", "prefix" },
    { "deinterlace", OPT_EXPERT | OPT_VIDEO, {(void*)opt_deinterlace},
      "this option is deprecated, use the yadif filter instead" },
    { "vstats", OPT_EXPERT | OPT_VIDEO, {(void*)&opt_vstats}, "dump video coding statistics to file" },
    { "vstats_file", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_vstats_file}, "dump video coding statistics to file", "file" },
#if CONFIG_AVFILTER
    { "vf", HAS_ARG | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_filters}, "video filters", "filter list" },
#endif
    { "intra_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(intra_matrices)}, "specify intra matrix coeffs", "matrix" },
    { "inter_matrix", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_STRING | OPT_SPEC, {.off = OFFSET(inter_matrices)}, "specify inter matrix coeffs", "matrix" },
    { "top", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_INT| OPT_SPEC, {.off = OFFSET(top_field_first)}, "top=1/bottom=0/auto=-1 field first", "" },
    { "dc", OPT_INT | HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)&intra_dc_precision}, "intra_dc_precision", "precision" },
    { "vtag", HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_FUNC2, {(void*)opt_video_tag}, "force video tag/fourcc", "fourcc/tag" },
    { "qphist", OPT_BOOL | OPT_EXPERT | OPT_VIDEO, { (void *)&qp_hist }, "show QP histogram" },
    { "force_fps", OPT_BOOL | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(force_fps)}, "force the selected framerate, disable the best supported framerate selection" },
    { "streamid", HAS_ARG | OPT_EXPERT | OPT_FUNC2, {(void*)opt_streamid}, "set the value of an outfile streamid", "streamIndex:value" },
    { "force_key_frames", OPT_STRING | HAS_ARG | OPT_EXPERT | OPT_VIDEO | OPT_SPEC, {.off = OFFSET(forced_key_frames)}, "force key frames at specified timestamps", "timestamps" },
    /* audio options */
    { "aframes", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_frames}, "set the number of audio frames to record", "number" },
    { "aq", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_qscale}, "set audio quality (codec-specific)", "quality", },
    { "ar", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_sample_rate)}, "set audio sampling rate (in Hz)", "rate" },
    { "ac", HAS_ARG | OPT_AUDIO | OPT_INT | OPT_SPEC, {.off = OFFSET(audio_channels)}, "set number of audio channels", "channels" },
    { "an", OPT_BOOL | OPT_AUDIO | OPT_OFFSET, {.off = OFFSET(audio_disable)}, "disable audio" },
    { "acodec", HAS_ARG | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_codec}, "force audio codec ('copy' to copy stream)", "codec" },
    { "atag", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_FUNC2, {(void*)opt_audio_tag}, "force audio tag/fourcc", "fourcc/tag" },
    { "vol", OPT_INT | HAS_ARG | OPT_AUDIO, {(void*)&audio_volume}, "change audio volume (256=normal)" , "volume" }, //
    { "sample_fmt", HAS_ARG | OPT_EXPERT | OPT_AUDIO | OPT_SPEC | OPT_STRING, {.off = OFFSET(sample_fmts)}, "set sample format", "format" },
    /* subtitle options */
    { "sn", OPT_BOOL | OPT_SUBTITLE | OPT_OFFSET, {.off = OFFSET(subtitle_disable)}, "disable subtitle" },
    { "scodec", HAS_ARG | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_codec}, "force subtitle codec ('copy' to copy stream)", "codec" },
    { "stag", HAS_ARG | OPT_EXPERT | OPT_SUBTITLE | OPT_FUNC2, {(void*)opt_subtitle_tag}, "force subtitle tag/fourcc", "fourcc/tag" },
    /* grab options */
    { "isync", OPT_BOOL | OPT_EXPERT | OPT_GRAB, {(void*)&input_sync}, "sync read on input", "" },
    /* muxer options */
    { "muxdelay", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_max_delay)}, "set the maximum demux-decode delay", "seconds" },
    { "muxpreload", OPT_FLOAT | HAS_ARG | OPT_EXPERT | OPT_OFFSET, {.off = OFFSET(mux_preload)}, "set the initial demux-decode delay", "seconds" },
    { "bsf", HAS_ARG | OPT_STRING | OPT_SPEC, {.off = OFFSET(bitstream_filters)}, "A comma-separated list of bitstream filters", "bitstream_filters" },
    /* data codec support */
    { "dcodec", HAS_ARG | OPT_DATA | OPT_FUNC2, {(void*)opt_data_codec}, "force data codec ('copy' to copy stream)", "codec" },
    { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, {(void*)opt_default}, "generic catch all option", "" },
    { NULL, },
};
  3929. int main(int argc, char **argv)
  3930. {
  3931. OptionsContext o = { 0 };
  3932. int64_t ti;
  3933. reset_options(&o);
  3934. av_log_set_flags(AV_LOG_SKIP_REPEATED);
  3935. parse_loglevel(argc, argv, options);
  3936. avcodec_register_all();
  3937. #if CONFIG_AVDEVICE
  3938. avdevice_register_all();
  3939. #endif
  3940. #if CONFIG_AVFILTER
  3941. avfilter_register_all();
  3942. #endif
  3943. av_register_all();
  3944. avformat_network_init();
  3945. show_banner();
  3946. /* parse options */
  3947. parse_options(&o, argc, argv, options, opt_output_file);
  3948. if (nb_output_files <= 0 && nb_input_files == 0) {
  3949. show_usage();
  3950. av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
  3951. exit_program(1);
  3952. }
  3953. /* file converter / grab */
  3954. if (nb_output_files <= 0) {
  3955. fprintf(stderr, "At least one output file must be specified\n");
  3956. exit_program(1);
  3957. }
  3958. if (nb_input_files == 0) {
  3959. av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
  3960. exit_program(1);
  3961. }
  3962. ti = getutime();
  3963. if (transcode(output_files, nb_output_files, input_files, nb_input_files) < 0)
  3964. exit_program(1);
  3965. ti = getutime() - ti;
  3966. if (do_benchmark) {
  3967. int maxrss = getmaxrss() / 1024;
  3968. printf("bench: utime=%0.3fs maxrss=%ikB\n", ti / 1000000.0, maxrss);
  3969. }
  3970. exit_program(0);
  3971. return 0;
  3972. }