You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1418 lines
46KB

  1. /*
  2. * muxing functions for use within FFmpeg
  3. * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "avformat.h"
  22. #include "internal.h"
  23. #include "libavcodec/internal.h"
  24. #include "libavcodec/packet_internal.h"
  25. #include "libavutil/opt.h"
  26. #include "libavutil/dict.h"
  27. #include "libavutil/pixdesc.h"
  28. #include "libavutil/timestamp.h"
  29. #include "libavutil/avassert.h"
  30. #include "libavutil/avstring.h"
  31. #include "libavutil/internal.h"
  32. #include "libavutil/mathematics.h"
  33. /**
  34. * @file
  35. * muxing functions for use within libavformat
  36. */
  37. /* fraction handling */
  38. /**
  39. * f = val + (num / den) + 0.5.
  40. *
  41. * 'num' is normalized so that it is such as 0 <= num < den.
  42. *
  43. * @param f fractional number
  44. * @param val integer value
  45. * @param num must be >= 0
  46. * @param den must be >= 1
  47. */
  48. static void frac_init(FFFrac *f, int64_t val, int64_t num, int64_t den)
  49. {
  50. num += (den >> 1);
  51. if (num >= den) {
  52. val += num / den;
  53. num = num % den;
  54. }
  55. f->val = val;
  56. f->num = num;
  57. f->den = den;
  58. }
  59. /**
  60. * Fractional addition to f: f = f + (incr / f->den).
  61. *
  62. * @param f fractional number
  63. * @param incr increment, can be positive or negative
  64. */
  65. static void frac_add(FFFrac *f, int64_t incr)
  66. {
  67. int64_t num, den;
  68. num = f->num + incr;
  69. den = f->den;
  70. if (num < 0) {
  71. f->val += num / den;
  72. num = num % den;
  73. if (num < 0) {
  74. num += den;
  75. f->val--;
  76. }
  77. } else if (num >= den) {
  78. f->val += num / den;
  79. num = num % den;
  80. }
  81. f->num = num;
  82. }
  83. AVRational ff_choose_timebase(AVFormatContext *s, AVStream *st, int min_precision)
  84. {
  85. AVRational q;
  86. int j;
  87. q = st->time_base;
  88. for (j=2; j<14; j+= 1+(j>2))
  89. while (q.den / q.num < min_precision && q.num % j == 0)
  90. q.num /= j;
  91. while (q.den / q.num < min_precision && q.den < (1<<24))
  92. q.den <<= 1;
  93. return q;
  94. }
  95. enum AVChromaLocation ff_choose_chroma_location(AVFormatContext *s, AVStream *st)
  96. {
  97. AVCodecParameters *par = st->codecpar;
  98. const AVPixFmtDescriptor *pix_desc = av_pix_fmt_desc_get(par->format);
  99. if (par->chroma_location != AVCHROMA_LOC_UNSPECIFIED)
  100. return par->chroma_location;
  101. if (pix_desc) {
  102. if (pix_desc->log2_chroma_h == 0) {
  103. return AVCHROMA_LOC_TOPLEFT;
  104. } else if (pix_desc->log2_chroma_w == 1 && pix_desc->log2_chroma_h == 1) {
  105. if (par->field_order == AV_FIELD_UNKNOWN || par->field_order == AV_FIELD_PROGRESSIVE) {
  106. switch (par->codec_id) {
  107. case AV_CODEC_ID_MJPEG:
  108. case AV_CODEC_ID_MPEG1VIDEO: return AVCHROMA_LOC_CENTER;
  109. }
  110. }
  111. if (par->field_order == AV_FIELD_UNKNOWN || par->field_order != AV_FIELD_PROGRESSIVE) {
  112. switch (par->codec_id) {
  113. case AV_CODEC_ID_MPEG2VIDEO: return AVCHROMA_LOC_LEFT;
  114. }
  115. }
  116. }
  117. }
  118. return AVCHROMA_LOC_UNSPECIFIED;
  119. }
/**
 * Allocate an AVFormatContext for muxing.
 *
 * The output format is taken from @p oformat if non-NULL, otherwise guessed
 * from @p format (a short name) or, failing that, from @p filename.
 *
 * @param avctx    receives the allocated context (set to NULL on failure)
 * @param oformat  explicit output format, may be NULL
 * @param format   output format short name, may be NULL
 * @param filename output filename used for guessing / stored in the context,
 *                 may be NULL
 * @return 0 on success, a negative AVERROR code on failure
 */
int avformat_alloc_output_context2(AVFormatContext **avctx, ff_const59 AVOutputFormat *oformat,
                                   const char *format, const char *filename)
{
    AVFormatContext *s = avformat_alloc_context();
    int ret = 0;

    *avctx = NULL;
    if (!s)
        goto nomem;

    if (!oformat) {
        if (format) {
            /* An explicit format name must resolve; no filename fallback. */
            oformat = av_guess_format(format, NULL, NULL);
            if (!oformat) {
                av_log(s, AV_LOG_ERROR, "Requested output format '%s' is not a suitable output format\n", format);
                ret = AVERROR(EINVAL);
                goto error;
            }
        } else {
            oformat = av_guess_format(NULL, filename, NULL);
            if (!oformat) {
                ret = AVERROR(EINVAL);
                av_log(s, AV_LOG_ERROR, "Unable to find a suitable output format for '%s'\n",
                       filename);
                goto error;
            }
        }
    }

    s->oformat = oformat;
    /* Allocate and default-initialize the muxer's private options struct. */
    if (s->oformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->oformat->priv_data_size);
        if (!s->priv_data)
            goto nomem;
        if (s->oformat->priv_class) {
            /* First member of the private struct is the AVClass pointer. */
            *(const AVClass**)s->priv_data= s->oformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    } else
        s->priv_data = NULL;

    if (filename) {
#if FF_API_FORMAT_FILENAME
FF_DISABLE_DEPRECATION_WARNINGS
        /* Keep the deprecated fixed-size filename field in sync. */
        av_strlcpy(s->filename, filename, sizeof(s->filename));
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        if (!(s->url = av_strdup(filename)))
            goto nomem;
    }

    *avctx = s;
    return 0;
nomem:
    av_log(s, AV_LOG_ERROR, "Out of memory\n");
    ret = AVERROR(ENOMEM);
error:
    /* Frees s and everything attached to it; safe with s == NULL. */
    avformat_free_context(s);
    return ret;
}
  175. static int validate_codec_tag(AVFormatContext *s, AVStream *st)
  176. {
  177. const AVCodecTag *avctag;
  178. int n;
  179. enum AVCodecID id = AV_CODEC_ID_NONE;
  180. int64_t tag = -1;
  181. /**
  182. * Check that tag + id is in the table
  183. * If neither is in the table -> OK
  184. * If tag is in the table with another id -> FAIL
  185. * If id is in the table with another tag -> FAIL unless strict < normal
  186. */
  187. for (n = 0; s->oformat->codec_tag[n]; n++) {
  188. avctag = s->oformat->codec_tag[n];
  189. while (avctag->id != AV_CODEC_ID_NONE) {
  190. if (avpriv_toupper4(avctag->tag) == avpriv_toupper4(st->codecpar->codec_tag)) {
  191. id = avctag->id;
  192. if (id == st->codecpar->codec_id)
  193. return 1;
  194. }
  195. if (avctag->id == st->codecpar->codec_id)
  196. tag = avctag->tag;
  197. avctag++;
  198. }
  199. }
  200. if (id != AV_CODEC_ID_NONE)
  201. return 0;
  202. if (tag >= 0 && (s->strict_std_compliance >= FF_COMPLIANCE_NORMAL))
  203. return 0;
  204. return 1;
  205. }
/**
 * Prepare the muxer for output: apply options, validate every stream's
 * parameters, resolve codec tags, allocate the muxer's private data, set
 * the "encoder" metadata tag and run the muxer's init() callback.
 *
 * @param s       the muxing context
 * @param options on input, user options; on return, the entries that were
 *                not consumed (only touched on success)
 * @return 0 if init must continue in write_header, 1 if the muxer's init()
 *         fully initialized the streams, negative AVERROR on failure
 */
static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;
    AVCodecParameters *par = NULL;
    const AVOutputFormat *of = s->oformat;
    const AVCodecDescriptor *desc;
    AVDictionaryEntry *e;

    /* Work on a copy so *options is only replaced on success. */
    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;
    /* Forward options to the muxer's private context, but only when its
     * AVClass pointer is intact (i.e. priv_data really is option-enabled). */
    if (s->priv_data && s->oformat->priv_class && *(const AVClass**)s->priv_data==s->oformat->priv_class &&
        (ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;

    /* Make sure s->url is always set; the `if` body below is shared by both
     * preprocessor branches. */
#if FF_API_FORMAT_FILENAME
FF_DISABLE_DEPRECATION_WARNINGS
    if (!s->url && !(s->url = av_strdup(s->filename))) {
FF_ENABLE_DEPRECATION_WARNINGS
#else
    if (!s->url && !(s->url = av_strdup(""))) {
#endif
        ret = AVERROR(ENOMEM);
        goto fail;
    }

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
    /* NOTE(review): "is not in set to" in this warning is a grammar typo
     * ("is not set to"); left as-is since it is a runtime string. */
    if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT) {
        if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
            av_log(s, AV_LOG_WARNING,
                   "The AVFormatContext is not in set to bitexact mode, only "
                   "the AVCodecContext. If this is not intended, set "
                   "AVFormatContext.flags |= AVFMT_FLAG_BITEXACT.\n");
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    // some sanity checks
    if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "No streams to mux were specified\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st  = s->streams[i];
        par = st->codecpar;

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
        /* Legacy path: copy parameters from the deprecated AVStream.codec
         * when the caller never filled codecpar. */
        if (st->codecpar->codec_type == AVMEDIA_TYPE_UNKNOWN &&
            st->codec->codec_type    != AVMEDIA_TYPE_UNKNOWN) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec to pass codec "
                                      "parameters to muxers is deprecated, use AVStream.codecpar "
                                      "instead.\n");
            ret = avcodec_parameters_from_context(st->codecpar, st->codec);
            if (ret < 0)
                goto fail;
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (!st->time_base.num) {
            /* fall back on the default timebase values */
            if (par->codec_type == AVMEDIA_TYPE_AUDIO && par->sample_rate)
                avpriv_set_pts_info(st, 64, 1, par->sample_rate);
            else
                avpriv_set_pts_info(st, 33, 1, 90000);
        }

        switch (par->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (par->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            /* Derive block_align from bits-per-sample for fixed-size codecs
             * (0 when the codec has no fixed bits-per-sample). */
            if (!par->block_align)
                par->block_align = par->channels *
                                   av_get_bits_per_sample(par->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if ((par->width <= 0 || par->height <= 0) &&
                !(of->flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            /* Reject a genuine mismatch between the container-level and
             * codec-level aspect ratios; a ~0.4% tolerance absorbs rounding. */
            if (av_cmp_q(st->sample_aspect_ratio, par->sample_aspect_ratio)
                && fabs(av_q2d(st->sample_aspect_ratio) - av_q2d(par->sample_aspect_ratio)) > 0.004*av_q2d(st->sample_aspect_ratio)
            ) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    par->sample_aspect_ratio.num != 0 &&
                    par->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                                            "(%d/%d) and encoder layer (%d/%d)\n",
                           st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                           par->sample_aspect_ratio.num,
                           par->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }

        desc = avcodec_descriptor_get(par->codec_id);
        if (desc && desc->props & AV_CODEC_PROP_REORDER)
            st->internal->reorder = 1;

        st->internal->is_intra_only = ff_is_intra_only(par->codec_id);

        if (of->codec_tag) {
            if (   par->codec_tag
                && par->codec_id == AV_CODEC_ID_RAWVIDEO
                && (   av_codec_get_tag(of->codec_tag, par->codec_id) == 0
                    || av_codec_get_tag(of->codec_tag, par->codec_id) == MKTAG('r', 'a', 'w', ' '))
                && !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi/mov, we override it here
                par->codec_tag = 0;
            }
            if (par->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    const uint32_t otag = av_codec_get_tag(s->oformat->codec_tag, par->codec_id);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s incompatible with output codec id '%d' (%s)\n",
                           av_fourcc2str(par->codec_tag), par->codec_id, av_fourcc2str(otag));
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                par->codec_tag = av_codec_get_tag(of->codec_tag, par->codec_id);
        }

        /* Attachments never produce packets and are excluded from
         * interleaving bookkeeping. */
        if (par->codec_type != AVMEDIA_TYPE_ATTACHMENT)
            s->internal->nb_interleaved_streams++;
    }

    /* Private data may still be unallocated when the caller bypassed
     * avformat_alloc_output_context2(). */
    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->priv_class) {
            *(const AVClass **)s->priv_data = of->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict2(s->priv_data, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    } else {
        av_dict_set(&s->metadata, "encoder", NULL, 0);
    }

    /* Drop all per-stream "encoder-*" tags from the global metadata. */
    for (e = NULL; e = av_dict_get(s->metadata, "encoder-", e, AV_DICT_IGNORE_SUFFIX); ) {
        av_dict_set(&s->metadata, e->key, NULL, 0);
    }

    /* Hand the unconsumed options back to the caller. */
    if (options) {
        av_dict_free(options);
        *options = tmp;
    }

    if (s->oformat->init) {
        if ((ret = s->oformat->init(s)) < 0) {
            if (s->oformat->deinit)
                s->oformat->deinit(s);
            return ret;
        }
        /* init() == 0 means the streams were fully initialized there:
         * report 1 to the caller, 0 otherwise. */
        return ret == 0;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}
  377. static int init_pts(AVFormatContext *s)
  378. {
  379. int i;
  380. AVStream *st;
  381. /* init PTS generation */
  382. for (i = 0; i < s->nb_streams; i++) {
  383. int64_t den = AV_NOPTS_VALUE;
  384. st = s->streams[i];
  385. switch (st->codecpar->codec_type) {
  386. case AVMEDIA_TYPE_AUDIO:
  387. den = (int64_t)st->time_base.num * st->codecpar->sample_rate;
  388. break;
  389. case AVMEDIA_TYPE_VIDEO:
  390. den = (int64_t)st->time_base.num * st->time_base.den;
  391. break;
  392. default:
  393. break;
  394. }
  395. if (!st->internal->priv_pts)
  396. st->internal->priv_pts = av_mallocz(sizeof(*st->internal->priv_pts));
  397. if (!st->internal->priv_pts)
  398. return AVERROR(ENOMEM);
  399. if (den != AV_NOPTS_VALUE) {
  400. if (den <= 0)
  401. return AVERROR_INVALIDDATA;
  402. frac_init(st->internal->priv_pts, 0, 0, den);
  403. }
  404. }
  405. if (s->avoid_negative_ts < 0) {
  406. av_assert2(s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_AUTO);
  407. if (s->oformat->flags & (AVFMT_TS_NEGATIVE | AVFMT_NOTIMESTAMPS)) {
  408. s->avoid_negative_ts = 0;
  409. } else
  410. s->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE;
  411. }
  412. return 0;
  413. }
  414. static void flush_if_needed(AVFormatContext *s)
  415. {
  416. if (s->pb && s->pb->error >= 0) {
  417. if (s->flush_packets == 1 || s->flags & AVFMT_FLAG_FLUSH_PACKETS)
  418. avio_flush(s->pb);
  419. else if (s->flush_packets && !(s->oformat->flags & AVFMT_NOFILE))
  420. avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_FLUSH_POINT);
  421. }
  422. }
  423. static void deinit_muxer(AVFormatContext *s)
  424. {
  425. if (s->oformat && s->oformat->deinit && s->internal->initialized)
  426. s->oformat->deinit(s);
  427. s->internal->initialized =
  428. s->internal->streams_initialized = 0;
  429. }
  430. int avformat_init_output(AVFormatContext *s, AVDictionary **options)
  431. {
  432. int ret = 0;
  433. if ((ret = init_muxer(s, options)) < 0)
  434. return ret;
  435. s->internal->initialized = 1;
  436. s->internal->streams_initialized = ret;
  437. if (s->oformat->init && ret) {
  438. if ((ret = init_pts(s)) < 0)
  439. return ret;
  440. return AVSTREAM_INIT_IN_INIT_OUTPUT;
  441. }
  442. return AVSTREAM_INIT_IN_WRITE_HEADER;
  443. }
/**
 * Write the stream header to the output file.
 *
 * Runs avformat_init_output() first unless the caller already did, then
 * invokes the muxer's write_header() callback (if any), bracketing it with
 * AVIO data markers, and finally sets up PTS generation if it was not done
 * during init.
 *
 * @return on success, the streams_initialized state as it was on entry
 *         (so the caller can tell where init happened), negative AVERROR
 *         on failure (the muxer is deinitialized in that case)
 */
int avformat_write_header(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0;
    /* Snapshot the state before we possibly initialize ourselves. */
    int already_initialized = s->internal->initialized;
    int streams_already_initialized = s->internal->streams_initialized;

    if (!already_initialized)
        if ((ret = avformat_init_output(s, options)) < 0)
            return ret;

    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_HEADER);
    if (s->oformat->write_header) {
        ret = s->oformat->write_header(s);
        /* Promote an I/O error even when the callback reported success. */
        if (ret >= 0 && s->pb && s->pb->error < 0)
            ret = s->pb->error;
        if (ret < 0)
            goto fail;
        flush_if_needed(s);
    }
    if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
        avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_UNKNOWN);

    /* If the muxer's init() did not initialize the streams, the timebases
     * are final only now, after write_header(). */
    if (!s->internal->streams_initialized) {
        if ((ret = init_pts(s)) < 0)
            goto fail;
    }

    return streams_already_initialized;

fail:
    deinit_muxer(s);
    return ret;
}
  473. #define AV_PKT_FLAG_UNCODED_FRAME 0x2000
  474. #if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
  475. FF_DISABLE_DEPRECATION_WARNINGS
  476. //FIXME merge with compute_pkt_fields
/**
 * Fill in missing pts/dts on a packet before muxing and validate timestamp
 * monotonicity (deprecated compatibility path; compiled only under
 * FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX).
 *
 * @return 0 on success, AVERROR(EINVAL) on invalid timestamps
 */
static int compute_muxer_pkt_fields(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
    /* delay > 0 when the codec may reorder frames (B-frames). */
    int delay = FFMAX(st->codecpar->video_delay, st->internal->avctx->max_b_frames > 0);
    int i;
    int frame_size;

    /* One-shot warning for packets without timestamps (skipped for
     * attached pictures, which legitimately have none unless timed). */
    if (!s->internal->missing_ts_warning &&
        !(s->oformat->flags & AVFMT_NOTIMESTAMPS) &&
        (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC) || (st->disposition & AV_DISPOSITION_TIMED_THUMBNAILS)) &&
        (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE)) {
        av_log(s, AV_LOG_WARNING,
               "Timestamps are unset in a packet for stream %d. "
               "This is deprecated and will stop working in the future. "
               "Fix your code to set the timestamps properly\n", st->index);
        s->internal->missing_ts_warning = 1;
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "compute_muxer_pkt_fields: pts:%s dts:%s cur_dts:%s b:%d size:%d st:%d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), delay, pkt->size, pkt->stream_index);

    /* Without reordering, dts is a valid pts. */
    if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE && delay == 0)
        pkt->pts = pkt->dts;

    //XXX/FIXME this is a temporary hack until all encoders output pts
    if ((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !delay) {
        static int warned;
        if (!warned) {
            av_log(s, AV_LOG_WARNING, "Encoder did not produce proper pts, making some up.\n");
            warned = 1;
        }
        pkt->dts =
//        pkt->pts= st->cur_dts;
            pkt->pts = st->internal->priv_pts->val;
    }

    //calculate dts from pts
    if (pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY) {
        /* Keep a sorted window of the last `delay+1` pts values; the
         * smallest one is the dts of the current packet. */
        st->internal->pts_buffer[0] = pkt->pts;
        for (i = 1; i < delay + 1 && st->internal->pts_buffer[i] == AV_NOPTS_VALUE; i++)
            st->internal->pts_buffer[i] = pkt->pts + (i - delay - 1) * pkt->duration;
        for (i = 0; i<delay && st->internal->pts_buffer[i] > st->internal->pts_buffer[i + 1]; i++)
            FFSWAP(int64_t, st->internal->pts_buffer[i], st->internal->pts_buffer[i + 1]);

        pkt->dts = st->internal->pts_buffer[0];
    }

    /* dts must increase strictly, or non-strictly for formats that allow it
     * (and always non-strictly for subtitle/data streams). */
    if (st->cur_dts && st->cur_dts != AV_NOPTS_VALUE &&
        ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) &&
          st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE &&
          st->codecpar->codec_type != AVMEDIA_TYPE_DATA &&
          st->cur_dts >= pkt->dts) || st->cur_dts > pkt->dts)) {
        av_log(s, AV_LOG_ERROR,
               "Application provided invalid, non monotonically increasing dts to muxer in stream %d: %s >= %s\n",
               st->index, av_ts2str(st->cur_dts), av_ts2str(pkt->dts));
        return AVERROR(EINVAL);
    }
    if (pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts < pkt->dts) {
        av_log(s, AV_LOG_ERROR,
               "pts (%s) < dts (%s) in stream %d\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts),
               st->index);
        return AVERROR(EINVAL);
    }

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%s dts2:%s\n",
               av_ts2str(pkt->pts), av_ts2str(pkt->dts));

    st->cur_dts = pkt->dts;
    st->internal->priv_pts->val = pkt->dts;

    /* update pts */
    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* Uncoded frames carry an AVFrame* in pkt->data. */
        frame_size = (pkt->flags & AV_PKT_FLAG_UNCODED_FRAME) ?
                     (*(AVFrame **)pkt->data)->nb_samples :
                     av_get_audio_frame_duration(st->codec, pkt->size);
        /* HACK/FIXME, we skip the initial 0 size packets as they are most
         * likely equal to the encoder delay, but it would be better if we
         * had the real timestamps from the encoder */
        if (frame_size >= 0 && (pkt->size || st->internal->priv_pts->num != st->internal->priv_pts->den >> 1 || st->internal->priv_pts->val)) {
            frac_add(st->internal->priv_pts, (int64_t)st->time_base.den * frame_size);
        }
        break;
    case AVMEDIA_TYPE_VIDEO:
        /* Advance by exactly one frame in stream timebase units. */
        frac_add(st->internal->priv_pts, (int64_t)st->time_base.den * st->time_base.num);
        break;
    }
    return 0;
}
  558. FF_ENABLE_DEPRECATION_WARNINGS
  559. #endif
  560. static void guess_pkt_duration(AVFormatContext *s, AVStream *st, AVPacket *pkt)
  561. {
  562. if (pkt->duration < 0 && st->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE) {
  563. av_log(s, AV_LOG_WARNING, "Packet with invalid duration %"PRId64" in stream %d\n",
  564. pkt->duration, pkt->stream_index);
  565. pkt->duration = 0;
  566. }
  567. if (pkt->duration)
  568. return;
  569. switch (st->codecpar->codec_type) {
  570. case AVMEDIA_TYPE_VIDEO:
  571. if (st->avg_frame_rate.num > 0 && st->avg_frame_rate.den > 0) {
  572. pkt->duration = av_rescale_q(1, av_inv_q(st->avg_frame_rate),
  573. st->time_base);
  574. } else if (st->time_base.num * 1000LL > st->time_base.den)
  575. pkt->duration = 1;
  576. break;
  577. case AVMEDIA_TYPE_AUDIO: {
  578. int frame_size = av_get_audio_frame_duration2(st->codecpar, pkt->size);
  579. if (frame_size && st->codecpar->sample_rate) {
  580. pkt->duration = av_rescale_q(frame_size,
  581. (AVRational){1, st->codecpar->sample_rate},
  582. st->time_base);
  583. }
  584. break;
  585. }
  586. }
  587. }
/**
 * Shift timestamps and call muxer; the original pts/dts are not kept.
 *
 * Applies, in order: the user-requested output_ts_offset, then the
 * avoid_negative_ts offset (established lazily from the first offending
 * packet), then hands the packet to the muxer callback.
 *
 * FIXME: this function should NEVER get undefined pts/dts beside when the
 * AVFMT_NOTIMESTAMPS is set.
 * Those additional safety checks should be dropped once the correct checks
 * are set in the callers.
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret;

    // If the timestamp offsetting below is adjusted, adjust
    // ff_interleaved_peek similarly.
    if (s->output_ts_offset) {
        AVStream *st = s->streams[pkt->stream_index];
        int64_t offset = av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;
    }

    if (s->avoid_negative_ts > 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int64_t offset = st->internal->mux_ts_offset;
        /* Which timestamp the shift is keyed on depends on the mode. */
        int64_t ts = s->internal->avoid_negative_ts_use_pts ? pkt->pts : pkt->dts;

        /* Establish the global shift from the first packet that needs one
         * (or from the very first timestamp in MAKE_ZERO mode). */
        if (s->internal->offset == AV_NOPTS_VALUE && ts != AV_NOPTS_VALUE &&
            (ts < 0 || s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO)) {
            s->internal->offset = -ts;
            s->internal->offset_timebase = st->time_base;
        }

        /* Cache the global shift in this stream's timebase on first use. */
        if (s->internal->offset != AV_NOPTS_VALUE && !offset) {
            offset = st->internal->mux_ts_offset =
                av_rescale_q_rnd(s->internal->offset,
                                 s->internal->offset_timebase,
                                 st->time_base,
                                 AV_ROUND_UP);
        }

        if (pkt->dts != AV_NOPTS_VALUE)
            pkt->dts += offset;
        if (pkt->pts != AV_NOPTS_VALUE)
            pkt->pts += offset;

        if (s->internal->avoid_negative_ts_use_pts) {
            if (pkt->pts != AV_NOPTS_VALUE && pkt->pts < 0) {
                av_log(s, AV_LOG_WARNING, "failed to avoid negative "
                       "pts %s in stream %d.\n"
                       "Try -avoid_negative_ts 1 as a possible workaround.\n",
                       av_ts2str(pkt->pts),
                       pkt->stream_index
                );
            }
        } else {
            av_assert2(pkt->dts == AV_NOPTS_VALUE || pkt->dts >= 0 || s->max_interleave_delta > 0);
            if (pkt->dts != AV_NOPTS_VALUE && pkt->dts < 0) {
                av_log(s, AV_LOG_WARNING,
                       "Packets poorly interleaved, failed to avoid negative "
                       "timestamp %s in stream %d.\n"
                       "Try -max_interleave_delta 0 as a possible workaround.\n",
                       av_ts2str(pkt->dts),
                       pkt->stream_index
                );
            }
        }
    }

    /* Uncoded frames carry an AVFrame* in pkt->data instead of a bitstream. */
    if ((pkt->flags & AV_PKT_FLAG_UNCODED_FRAME)) {
        AVFrame **frame = (AVFrame **)pkt->data;
        av_assert0(pkt->size == sizeof(*frame));
        ret = s->oformat->write_uncoded_frame(s, pkt->stream_index, frame, 0);
    } else {
        ret = s->oformat->write_packet(s, pkt);
    }

    if (s->pb && ret >= 0) {
        flush_if_needed(s);
        /* Promote an I/O-level error over the muxer's success. */
        if (s->pb->error < 0)
            ret = s->pb->error;
    }

    if (ret >= 0)
        s->streams[pkt->stream_index]->nb_frames++;

    return ret;
}
  667. static int check_packet(AVFormatContext *s, AVPacket *pkt)
  668. {
  669. if (pkt->stream_index < 0 || pkt->stream_index >= s->nb_streams) {
  670. av_log(s, AV_LOG_ERROR, "Invalid packet stream index: %d\n",
  671. pkt->stream_index);
  672. return AVERROR(EINVAL);
  673. }
  674. if (s->streams[pkt->stream_index]->codecpar->codec_type == AVMEDIA_TYPE_ATTACHMENT) {
  675. av_log(s, AV_LOG_ERROR, "Received a packet for an attachment stream.\n");
  676. return AVERROR(EINVAL);
  677. }
  678. return 0;
  679. }
/**
 * Sanitize an incoming packet before muxing: mirror pts/dts onto each other
 * for non-reordering streams, enforce that timestamps are set and that dts
 * is monotonic, and force the keyframe flag on intra-only streams.
 *
 * The timestamp checks are compiled out while the deprecated
 * compute_muxer_pkt_fields() path still performs them.
 *
 * @return 0 on success, AVERROR(EINVAL) on invalid timestamps
 */
static int prepare_input_packet(AVFormatContext *s, AVStream *st, AVPacket *pkt)
{
#if !FF_API_COMPUTE_PKT_FIELDS2 || !FF_API_LAVF_AVCTX
    /* sanitize the timestamps */
    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        /* when there is no reordering (so dts is equal to pts), but
         * only one of them is set, set the other as well */
        if (!st->internal->reorder) {
            if (pkt->pts == AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE)
                pkt->pts = pkt->dts;
            if (pkt->dts == AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE)
                pkt->dts = pkt->pts;
        }

        /* check that the timestamps are set */
        if (pkt->pts == AV_NOPTS_VALUE || pkt->dts == AV_NOPTS_VALUE) {
            av_log(s, AV_LOG_ERROR,
                   "Timestamps are unset in a packet for stream %d\n", st->index);
            return AVERROR(EINVAL);
        }

        /* check that the dts are increasing (or at least non-decreasing,
         * if the format allows it */
        if (st->cur_dts != AV_NOPTS_VALUE &&
            ((!(s->oformat->flags & AVFMT_TS_NONSTRICT) && st->cur_dts >= pkt->dts) ||
             st->cur_dts > pkt->dts)) {
            av_log(s, AV_LOG_ERROR,
                   "Application provided invalid, non monotonically increasing "
                   "dts to muxer in stream %d: %" PRId64 " >= %" PRId64 "\n",
                   st->index, st->cur_dts, pkt->dts);
            return AVERROR(EINVAL);
        }

        if (pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_ERROR, "pts %" PRId64 " < dts %" PRId64 " in stream %d\n",
                   pkt->pts, pkt->dts, st->index);
            return AVERROR(EINVAL);
        }
    }
#endif

    /* update flags */
    if (st->internal->is_intra_only)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return 0;
}
  722. #define CHUNK_START 0x1000
/**
 * Insert a packet into the global interleaving queue at the position chosen
 * by the comparator, keeping per-stream tail pointers up to date.
 *
 * Takes ownership of pkt (it is moved into a newly allocated list node and
 * unreferenced on failure).
 *
 * @param compare ordering predicate; returns nonzero when its second packet
 *                should come before its third
 * @return 0 on success, negative AVERROR on failure
 */
int ff_interleave_add_packet(AVFormatContext *s, AVPacket *pkt,
                             int (*compare)(AVFormatContext *, const AVPacket *, const AVPacket *))
{
    int ret;
    PacketList **next_point, *this_pktl;
    AVStream *st = s->streams[pkt->stream_index];
    /* Chunked mode groups packets into size/duration-bounded chunks that
     * are never split by interleaving. */
    int chunked = s->max_chunk_size || s->max_chunk_duration;

    this_pktl = av_malloc(sizeof(PacketList));
    if (!this_pktl) {
        av_packet_unref(pkt);
        return AVERROR(ENOMEM);
    }
    if ((ret = av_packet_make_refcounted(pkt)) < 0) {
        av_free(this_pktl);
        av_packet_unref(pkt);
        return ret;
    }

    av_packet_move_ref(&this_pktl->pkt, pkt);
    pkt = &this_pktl->pkt;

    /* Start searching after this stream's last queued packet (packets of
     * one stream must keep their relative order). */
    if (st->internal->last_in_packet_buffer) {
        next_point = &(st->internal->last_in_packet_buffer->next);
    } else {
        next_point = &s->internal->packet_buffer;
    }

    if (chunked) {
        uint64_t max= av_rescale_q_rnd(s->max_chunk_duration, AV_TIME_BASE_Q, st->time_base, AV_ROUND_UP);
        st->internal->interleaver_chunk_size += pkt->size;
        st->internal->interleaver_chunk_duration += pkt->duration;
        /* Chunk limit reached: this packet starts a new chunk. */
        if (   (s->max_chunk_size && st->internal->interleaver_chunk_size > s->max_chunk_size)
            || (max && st->internal->interleaver_chunk_duration > max)) {
            st->internal->interleaver_chunk_size = 0;
            pkt->flags |= CHUNK_START;
            if (max && st->internal->interleaver_chunk_duration > max) {
                /* Re-align the next chunk boundary to a multiple of max
                 * (video gets a half-chunk phase offset). */
                int64_t syncoffset = (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)*max/2;
                int64_t syncto = av_rescale(pkt->dts + syncoffset, 1, max)*max - syncoffset;
                st->internal->interleaver_chunk_duration += (pkt->dts - syncto)/8 - max;
            } else
                st->internal->interleaver_chunk_duration = 0;
        }
    }
    if (*next_point) {
        /* Mid-chunk packets are appended right after the stream's previous
         * packet, never reordered. */
        if (chunked && !(pkt->flags & CHUNK_START))
            goto next_non_null;

        if (compare(s, &s->internal->packet_buffer_end->pkt, pkt)) {
            /* Walk forward until we find the insertion point. */
            while (   *next_point
                   && ((chunked && !((*next_point)->pkt.flags&CHUNK_START))
                       || !compare(s, &(*next_point)->pkt, pkt)))
                next_point = &(*next_point)->next;
            if (*next_point)
                goto next_non_null;
        } else {
            /* Sorts after the current tail: append at the end. */
            next_point = &(s->internal->packet_buffer_end->next);
        }
    }
    av_assert1(!*next_point);

    s->internal->packet_buffer_end = this_pktl;
next_non_null:

    this_pktl->next = *next_point;

    st->internal->last_in_packet_buffer = *next_point = this_pktl;

    return 0;
}
/**
 * Interleaving comparator: order packets by dts across timebases, with an
 * optional audio_preload bias that schedules audio packets earlier.
 *
 * @return nonzero if `next` should be muxed before `pkt`
 */
static int interleave_compare_dts(AVFormatContext *s, const AVPacket *next,
                                  const AVPacket *pkt)
{
    AVStream *st = s->streams[pkt->stream_index];
    AVStream *st2 = s->streams[next->stream_index];
    int comp = av_compare_ts(next->dts, st2->time_base, pkt->dts,
                             st->time_base);

    if (s->audio_preload) {
        int preload = st ->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
        int preload2 = st2->codecpar->codec_type == AVMEDIA_TYPE_AUDIO;
        /* Only bias when exactly one of the two packets is audio. */
        if (preload != preload2) {
            int64_t ts, ts2;
            preload *= s->audio_preload;
            preload2 *= s->audio_preload;
            ts = av_rescale_q(pkt ->dts, st ->time_base, AV_TIME_BASE_Q) - preload;
            ts2= av_rescale_q(next->dts, st2->time_base, AV_TIME_BASE_Q) - preload2;
            /* Tie after rescaling: recompute the difference exactly in a
             * common unit to break it (ts2 set to 0 so only the sign of
             * the cross-multiplied difference matters). */
            if (ts == ts2) {
                ts = ((uint64_t)pkt ->dts*st ->time_base.num*AV_TIME_BASE - (uint64_t)preload *st ->time_base.den)*st2->time_base.den
                   - ((uint64_t)next->dts*st2->time_base.num*AV_TIME_BASE - (uint64_t)preload2*st2->time_base.den)*st ->time_base.den;
                ts2 = 0;
            }
            comp = (ts2 > ts) - (ts2 < ts);
        }
    }

    /* Equal timestamps: fall back on stream index for a stable order. */
    if (comp == 0)
        return pkt->stream_index < next->stream_index;
    return comp > 0;
}
/**
 * Interleave packets from the muxing queue, ordered by dts.
 *
 * @param s     muxing context
 * @param out   on success (return 1) receives the next packet to mux;
 *              ownership of its payload transfers to the caller
 * @param pkt   new packet to enqueue, or NULL when draining
 * @param flush nonzero to force output of all queued packets
 * @return 1 if a packet was output, 0 if no packet could be output,
 *         < 0 on error
 */
int ff_interleave_packet_per_dts(AVFormatContext *s, AVPacket *out,
                                 AVPacket *pkt, int flush)
{
    PacketList *pktl;
    int stream_count = 0;
    int noninterleaved_count = 0;
    int i, ret;
    int eof = flush;

    if (pkt) {
        if ((ret = ff_interleave_add_packet(s, pkt, interleave_compare_dts)) < 0)
            return ret;
    }

    /* Count streams that currently have a packet queued, vs. streams with
     * nothing queued that are still expected to deliver data (attachments
     * and VP8/VP9 are excluded from the latter). */
    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->internal->last_in_packet_buffer) {
            ++stream_count;
        } else if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_ATTACHMENT &&
                   s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP8 &&
                   s->streams[i]->codecpar->codec_id != AV_CODEC_ID_VP9) {
            ++noninterleaved_count;
        }
    }

    /* Every interleaved stream has data queued: safe to emit the head. */
    if (s->internal->nb_interleaved_streams == stream_count)
        flush = 1;

    /* Bound muxing-queue latency: if the dts spread between the queue head
     * and the newest queued packet of any stream exceeds
     * max_interleave_delta, force output even though some streams have no
     * packet queued yet. */
    if (s->max_interleave_delta > 0 &&
        s->internal->packet_buffer &&
        !flush &&
        s->internal->nb_interleaved_streams == stream_count+noninterleaved_count
    ) {
        AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
        int64_t delta_dts = INT64_MIN;
        int64_t top_dts = av_rescale_q(top_pkt->dts,
                                       s->streams[top_pkt->stream_index]->time_base,
                                       AV_TIME_BASE_Q);

        for (i = 0; i < s->nb_streams; i++) {
            int64_t last_dts;
            const PacketList *last = s->streams[i]->internal->last_in_packet_buffer;

            if (!last)
                continue;

            last_dts = av_rescale_q(last->pkt.dts,
                                    s->streams[i]->time_base,
                                    AV_TIME_BASE_Q);
            delta_dts = FFMAX(delta_dts, last_dts - top_dts);
        }

        if (delta_dts > s->max_interleave_delta) {
            av_log(s, AV_LOG_DEBUG,
                   "Delay between the first packet and last packet in the "
                   "muxing queue is %"PRId64" > %"PRId64": forcing output\n",
                   delta_dts, s->max_interleave_delta);
            flush = 1;
        }
    }

    /* AVFMT_FLAG_SHORTEST: at EOF latch the dts of the queue head; output
     * beyond this point is discarded below. */
    if (s->internal->packet_buffer &&
        eof &&
        (s->flags & AVFMT_FLAG_SHORTEST) &&
        s->internal->shortest_end == AV_NOPTS_VALUE) {
        AVPacket *top_pkt = &s->internal->packet_buffer->pkt;

        s->internal->shortest_end = av_rescale_q(top_pkt->dts,
                                                 s->streams[top_pkt->stream_index]->time_base,
                                                 AV_TIME_BASE_Q);
    }

    if (s->internal->shortest_end != AV_NOPTS_VALUE) {
        /* Drop queued packets whose dts lies past the shortest stream's
         * end (with a 1-unit tolerance). */
        while (s->internal->packet_buffer) {
            AVPacket *top_pkt = &s->internal->packet_buffer->pkt;
            AVStream *st;
            int64_t top_dts = av_rescale_q(top_pkt->dts,
                                           s->streams[top_pkt->stream_index]->time_base,
                                           AV_TIME_BASE_Q);

            if (s->internal->shortest_end + 1 >= top_dts)
                break;

            pktl = s->internal->packet_buffer;
            st   = s->streams[pktl->pkt.stream_index];

            s->internal->packet_buffer = pktl->next;
            if (!s->internal->packet_buffer)
                s->internal->packet_buffer_end = NULL;

            /* Keep the per-stream tail pointer consistent with the list. */
            if (st->internal->last_in_packet_buffer == pktl)
                st->internal->last_in_packet_buffer = NULL;

            av_packet_unref(&pktl->pkt);
            av_freep(&pktl);
            flush = 0;
        }
    }

    if (stream_count && flush) {
        /* Pop the head of the queue and hand its packet to the caller. */
        AVStream *st;
        pktl = s->internal->packet_buffer;
        *out = pktl->pkt;

        st = s->streams[out->stream_index];

        s->internal->packet_buffer = pktl->next;
        if (!s->internal->packet_buffer)
            s->internal->packet_buffer_end = NULL;

        if (st->internal->last_in_packet_buffer == pktl)
            st->internal->last_in_packet_buffer = NULL;
        av_freep(&pktl);

        return 1;
    } else {
        return 0;
    }
}
  909. int ff_get_muxer_ts_offset(AVFormatContext *s, int stream_index, int64_t *offset)
  910. {
  911. AVStream *st;
  912. if (stream_index < 0 || stream_index >= s->nb_streams)
  913. return AVERROR(EINVAL);
  914. st = s->streams[stream_index];
  915. *offset = st->internal->mux_ts_offset;
  916. if (s->output_ts_offset)
  917. *offset += av_rescale_q(s->output_ts_offset, AV_TIME_BASE_Q, st->time_base);
  918. return 0;
  919. }
  920. const AVPacket *ff_interleaved_peek(AVFormatContext *s, int stream)
  921. {
  922. PacketList *pktl = s->internal->packet_buffer;
  923. while (pktl) {
  924. if (pktl->pkt.stream_index == stream) {
  925. return &pktl->pkt;
  926. }
  927. pktl = pktl->next;
  928. }
  929. return NULL;
  930. }
  931. /**
  932. * Interleave an AVPacket correctly so it can be muxed.
  933. * @param out the interleaved packet will be output here
  934. * @param in the input packet; will always be blank on return if not NULL
  935. * @param flush 1 if no further packets are available as input and all
  936. * remaining packets should be output
  937. * @return 1 if a packet was output, 0 if no packet could be output,
  938. * < 0 if an error occurred
  939. */
  940. static int interleave_packet(AVFormatContext *s, AVPacket *out, AVPacket *in, int flush)
  941. {
  942. if (s->oformat->interleave_packet) {
  943. return s->oformat->interleave_packet(s, out, in, flush);
  944. } else
  945. return ff_interleave_packet_per_dts(s, out, in, flush);
  946. }
  947. static int check_bitstream(AVFormatContext *s, AVStream *st, AVPacket *pkt)
  948. {
  949. int ret;
  950. if (!(s->flags & AVFMT_FLAG_AUTO_BSF))
  951. return 1;
  952. if (s->oformat->check_bitstream) {
  953. if (!st->internal->bitstream_checked) {
  954. if ((ret = s->oformat->check_bitstream(s, pkt)) < 0)
  955. return ret;
  956. else if (ret == 1)
  957. st->internal->bitstream_checked = 1;
  958. }
  959. }
  960. return 1;
  961. }
  962. static int interleaved_write_packet(AVFormatContext *s, AVPacket *pkt, int flush)
  963. {
  964. for (;; ) {
  965. AVPacket opkt;
  966. int ret = interleave_packet(s, &opkt, pkt, flush);
  967. if (ret <= 0)
  968. return ret;
  969. pkt = NULL;
  970. ret = write_packet(s, &opkt);
  971. av_packet_unref(&opkt);
  972. if (ret < 0)
  973. return ret;
  974. }
  975. }
  976. static int write_packet_common(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved)
  977. {
  978. int ret;
  979. if (s->debug & FF_FDEBUG_TS)
  980. av_log(s, AV_LOG_DEBUG, "%s size:%d dts:%s pts:%s\n", __FUNCTION__,
  981. pkt->size, av_ts2str(pkt->dts), av_ts2str(pkt->pts));
  982. guess_pkt_duration(s, st, pkt);
  983. #if FF_API_COMPUTE_PKT_FIELDS2 && FF_API_LAVF_AVCTX
  984. if ((ret = compute_muxer_pkt_fields(s, st, pkt)) < 0 && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
  985. return ret;
  986. #endif
  987. if (interleaved) {
  988. if (pkt->dts == AV_NOPTS_VALUE && !(s->oformat->flags & AVFMT_NOTIMESTAMPS))
  989. return AVERROR(EINVAL);
  990. return interleaved_write_packet(s, pkt, 0);
  991. } else {
  992. return write_packet(s, pkt);
  993. }
  994. }
/* Pass pkt through the stream's bitstream filter and mux every packet
 * the filter produces. pkt may be blank (used at EOF to drain the
 * filter). Returns < 0 on error, >= 0 otherwise. */
static int write_packets_from_bsfs(AVFormatContext *s, AVStream *st, AVPacket *pkt, int interleaved)
{
    AVBSFContext *bsfc = st->internal->bsfc;
    int ret;

    if ((ret = av_bsf_send_packet(bsfc, pkt)) < 0) {
        av_log(s, AV_LOG_ERROR,
               "Failed to send packet to filter %s for stream %d\n",
               bsfc->filter->name, st->index);
        return ret;
    }

    do {
        ret = av_bsf_receive_packet(bsfc, pkt);
        if (ret < 0) {
            /* EAGAIN/EOF are normal filter states, not errors. */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                return 0;
            av_log(s, AV_LOG_ERROR, "Error applying bitstream filters to an output "
                   "packet for stream #%d: %s\n", st->index, av_err2str(ret));
            /* NOTE(review): 'continue' re-tests the do-while condition with
             * ret < 0, so it exits the loop and returns ret — the same
             * outcome as the explicit return below. Presumably a leftover;
             * confirm the intended non-EXPLODE behavior. */
            if (!(s->error_recognition & AV_EF_EXPLODE) && ret != AVERROR(ENOMEM))
                continue;
            return ret;
        }
        /* Filter output is in the bsf's time base; convert back to the
         * stream time base before muxing. */
        av_packet_rescale_ts(pkt, bsfc->time_base_out, st->time_base);
        ret = write_packet_common(s, st, pkt, interleaved);
        if (ret >= 0 && !interleaved) // a successful write_packet_common already unrefed pkt for interleaved
            av_packet_unref(pkt);
    } while (ret >= 0);

    return ret;
}
  1023. static int write_packets_common(AVFormatContext *s, AVPacket *pkt, int interleaved)
  1024. {
  1025. AVStream *st;
  1026. int ret = check_packet(s, pkt);
  1027. if (ret < 0)
  1028. return ret;
  1029. st = s->streams[pkt->stream_index];
  1030. ret = prepare_input_packet(s, st, pkt);
  1031. if (ret < 0)
  1032. return ret;
  1033. ret = check_bitstream(s, st, pkt);
  1034. if (ret < 0)
  1035. return ret;
  1036. if (st->internal->bsfc) {
  1037. return write_packets_from_bsfs(s, st, pkt, interleaved);
  1038. } else {
  1039. return write_packet_common(s, st, pkt, interleaved);
  1040. }
  1041. }
int av_write_frame(AVFormatContext *s, AVPacket *in)
{
    AVPacket local_pkt, *pkt = &local_pkt;
    int ret;

    if (!in) {
        /* NULL packet is a flush request; only meaningful for muxers
         * advertising AVFMT_ALLOW_FLUSH. Returns 1 when flushing is not
         * supported. */
        if (s->oformat->flags & AVFMT_ALLOW_FLUSH) {
            ret = s->oformat->write_packet(s, NULL);
            flush_if_needed(s);
            /* Surface deferred I/O errors from the AVIO layer. */
            if (ret >= 0 && s->pb && s->pb->error < 0)
                ret = s->pb->error;
            return ret;
        }
        return 1;
    }

    if (in->flags & AV_PKT_FLAG_UNCODED_FRAME) {
        /* Uncoded frames carry ownership of their payload; mux 'in'
         * directly so the final unref below frees it. */
        pkt = in;
    } else {
        /* We don't own in, so we have to make sure not to modify it.
         * The following avoids copying in's data unnecessarily.
         * Copying side data is unavoidable as a bitstream filter
         * may change it, e.g. free it on errors. */
        pkt->buf  = NULL;
        pkt->data = in->data;
        pkt->size = in->size;
        ret = av_packet_copy_props(pkt, in);
        if (ret < 0)
            return ret;
        if (in->buf) {
            pkt->buf = av_buffer_ref(in->buf);
            if (!pkt->buf) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
    }

    ret = write_packets_common(s, pkt, 0/*non-interleaved*/);

fail:
    // Uncoded frames using the noninterleaved codepath are also freed here
    av_packet_unref(pkt);
    return ret;
}
  1083. int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
  1084. {
  1085. int ret;
  1086. if (pkt) {
  1087. ret = write_packets_common(s, pkt, 1/*interleaved*/);
  1088. if (ret < 0)
  1089. av_packet_unref(pkt);
  1090. return ret;
  1091. } else {
  1092. av_log(s, AV_LOG_TRACE, "av_interleaved_write_frame FLUSH\n");
  1093. return interleaved_write_packet(s, NULL, 1/*flush*/);
  1094. }
  1095. }
int av_write_trailer(AVFormatContext *s)
{
    int i, ret1, ret = 0;
    AVPacket pkt = {0};

    av_init_packet(&pkt);

    /* Drain each stream's bitstream filter with a blank packet; remember
     * the first error but keep draining the remaining streams. */
    for (i = 0; i < s->nb_streams; i++) {
        if (s->streams[i]->internal->bsfc) {
            ret1 = write_packets_from_bsfs(s, s->streams[i], &pkt, 1/*interleaved*/);
            if (ret1 < 0)
                av_packet_unref(&pkt);
            if (ret >= 0)
                ret = ret1;
        }
    }
    /* Flush anything still sitting in the interleaving queue. */
    ret1 = interleaved_write_packet(s, NULL, 1);
    if (ret >= 0)
        ret = ret1;

    if (s->oformat->write_trailer) {
        if (!(s->oformat->flags & AVFMT_NOFILE) && s->pb)
            avio_write_marker(s->pb, AV_NOPTS_VALUE, AVIO_DATA_MARKER_TRAILER);
        /* The trailer is written even after an earlier error, but only a
         * previously clean run takes its return value. */
        if (ret >= 0) {
            ret = s->oformat->write_trailer(s);
        } else {
            s->oformat->write_trailer(s);
        }
    }

    deinit_muxer(s);

    if (s->pb)
        avio_flush(s->pb);
    /* Report a deferred AVIO error if everything else succeeded. */
    if (ret == 0)
        ret = s->pb ? s->pb->error : 0;
    for (i = 0; i < s->nb_streams; i++) {
        av_freep(&s->streams[i]->priv_data);
        av_freep(&s->streams[i]->internal->index_entries);
    }
    if (s->oformat->priv_class)
        av_opt_free(s->priv_data);
    av_freep(&s->priv_data);
    return ret;
}
  1136. int av_get_output_timestamp(struct AVFormatContext *s, int stream,
  1137. int64_t *dts, int64_t *wall)
  1138. {
  1139. if (!s->oformat || !s->oformat->get_output_timestamp)
  1140. return AVERROR(ENOSYS);
  1141. s->oformat->get_output_timestamp(s, stream, dts, wall);
  1142. return 0;
  1143. }
/* Write pkt (belonging to src) into dst as dst_stream, rescaling its
 * timestamps between the two streams' time bases. */
int ff_write_chained(AVFormatContext *dst, int dst_stream, AVPacket *pkt,
                     AVFormatContext *src, int interleave)
{
    AVPacket local_pkt;
    int ret;

    /* Shallow copy: the write call consumes local_pkt while the caller's
     * pkt keeps describing the same payload. */
    local_pkt = *pkt;
    local_pkt.stream_index = dst_stream;

    av_packet_rescale_ts(&local_pkt,
                         src->streams[pkt->stream_index]->time_base,
                         dst->streams[dst_stream]->time_base);

    if (interleave) ret = av_interleaved_write_frame(dst, &local_pkt);
    else            ret = av_write_frame(dst, &local_pkt);

    /* Mirror ownership changes back into pkt: the write may have taken
     * (and reset) buf/side_data, so copying the fields back prevents the
     * caller from double-freeing them. */
    pkt->buf = local_pkt.buf;
    pkt->side_data       = local_pkt.side_data;
    pkt->side_data_elems = local_pkt.side_data_elems;
    return ret;
}
  1161. static void uncoded_frame_free(void *unused, uint8_t *data)
  1162. {
  1163. av_frame_free((AVFrame **)data);
  1164. av_free(data);
  1165. }
/* Wrap an AVFrame in a fake AVPacket whose payload is a pointer to the
 * frame, then send it down the regular muxing path. Takes ownership of
 * 'frame' in all cases, including on error. A NULL frame flushes. */
static int write_uncoded_frame_internal(AVFormatContext *s, int stream_index,
                                        AVFrame *frame, int interleaved)
{
    AVPacket pkt, *pktp;

    av_assert0(s->oformat);
    if (!s->oformat->write_uncoded_frame) {
        av_frame_free(&frame);
        return AVERROR(ENOSYS);
    }

    if (!frame) {
        pktp = NULL;
    } else {
        /* The packet data holds one AVFrame* (sizeof(frame) is the size of
         * the pointer, not of the frame); the padding keeps generic packet
         * code that reads slightly past the end safe. */
        size_t bufsize = sizeof(frame) + AV_INPUT_BUFFER_PADDING_SIZE;
        AVFrame **framep = av_mallocz(bufsize);

        if (!framep)
            goto fail;
        pktp = &pkt;
        av_init_packet(&pkt);
        pkt.buf = av_buffer_create((void *)framep, bufsize,
                                   uncoded_frame_free, NULL, 0);
        if (!pkt.buf) {
            av_free(framep);
    fail:
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        *framep = frame;

        pkt.data         = (void *)framep;
        pkt.size         = sizeof(frame);
        pkt.pts          =
        pkt.dts          = frame->pts;
        pkt.duration     = frame->pkt_duration;
        pkt.stream_index = stream_index;
        /* Flag tells downstream code the payload is a frame pointer. */
        pkt.flags |= AV_PKT_FLAG_UNCODED_FRAME;
    }

    return interleaved ? av_interleaved_write_frame(s, pktp) :
                         av_write_frame(s, pktp);
}
  1204. int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
  1205. AVFrame *frame)
  1206. {
  1207. return write_uncoded_frame_internal(s, stream_index, frame, 0);
  1208. }
  1209. int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
  1210. AVFrame *frame)
  1211. {
  1212. return write_uncoded_frame_internal(s, stream_index, frame, 1);
  1213. }
  1214. int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index)
  1215. {
  1216. av_assert0(s->oformat);
  1217. if (!s->oformat->write_uncoded_frame)
  1218. return AVERROR(ENOSYS);
  1219. return s->oformat->write_uncoded_frame(s, stream_index, NULL,
  1220. AV_WRITE_UNCODED_FRAME_QUERY);
  1221. }