You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2413 lines
94KB

  1. /*
  2. * MPEG-DASH ISO BMFF segmenter
  3. * Copyright (c) 2014 Martin Storsjo
  4. * Copyright (c) 2018 Akamai Technologies, Inc.
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #if HAVE_UNISTD_H
  24. #include <unistd.h>
  25. #endif
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/avutil.h"
  28. #include "libavutil/avstring.h"
  29. #include "libavutil/intreadwrite.h"
  30. #include "libavutil/mathematics.h"
  31. #include "libavutil/opt.h"
  32. #include "libavutil/parseutils.h"
  33. #include "libavutil/rational.h"
  34. #include "libavutil/time.h"
  35. #include "libavutil/time_internal.h"
  36. #include "av1.h"
  37. #include "avc.h"
  38. #include "avformat.h"
  39. #include "avio_internal.h"
  40. #include "hlsplaylist.h"
  41. #if CONFIG_HTTP_PROTOCOL
  42. #include "http.h"
  43. #endif
  44. #include "internal.h"
  45. #include "isom.h"
  46. #include "os_support.h"
  47. #include "url.h"
  48. #include "vpcc.h"
  49. #include "dash.h"
/* Container format used for the generated segments. */
typedef enum {
    SEGMENT_TYPE_AUTO = 0, /* resolved per stream from the codec, see select_segment_type() */
    SEGMENT_TYPE_MP4,
    SEGMENT_TYPE_WEBM,
    SEGMENT_TYPE_NB        /* number of valid segment types */
} SegmentType;
/* Strategy used to cut fragments inside a segment. */
enum {
    FRAG_TYPE_NONE = 0,    /* no explicit fragmentation */
    FRAG_TYPE_EVERY_FRAME,
    FRAG_TYPE_DURATION,
    FRAG_TYPE_PFRAMES,
    FRAG_TYPE_NB
};

/* Bit flags describing which MPD profile(s) the manifest targets. */
#define MPD_PROFILE_DASH 1
#define MPD_PROFILE_DVB 2
/* One finished media segment, as referenced from the MPD and HLS playlists. */
typedef struct Segment {
    char file[1024];              /* segment file name */
    int64_t start_pos;            /* byte offset of the segment (used for ranges in single-file mode) */
    int range_length, index_length;
    int64_t time;                 /* segment start time, used for SegmentTimeline t= */
    double prog_date_time;        /* HLS EXT-X-PROGRAM-DATE-TIME value */
    int64_t duration;
    int n;                        /* segment sequence number */
} Segment;
/* One MPD AdaptationSet grouping one or more output streams. */
typedef struct AdaptationSet {
    int id;
    char *descriptor;             /* raw XML inserted verbatim into the AdaptationSet element */
    int64_t seg_duration;
    int64_t frag_duration;
    int frag_type;
    enum AVMediaType media_type;
    AVDictionary *metadata;       /* "language" and "role" entries are written to the MPD */
    AVRational min_frame_rate, max_frame_rate;
    int ambiguous_frame_rate;     /* set when frame rates can't be trusted; suppresses frameRate attrs */
    int64_t max_frag_duration;
    int max_width, max_height;
    int nb_streams;
    AVRational par;               /* picture aspect ratio */
    int trick_idx;                /* index of the associated trick-mode adaptation set, -1 if none */
} AdaptationSet;
/* Per-stream muxing state: a nested muxer plus segment bookkeeping. */
typedef struct OutputStream {
    AVFormatContext *ctx;         /* nested mp4/webm muxer for this stream */
    int ctx_inited, as_idx;       /* as_idx is 1-based; 0 means not assigned to an AdaptationSet yet */
    AVIOContext *out;             /* destination of flushed segment data */
    AVCodecParserContext *parser;
    AVCodecContext *parser_avctx;
    int packets_written;
    char initfile[1024];          /* init segment name (doubles as BaseURL in single-file mode) */
    int64_t init_start_pos, pos;
    int init_range_length;
    int nb_segments, segments_size, segment_index;
    int64_t seg_duration;
    int64_t frag_duration;
    int64_t last_duration;
    Segment **segments;           /* completed segments, nb_segments entries */
    int64_t first_pts, start_pts, max_pts;
    int64_t last_dts, last_pts;
    int last_flags;
    int bit_rate;
    int first_segment_bit_rate;
    SegmentType segment_type; /* segment type selected for this particular stream */
    const char *format_name;      /* nested muxer name matching segment_type */
    const char *extension_name;   /* file extension matching segment_type */
    const char *single_file_name; /* file names selected for this particular stream */
    const char *init_seg_name;
    const char *media_seg_name;
    char codec_str[100];          /* RFC 6381 codec string written into the MPD */
    int written_len;              /* bytes of the current dynamic buffer already streamed out */
    char filename[1024];
    char full_path[1024];
    char temp_path[1024];
    double availability_time_offset;
    AVProducerReferenceTime producer_reference_time;
    char producer_reference_time_str[100];
    int total_pkt_size;
    int64_t total_pkt_duration;
    int muxer_overhead;
    int frag_type;
    int64_t gop_size;
    AVRational sar;               /* sample aspect ratio, written as the Representation sar attr */
    int coding_dependency;
} OutputStream;
/* Muxer private context for the DASH segmenter. */
typedef struct DASHContext {
    const AVClass *class; /* Class for private options. */
    char *adaptation_sets;        /* user string mapping streams to adaptation sets */
    AdaptationSet *as;
    int nb_as;
    int window_size;              /* number of segments kept in the manifest (0 = all) */
    int extra_window_size;
#if FF_API_DASH_MIN_SEG_DURATION
    int min_seg_duration;         /* behind the FF_API deprecation guard */
#endif
    int64_t seg_duration;
    int64_t frag_duration;
    int remove_at_exit;
    int use_template;             /* SegmentTemplate instead of SegmentList */
    int use_timeline;             /* explicit SegmentTimeline inside the template */
    int single_file;              /* one file per representation, segments addressed by byte range */
    OutputStream *streams;        /* one entry per AVStream */
    int has_video;
    int64_t last_duration;
    int64_t total_duration;
    char availability_start_time[100];
    time_t start_time_s;
    int64_t presentation_time_offset;
    char dirname[1024];
    const char *single_file_name; /* file names as specified in options */
    const char *init_seg_name;
    const char *media_seg_name;
    const char *utc_timing_url;
    const char *method;           /* HTTP method override */
    const char *user_agent;
    AVDictionary *http_opts;
    int hls_playlist;             /* also emit HLS playlists for the mp4 segments */
    const char *hls_master_name;
    int http_persistent;          /* reuse HTTP connections across requests */
    int master_playlist_created;
    AVIOContext *mpd_out;
    AVIOContext *m3u8_out;
    int streaming;
    int64_t timeout;
    int index_correction;
    AVDictionary *format_options;
    int global_sidx;
    SegmentType segment_type_option; /* segment type as specified in options */
    int ignore_io_errors;         /* log I/O failures instead of failing */
    int lhls;
    int ldash;
    int master_publish_rate;
    int nr_of_streams_to_flush;
    int nr_of_streams_flushed;
    int frag_type;
    int write_prft;               /* emit ProducerReferenceTime elements */
    int64_t max_gop_size;
    int64_t max_segment_duration;
    int profile;                  /* MPD_PROFILE_* bit mask */
    int64_t target_latency;
    int target_latency_refid;
    AVRational min_playback_rate;
    AVRational max_playback_rate;
    int64_t update_period;
} DASHContext;
/* Plain codec names for common WebM codecs, which are not covered by
 * RFC 6381 (see set_codec_str()).  NULL-terminated. */
static struct codec_string {
    int id;
    const char *str;
} codecs[] = {
    { AV_CODEC_ID_VP8, "vp8" },
    { AV_CODEC_ID_VP9, "vp9" },
    { AV_CODEC_ID_VORBIS, "vorbis" },
    { AV_CODEC_ID_OPUS, "opus" },
    { AV_CODEC_ID_FLAC, "flac" },
    { 0, NULL }
};
/* SegmentType -> format name mapping, scanned by get_format_str().
 * NULL-terminated. */
static struct format_string {
    SegmentType segment_type;
    const char *str;
} formats[] = {
    { SEGMENT_TYPE_AUTO, "auto" },
    { SEGMENT_TYPE_MP4, "mp4" },
    { SEGMENT_TYPE_WEBM, "webm" },
    { 0, NULL }
};
/*
 * Open (or reuse) an AVIOContext for writing to filename.
 *
 * With persistent HTTP enabled and an HTTP context already open in *pb,
 * a new request is issued on the existing connection instead of opening
 * a fresh one; on request failure the context is closed.  If persistent
 * HTTP is requested but CONFIG_HTTP_PROTOCOL is 0, no branch runs and
 * the initial AVERROR_MUXER_NOT_FOUND is returned.
 */
static int dashenc_io_open(AVFormatContext *s, AVIOContext **pb, char *filename,
                           AVDictionary **options) {
    DASHContext *c = s->priv_data;
    int http_base_proto = filename ? ff_is_http_proto(filename) : 0;
    int err = AVERROR_MUXER_NOT_FOUND;
    if (!*pb || !http_base_proto || !c->http_persistent) {
        err = s->io_open(s, pb, filename, AVIO_FLAG_WRITE, options);
#if CONFIG_HTTP_PROTOCOL
    } else {
        URLContext *http_url_context = ffio_geturlcontext(*pb);
        av_assert0(http_url_context);
        err = ff_http_do_new_request(http_url_context, filename);
        if (err < 0)
            ff_format_io_close(s, pb);
#endif
    }
    return err;
}
/*
 * Close or release an AVIOContext opened by dashenc_io_open().
 * With persistent HTTP the connection is kept alive: only the write side
 * of the current request is flushed and shut down.
 */
static void dashenc_io_close(AVFormatContext *s, AVIOContext **pb, char *filename) {
    DASHContext *c = s->priv_data;
    int http_base_proto = filename ? ff_is_http_proto(filename) : 0;
    if (!*pb)
        return;
    if (!http_base_proto || !c->http_persistent) {
        ff_format_io_close(s, pb);
#if CONFIG_HTTP_PROTOCOL
    } else {
        URLContext *http_url_context = ffio_geturlcontext(*pb);
        av_assert0(http_url_context);
        avio_flush(*pb);
        ffurl_shutdown(http_url_context, AVIO_FLAG_WRITE);
#endif
    }
}
  246. static const char *get_format_str(SegmentType segment_type) {
  247. int i;
  248. for (i = 0; i < SEGMENT_TYPE_NB; i++)
  249. if (formats[i].segment_type == segment_type)
  250. return formats[i].str;
  251. return NULL;
  252. }
  253. static const char *get_extension_str(SegmentType type, int single_file)
  254. {
  255. switch (type) {
  256. case SEGMENT_TYPE_MP4: return single_file ? "mp4" : "m4s";
  257. case SEGMENT_TYPE_WEBM: return "webm";
  258. default: return NULL;
  259. }
  260. }
  261. static int handle_io_open_error(AVFormatContext *s, int err, char *url) {
  262. DASHContext *c = s->priv_data;
  263. char errbuf[AV_ERROR_MAX_STRING_SIZE];
  264. av_strerror(err, errbuf, sizeof(errbuf));
  265. av_log(s, c->ignore_io_errors ? AV_LOG_WARNING : AV_LOG_ERROR,
  266. "Unable to open %s for writing: %s\n", url, errbuf);
  267. return c->ignore_io_errors ? 0 : err;
  268. }
  269. static inline SegmentType select_segment_type(SegmentType segment_type, enum AVCodecID codec_id)
  270. {
  271. if (segment_type == SEGMENT_TYPE_AUTO) {
  272. if (codec_id == AV_CODEC_ID_OPUS || codec_id == AV_CODEC_ID_VORBIS ||
  273. codec_id == AV_CODEC_ID_VP8 || codec_id == AV_CODEC_ID_VP9) {
  274. segment_type = SEGMENT_TYPE_WEBM;
  275. } else {
  276. segment_type = SEGMENT_TYPE_MP4;
  277. }
  278. }
  279. return segment_type;
  280. }
/*
 * Decide the segment container for every stream (resolving "auto" from the
 * codec) and cache the matching format name and file extension on each
 * OutputStream.  Disables HLS manifest generation when no stream produces
 * mp4 segments.  Returns 0 or AVERROR_MUXER_NOT_FOUND.
 */
static int init_segment_types(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int has_mp4_streams = 0;
    for (int i = 0; i < s->nb_streams; ++i) {
        OutputStream *os = &c->streams[i];
        SegmentType segment_type = select_segment_type(
            c->segment_type_option, s->streams[i]->codecpar->codec_id);
        os->segment_type = segment_type;
        os->format_name = get_format_str(segment_type);
        if (!os->format_name) {
            av_log(s, AV_LOG_ERROR, "Could not select DASH segment type for stream %d\n", i);
            return AVERROR_MUXER_NOT_FOUND;
        }
        os->extension_name = get_extension_str(segment_type, c->single_file);
        if (!os->extension_name) {
            av_log(s, AV_LOG_ERROR, "Could not get extension type for stream %d\n", i);
            return AVERROR_MUXER_NOT_FOUND;
        }
        has_mp4_streams |= segment_type == SEGMENT_TYPE_MP4;
    }
    if (c->hls_playlist && !has_mp4_streams) {
        av_log(s, AV_LOG_WARNING, "No mp4 streams, disabling HLS manifest generation\n");
        c->hls_playlist = 0;
    }
    return 0;
}
  308. static int check_file_extension(const char *filename, const char *extension) {
  309. char *dot;
  310. if (!filename || !extension)
  311. return -1;
  312. dot = strrchr(filename, '.');
  313. if (dot && !strcmp(dot + 1, extension))
  314. return 0;
  315. return -1;
  316. }
  317. static void set_vp9_codec_str(AVFormatContext *s, AVCodecParameters *par,
  318. AVRational *frame_rate, char *str, int size) {
  319. VPCC vpcc;
  320. int ret = ff_isom_get_vpcc_features(s, par, frame_rate, &vpcc);
  321. if (ret == 0) {
  322. av_strlcatf(str, size, "vp09.%02d.%02d.%02d",
  323. vpcc.profile, vpcc.level, vpcc.bitdepth);
  324. } else {
  325. // Default to just vp9 in case of error while finding out profile or level
  326. av_log(s, AV_LOG_WARNING, "Could not find VP9 profile and/or level\n");
  327. av_strlcpy(str, "vp9", size);
  328. }
  329. return;
  330. }
/*
 * Build the codec string written into the MPD "codecs" attribute.
 * WebM codecs use plain names (see codecs[]); the rest follow RFC 6381
 * based on the ISO-BMFF codec tag, with profile/level suffixes appended
 * for mp4a, avc1 and av01 where extradata allows.  On any failure the
 * string is simply left shorter; no error is returned.
 */
static void set_codec_str(AVFormatContext *s, AVCodecParameters *par,
                          AVRational *frame_rate, char *str, int size)
{
    const AVCodecTag *tags[2] = { NULL, NULL };
    uint32_t tag;
    int i;
    // common Webm codecs are not part of RFC 6381
    for (i = 0; codecs[i].id; i++)
        if (codecs[i].id == par->codec_id) {
            if (codecs[i].id == AV_CODEC_ID_VP9) {
                set_vp9_codec_str(s, par, frame_rate, str, size);
            } else {
                av_strlcpy(str, codecs[i].str, size);
            }
            return;
        }
    // for codecs part of RFC 6381
    if (par->codec_type == AVMEDIA_TYPE_VIDEO)
        tags[0] = ff_codec_movvideo_tags;
    else if (par->codec_type == AVMEDIA_TYPE_AUDIO)
        tags[0] = ff_codec_movaudio_tags;
    else
        return;
    tag = par->codec_tag;
    if (!tag)
        tag = av_codec_get_tag(tags, par->codec_id);
    if (!tag)
        return;
    if (size < 5)
        return;
    /* the 4CC itself is the first part of the codec string */
    AV_WL32(str, tag);
    str[4] = '\0';
    if (!strcmp(str, "mp4a") || !strcmp(str, "mp4v")) {
        uint32_t oti;
        tags[0] = ff_mp4_obj_type;
        oti = av_codec_get_tag(tags, par->codec_id);
        if (oti)
            av_strlcatf(str, size, ".%02"PRIx32, oti);
        else
            return;
        if (tag == MKTAG('m', 'p', '4', 'a')) {
            if (par->extradata_size >= 2) {
                /* AudioSpecificConfig: 5-bit audio object type, with the
                 * 31 escape extending it by 6 more bits */
                int aot = par->extradata[0] >> 3;
                if (aot == 31)
                    aot = ((AV_RB16(par->extradata) >> 5) & 0x3f) + 32;
                av_strlcatf(str, size, ".%d", aot);
            }
        } else if (tag == MKTAG('m', 'p', '4', 'v')) {
            // Unimplemented, should output ProfileLevelIndication as a decimal number
            av_log(s, AV_LOG_WARNING, "Incomplete RFC 6381 codec string for mp4v\n");
        }
    } else if (!strcmp(str, "avc1")) {
        uint8_t *tmpbuf = NULL;
        uint8_t *extradata = par->extradata;
        int extradata_size = par->extradata_size;
        if (!extradata_size)
            return;
        if (extradata[0] != 1) {
            /* extradata is not an avcC record yet (first byte would be the
             * configurationVersion 1) — build one so the profile bytes can
             * be read from a known layout */
            AVIOContext *pb;
            if (avio_open_dyn_buf(&pb) < 0)
                return;
            if (ff_isom_write_avcc(pb, extradata, extradata_size) < 0) {
                ffio_free_dyn_buf(&pb);
                return;
            }
            extradata_size = avio_close_dyn_buf(pb, &extradata);
            tmpbuf = extradata; /* we own the converted buffer */
        }
        if (extradata_size >= 4)
            av_strlcatf(str, size, ".%02x%02x%02x",
                        extradata[1], extradata[2], extradata[3]);
        av_free(tmpbuf);
    } else if (!strcmp(str, "av01")) {
        AV1SequenceParameters seq;
        if (!par->extradata_size)
            return;
        if (ff_av1_parse_seq_header(&seq, par->extradata, par->extradata_size) < 0)
            return;
        av_strlcatf(str, size, ".%01u.%02u%s.%02u",
                    seq.profile, seq.level, seq.tier ? "H" : "M", seq.bitdepth);
        if (seq.color_description_present_flag)
            av_strlcatf(str, size, ".%01u.%01u%01u%01u.%02u.%02u.%02u.%01u",
                        seq.monochrome,
                        seq.chroma_subsampling_x, seq.chroma_subsampling_y, seq.chroma_sample_position,
                        seq.color_primaries, seq.transfer_characteristics, seq.matrix_coefficients,
                        seq.color_range);
    }
}
/*
 * Flush the data buffered in the nested muxer.  In multi-file mode the
 * dynamic buffer is closed, its contents (minus the already-streamed
 * os->written_len prefix) are written to os->out, and a fresh dynamic
 * buffer is opened.  In single-file mode nothing is moved; only the byte
 * count since os->pos is reported.  *range_length receives the size of
 * the flushed range.  Returns 0 or a negative AVERROR.
 */
static int flush_dynbuf(DASHContext *c, OutputStream *os, int *range_length)
{
    uint8_t *buffer;
    if (!os->ctx->pb) {
        return AVERROR(EINVAL);
    }
    // flush
    av_write_frame(os->ctx, NULL);
    avio_flush(os->ctx->pb);
    if (!c->single_file) {
        // write out to file
        *range_length = avio_close_dyn_buf(os->ctx->pb, &buffer);
        os->ctx->pb = NULL;
        if (os->out)
            avio_write(os->out, buffer + os->written_len, *range_length - os->written_len);
        os->written_len = 0;
        av_free(buffer);
        // re-open buffer
        return avio_open_dyn_buf(&os->ctx->pb);
    } else {
        *range_length = avio_tell(os->ctx->pb) - os->pos;
        return 0;
    }
}
/*
 * Fill *options with the HTTP client options configured on the muxer.
 * c->http_opts is copied after "method" is set, so user-supplied options
 * are applied on top of it.
 */
static void set_http_options(AVDictionary **options, DASHContext *c)
{
    if (c->method)
        av_dict_set(options, "method", c->method, 0);
    av_dict_copy(options, c->http_opts, 0);
    if (c->user_agent)
        av_dict_set(options, "user_agent", c->user_agent, 0);
    if (c->http_persistent)
        av_dict_set_int(options, "multiple_requests", 1, 0);
    if (c->timeout >= 0)
        av_dict_set_int(options, "timeout", c->timeout, 0);
}
  455. static void get_hls_playlist_name(char *playlist_name, int string_size,
  456. const char *base_url, int id) {
  457. if (base_url)
  458. snprintf(playlist_name, string_size, "%smedia_%d.m3u8", base_url, id);
  459. else
  460. snprintf(playlist_name, string_size, "media_%d.m3u8", id);
  461. }
  462. static void get_start_index_number(OutputStream *os, DASHContext *c,
  463. int *start_index, int *start_number) {
  464. *start_index = 0;
  465. *start_number = 1;
  466. if (c->window_size) {
  467. *start_index = FFMAX(os->nb_segments - c->window_size, 0);
  468. *start_number = FFMAX(os->segment_index - c->window_size, 1);
  469. }
  470. }
/*
 * Write (or rewrite) the HLS media playlist for one representation.
 * Only runs for mp4 segments when hls_playlist is enabled.  The playlist
 * is written to a ".tmp" file and renamed when the target is a local
 * file.  prefetch_url, when non-NULL, is emitted as an EXT-X-PREFETCH
 * entry (LHLS); final appends the end-of-list tag.
 */
static void write_hls_media_playlist(OutputStream *os, AVFormatContext *s,
                                     int representation_id, int final,
                                     char *prefetch_url) {
    DASHContext *c = s->priv_data;
    int timescale = os->ctx->streams[0]->time_base.den;
    char temp_filename_hls[1024];
    char filename_hls[1024];
    AVDictionary *http_opts = NULL;
    int target_duration = 0;
    int ret = 0;
    const char *proto = avio_find_protocol_name(c->dirname);
    int use_rename = proto && !strcmp(proto, "file");
    int i, start_index, start_number;
    double prog_date_time = 0;
    get_start_index_number(os, c, &start_index, &start_number);
    if (!c->hls_playlist || start_index >= os->nb_segments ||
        os->segment_type != SEGMENT_TYPE_MP4)
        return;
    get_hls_playlist_name(filename_hls, sizeof(filename_hls),
                          c->dirname, representation_id);
    snprintf(temp_filename_hls, sizeof(temp_filename_hls), use_rename ? "%s.tmp" : "%s", filename_hls);
    set_http_options(&http_opts, c);
    ret = dashenc_io_open(s, &c->m3u8_out, temp_filename_hls, &http_opts);
    av_dict_free(&http_opts);
    if (ret < 0) {
        handle_io_open_error(s, ret, temp_filename_hls);
        return;
    }
    /* EXT-X-TARGETDURATION must cover the longest segment in the window */
    for (i = start_index; i < os->nb_segments; i++) {
        Segment *seg = os->segments[i];
        double duration = (double) seg->duration / timescale;
        if (target_duration <= duration)
            target_duration = lrint(duration);
    }
    ff_hls_write_playlist_header(c->m3u8_out, 6, -1, target_duration,
                                 start_number, PLAYLIST_TYPE_NONE, 0);
    ff_hls_write_init_file(c->m3u8_out, os->initfile, c->single_file,
                           os->init_range_length, os->init_start_pos);
    for (i = start_index; i < os->nb_segments; i++) {
        Segment *seg = os->segments[i];
        /* seed the program date time from the first visible segment */
        if (prog_date_time == 0) {
            if (os->nb_segments == 1)
                prog_date_time = c->start_time_s;
            else
                prog_date_time = seg->prog_date_time;
        }
        seg->prog_date_time = prog_date_time;
        ret = ff_hls_write_file_entry(c->m3u8_out, 0, c->single_file,
                                      (double) seg->duration / timescale, 0,
                                      seg->range_length, seg->start_pos, NULL,
                                      c->single_file ? os->initfile : seg->file,
                                      &prog_date_time, 0, 0, 0);
        if (ret < 0) {
            av_log(os->ctx, AV_LOG_WARNING, "ff_hls_write_file_entry get error\n");
        }
    }
    if (prefetch_url)
        avio_printf(c->m3u8_out, "#EXT-X-PREFETCH:%s\n", prefetch_url);
    if (final)
        ff_hls_write_end_list(c->m3u8_out);
    dashenc_io_close(s, &c->m3u8_out, temp_filename_hls);
    if (use_rename)
        ff_rename(temp_filename_hls, filename_hls, os->ctx);
}
  535. static int flush_init_segment(AVFormatContext *s, OutputStream *os)
  536. {
  537. DASHContext *c = s->priv_data;
  538. int ret, range_length;
  539. ret = flush_dynbuf(c, os, &range_length);
  540. if (ret < 0)
  541. return ret;
  542. os->pos = os->init_range_length = range_length;
  543. if (!c->single_file) {
  544. char filename[1024];
  545. snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
  546. dashenc_io_close(s, &os->out, filename);
  547. }
  548. return 0;
  549. }
/*
 * Free all muxer state: adaptation sets, every per-stream nested muxer
 * (its dynamic buffer in multi-file mode, its plain AVIO in single-file
 * mode), parsers, segment lists and the shared manifest I/O contexts.
 */
static void dash_free(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int i, j;
    if (c->as) {
        for (i = 0; i < c->nb_as; i++) {
            av_dict_free(&c->as[i].metadata);
            av_freep(&c->as[i].descriptor);
        }
        av_freep(&c->as);
        c->nb_as = 0;
    }
    if (!c->streams)
        return;
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        if (os->ctx && os->ctx->pb) {
            if (!c->single_file)
                ffio_free_dyn_buf(&os->ctx->pb);
            else
                avio_close(os->ctx->pb);
        }
        ff_format_io_close(s, &os->out);
        avformat_free_context(os->ctx);
        avcodec_free_context(&os->parser_avctx);
        av_parser_close(os->parser);
        for (j = 0; j < os->nb_segments; j++)
            av_free(os->segments[j]);
        av_free(os->segments);
        av_freep(&os->single_file_name);
        av_freep(&os->init_seg_name);
        av_freep(&os->media_seg_name);
    }
    av_freep(&c->streams);
    ff_format_io_close(s, &c->mpd_out);
    ff_format_io_close(s, &c->m3u8_out);
}
/*
 * Write the segment addressing part of one Representation: a
 * SegmentTemplate (with an explicit SegmentTimeline when use_timeline is
 * set), or a SegmentList — byte ranges into the single file, or one URL
 * per segment.  Also refreshes the HLS media playlist unless running in
 * LHLS mode (where the playlist is updated elsewhere before finalization).
 */
static void output_segment_list(OutputStream *os, AVIOContext *out, AVFormatContext *s,
                                int representation_id, int final)
{
    DASHContext *c = s->priv_data;
    int i, start_index, start_number;
    get_start_index_number(os, c, &start_index, &start_number);
    if (c->use_template) {
        int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : AV_TIME_BASE;
        avio_printf(out, "\t\t\t\t<SegmentTemplate timescale=\"%d\" ", timescale);
        if (!c->use_timeline) {
            avio_printf(out, "duration=\"%"PRId64"\" ", os->seg_duration);
            if (c->streaming && os->availability_time_offset)
                avio_printf(out, "availabilityTimeOffset=\"%.3f\" ",
                            os->availability_time_offset);
        }
        if (c->streaming && os->availability_time_offset && !final)
            avio_printf(out, "availabilityTimeComplete=\"false\" ");
        avio_printf(out, "initialization=\"%s\" media=\"%s\" startNumber=\"%d\"", os->init_seg_name, os->media_seg_name, c->use_timeline ? start_number : 1);
        if (c->presentation_time_offset)
            avio_printf(out, " presentationTimeOffset=\"%"PRId64"\"", c->presentation_time_offset);
        avio_printf(out, ">\n");
        if (c->use_timeline) {
            int64_t cur_time = 0;
            avio_printf(out, "\t\t\t\t\t<SegmentTimeline>\n");
            for (i = start_index; i < os->nb_segments; ) {
                Segment *seg = os->segments[i];
                int repeat = 0;
                avio_printf(out, "\t\t\t\t\t\t<S ");
                /* t= is only needed when the start time is not implied by
                 * the previous entry */
                if (i == start_index || seg->time != cur_time) {
                    cur_time = seg->time;
                    avio_printf(out, "t=\"%"PRId64"\" ", seg->time);
                }
                avio_printf(out, "d=\"%"PRId64"\" ", seg->duration);
                /* run-length encode contiguous segments of equal duration
                 * via the r= repeat count */
                while (i + repeat + 1 < os->nb_segments &&
                       os->segments[i + repeat + 1]->duration == seg->duration &&
                       os->segments[i + repeat + 1]->time == os->segments[i + repeat]->time + os->segments[i + repeat]->duration)
                    repeat++;
                if (repeat > 0)
                    avio_printf(out, "r=\"%d\" ", repeat);
                avio_printf(out, "/>\n");
                i += 1 + repeat;
                cur_time += (1 + repeat) * seg->duration;
            }
            avio_printf(out, "\t\t\t\t\t</SegmentTimeline>\n");
        }
        avio_printf(out, "\t\t\t\t</SegmentTemplate>\n");
    } else if (c->single_file) {
        avio_printf(out, "\t\t\t\t<BaseURL>%s</BaseURL>\n", os->initfile);
        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, FFMIN(os->seg_duration, os->last_duration), start_number);
        avio_printf(out, "\t\t\t\t\t<Initialization range=\"%"PRId64"-%"PRId64"\" />\n", os->init_start_pos, os->init_start_pos + os->init_range_length - 1);
        for (i = start_index; i < os->nb_segments; i++) {
            Segment *seg = os->segments[i];
            avio_printf(out, "\t\t\t\t\t<SegmentURL mediaRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->range_length - 1);
            if (seg->index_length)
                avio_printf(out, "indexRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->index_length - 1);
            avio_printf(out, "/>\n");
        }
        avio_printf(out, "\t\t\t\t</SegmentList>\n");
    } else {
        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, FFMIN(os->seg_duration, os->last_duration), start_number);
        avio_printf(out, "\t\t\t\t\t<Initialization sourceURL=\"%s\" />\n", os->initfile);
        for (i = start_index; i < os->nb_segments; i++) {
            Segment *seg = os->segments[i];
            avio_printf(out, "\t\t\t\t\t<SegmentURL media=\"%s\" />\n", seg->file);
        }
        avio_printf(out, "\t\t\t\t</SegmentList>\n");
    }
    if (!c->lhls || final) {
        write_hls_media_playlist(os, s, representation_id, final, NULL);
    }
}
  658. static char *xmlescape(const char *str) {
  659. int outlen = strlen(str)*3/2 + 6;
  660. char *out = av_realloc(NULL, outlen + 1);
  661. int pos = 0;
  662. if (!out)
  663. return NULL;
  664. for (; *str; str++) {
  665. if (pos + 6 > outlen) {
  666. char *tmp;
  667. outlen = 2 * outlen + 6;
  668. tmp = av_realloc(out, outlen + 1);
  669. if (!tmp) {
  670. av_free(out);
  671. return NULL;
  672. }
  673. out = tmp;
  674. }
  675. if (*str == '&') {
  676. memcpy(&out[pos], "&amp;", 5);
  677. pos += 5;
  678. } else if (*str == '<') {
  679. memcpy(&out[pos], "&lt;", 4);
  680. pos += 4;
  681. } else if (*str == '>') {
  682. memcpy(&out[pos], "&gt;", 4);
  683. pos += 4;
  684. } else if (*str == '\'') {
  685. memcpy(&out[pos], "&apos;", 6);
  686. pos += 6;
  687. } else if (*str == '\"') {
  688. memcpy(&out[pos], "&quot;", 6);
  689. pos += 6;
  690. } else {
  691. out[pos++] = *str;
  692. }
  693. }
  694. out[pos] = '\0';
  695. return out;
  696. }
  697. static void write_time(AVIOContext *out, int64_t time)
  698. {
  699. int seconds = time / AV_TIME_BASE;
  700. int fractions = time % AV_TIME_BASE;
  701. int minutes = seconds / 60;
  702. int hours = minutes / 60;
  703. seconds %= 60;
  704. minutes %= 60;
  705. avio_printf(out, "PT");
  706. if (hours)
  707. avio_printf(out, "%dH", hours);
  708. if (hours || minutes)
  709. avio_printf(out, "%dM", minutes);
  710. avio_printf(out, "%d.%dS", seconds, fractions / (AV_TIME_BASE / 10));
  711. }
  712. static void format_date(char *buf, int size, int64_t time_us)
  713. {
  714. struct tm *ptm, tmbuf;
  715. int64_t time_ms = time_us / 1000;
  716. const time_t time_s = time_ms / 1000;
  717. int millisec = time_ms - (time_s * 1000);
  718. ptm = gmtime_r(&time_s, &tmbuf);
  719. if (ptm) {
  720. int len;
  721. if (!strftime(buf, size, "%Y-%m-%dT%H:%M:%S", ptm)) {
  722. buf[0] = '\0';
  723. return;
  724. }
  725. len = strlen(buf);
  726. snprintf(buf + len, size - len, ".%03dZ", millisec);
  727. }
  728. }
/*
 * Write one complete AdaptationSet element, including every Representation
 * assigned to it (video gets width/height/scanType/sar/frameRate, audio
 * gets sample rate and channel configuration), optional LL-DASH Resync
 * and ProducerReferenceTime elements, and each Representation's segment
 * list.  Always returns 0.
 */
static int write_adaptation_set(AVFormatContext *s, AVIOContext *out, int as_index,
                                int final)
{
    DASHContext *c = s->priv_data;
    AdaptationSet *as = &c->as[as_index];
    AVDictionaryEntry *lang, *role;
    int i;
    avio_printf(out, "\t\t<AdaptationSet id=\"%d\" contentType=\"%s\" startWithSAP=\"1\" segmentAlignment=\"true\" bitstreamSwitching=\"true\"",
                as->id, as->media_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    /* a single frame rate gets frameRate=, a range gets maxFrameRate= */
    if (as->media_type == AVMEDIA_TYPE_VIDEO && as->max_frame_rate.num && !as->ambiguous_frame_rate && av_cmp_q(as->min_frame_rate, as->max_frame_rate) < 0)
        avio_printf(out, " maxFrameRate=\"%d/%d\"", as->max_frame_rate.num, as->max_frame_rate.den);
    else if (as->media_type == AVMEDIA_TYPE_VIDEO && as->max_frame_rate.num && !as->ambiguous_frame_rate && !av_cmp_q(as->min_frame_rate, as->max_frame_rate))
        avio_printf(out, " frameRate=\"%d/%d\"", as->max_frame_rate.num, as->max_frame_rate.den);
    if (as->media_type == AVMEDIA_TYPE_VIDEO) {
        avio_printf(out, " maxWidth=\"%d\" maxHeight=\"%d\"", as->max_width, as->max_height);
        avio_printf(out, " par=\"%d:%d\"", as->par.num, as->par.den);
    }
    lang = av_dict_get(as->metadata, "language", NULL, 0);
    if (lang)
        avio_printf(out, " lang=\"%s\"", lang->value);
    avio_printf(out, ">\n");
    if (!final && c->ldash && as->max_frag_duration && !(c->profile & MPD_PROFILE_DVB))
        avio_printf(out, "\t\t\t<Resync dT=\"%"PRId64"\" type=\"0\"/>\n", as->max_frag_duration);
    if (as->trick_idx >= 0)
        avio_printf(out, "\t\t\t<EssentialProperty id=\"%d\" schemeIdUri=\"http://dashif.org/guidelines/trickmode\" value=\"%d\"/>\n", as->id, as->trick_idx);
    role = av_dict_get(as->metadata, "role", NULL, 0);
    if (role)
        avio_printf(out, "\t\t\t<Role schemeIdUri=\"urn:mpeg:dash:role:2011\" value=\"%s\"/>\n", role->value);
    if (as->descriptor)
        avio_printf(out, "\t\t\t%s\n", as->descriptor);
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        OutputStream *os = &c->streams[i];
        char bandwidth_str[64] = {'\0'};
        /* os->as_idx is 1-based; skip streams assigned elsewhere */
        if (os->as_idx - 1 != as_index)
            continue;
        /* bandwidth: configured rate, else average over the whole run when
         * final, else the first segment's measured rate */
        if (os->bit_rate > 0)
            snprintf(bandwidth_str, sizeof(bandwidth_str), " bandwidth=\"%d\"", os->bit_rate);
        else if (final) {
            int average_bit_rate = os->pos * 8 * AV_TIME_BASE / c->total_duration;
            snprintf(bandwidth_str, sizeof(bandwidth_str), " bandwidth=\"%d\"", average_bit_rate);
        } else if (os->first_segment_bit_rate > 0)
            snprintf(bandwidth_str, sizeof(bandwidth_str), " bandwidth=\"%d\"", os->first_segment_bit_rate);
        if (as->media_type == AVMEDIA_TYPE_VIDEO) {
            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/%s\" codecs=\"%s\"%s width=\"%d\" height=\"%d\"",
                        i, os->format_name, os->codec_str, bandwidth_str, s->streams[i]->codecpar->width, s->streams[i]->codecpar->height);
            if (st->codecpar->field_order == AV_FIELD_UNKNOWN)
                avio_printf(out, " scanType=\"unknown\"");
            else if (st->codecpar->field_order != AV_FIELD_PROGRESSIVE)
                avio_printf(out, " scanType=\"interlaced\"");
            avio_printf(out, " sar=\"%d:%d\"", os->sar.num, os->sar.den);
            if (st->avg_frame_rate.num && av_cmp_q(as->min_frame_rate, as->max_frame_rate) < 0)
                avio_printf(out, " frameRate=\"%d/%d\"", st->avg_frame_rate.num, st->avg_frame_rate.den);
            if (as->trick_idx >= 0) {
                AdaptationSet *tas = &c->as[as->trick_idx];
                if (!as->ambiguous_frame_rate && !tas->ambiguous_frame_rate)
                    avio_printf(out, " maxPlayoutRate=\"%d\"", FFMAX((int)av_q2d(av_div_q(tas->min_frame_rate, as->min_frame_rate)), 1));
            }
            if (!os->coding_dependency)
                avio_printf(out, " codingDependency=\"false\"");
            avio_printf(out, ">\n");
        } else {
            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/%s\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n",
                        i, os->format_name, os->codec_str, bandwidth_str, s->streams[i]->codecpar->sample_rate);
            avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n",
                        s->streams[i]->codecpar->channels);
        }
        if (!final && c->write_prft && os->producer_reference_time_str[0]) {
            avio_printf(out, "\t\t\t\t<ProducerReferenceTime id=\"%d\" inband=\"true\" type=\"%s\" wallClockTime=\"%s\" presentationTime=\"%"PRId64"\">\n",
                        i, os->producer_reference_time.flags ? "captured" : "encoder", os->producer_reference_time_str, c->presentation_time_offset);
            avio_printf(out, "\t\t\t\t\t<UTCTiming schemeIdUri=\"urn:mpeg:dash:utc:http-xsdate:2014\" value=\"%s\"/>\n", c->utc_timing_url);
            avio_printf(out, "\t\t\t\t</ProducerReferenceTime>\n");
        }
        if (!final && c->ldash && os->gop_size && os->frag_type != FRAG_TYPE_NONE && !(c->profile & MPD_PROFILE_DVB) &&
            (os->frag_type != FRAG_TYPE_DURATION || os->frag_duration != os->seg_duration))
            avio_printf(out, "\t\t\t\t<Resync dT=\"%"PRId64"\" type=\"1\"/>\n", os->gop_size);
        output_segment_list(os, out, s, i, final);
        avio_printf(out, "\t\t\t</Representation>\n");
    }
    avio_printf(out, "\t\t</AdaptationSet>\n");
    return 0;
}
  811. static int add_adaptation_set(AVFormatContext *s, AdaptationSet **as, enum AVMediaType type)
  812. {
  813. DASHContext *c = s->priv_data;
  814. void *mem;
  815. if (c->profile & MPD_PROFILE_DVB && (c->nb_as + 1) > 16) {
  816. av_log(s, AV_LOG_ERROR, "DVB-DASH profile allows a max of 16 Adaptation Sets\n");
  817. return AVERROR(EINVAL);
  818. }
  819. mem = av_realloc(c->as, sizeof(*c->as) * (c->nb_as + 1));
  820. if (!mem)
  821. return AVERROR(ENOMEM);
  822. c->as = mem;
  823. ++c->nb_as;
  824. *as = &c->as[c->nb_as - 1];
  825. memset(*as, 0, sizeof(**as));
  826. (*as)->media_type = type;
  827. (*as)->frag_type = -1;
  828. (*as)->trick_idx = -1;
  829. return 0;
  830. }
  831. static int adaptation_set_add_stream(AVFormatContext *s, int as_idx, int i)
  832. {
  833. DASHContext *c = s->priv_data;
  834. AdaptationSet *as = &c->as[as_idx - 1];
  835. OutputStream *os = &c->streams[i];
  836. if (as->media_type != s->streams[i]->codecpar->codec_type) {
  837. av_log(s, AV_LOG_ERROR, "Codec type of stream %d doesn't match AdaptationSet's media type\n", i);
  838. return AVERROR(EINVAL);
  839. } else if (os->as_idx) {
  840. av_log(s, AV_LOG_ERROR, "Stream %d is already assigned to an AdaptationSet\n", i);
  841. return AVERROR(EINVAL);
  842. }
  843. if (c->profile & MPD_PROFILE_DVB && (as->nb_streams + 1) > 16) {
  844. av_log(s, AV_LOG_ERROR, "DVB-DASH profile allows a max of 16 Representations per Adaptation Set\n");
  845. return AVERROR(EINVAL);
  846. }
  847. os->as_idx = as_idx;
  848. ++as->nb_streams;
  849. return 0;
  850. }
  851. static int parse_adaptation_sets(AVFormatContext *s)
  852. {
  853. DASHContext *c = s->priv_data;
  854. const char *p = c->adaptation_sets;
  855. enum { new_set, parse_default, parsing_streams, parse_seg_duration, parse_frag_duration } state;
  856. AdaptationSet *as;
  857. int i, n, ret;
  858. // default: one AdaptationSet for each stream
  859. if (!p) {
  860. for (i = 0; i < s->nb_streams; i++) {
  861. if ((ret = add_adaptation_set(s, &as, s->streams[i]->codecpar->codec_type)) < 0)
  862. return ret;
  863. as->id = i;
  864. c->streams[i].as_idx = c->nb_as;
  865. ++as->nb_streams;
  866. }
  867. goto end;
  868. }
  869. // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
  870. // option id=0,descriptor=descriptor_str,streams=0,1,2 and so on
  871. // option id=0,seg_duration=2.5,frag_duration=0.5,streams=0,1,2
  872. // id=1,trick_id=0,seg_duration=10,frag_type=none,streams=3 and so on
  873. // descriptor is useful to the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015
  874. // descriptor_str should be a self-closing xml tag.
  875. // seg_duration and frag_duration have the same syntax as the global options of
  876. // the same name, and the former have precedence over them if set.
  877. state = new_set;
  878. while (*p) {
  879. if (*p == ' ') {
  880. p++;
  881. continue;
  882. } else if (state == new_set && av_strstart(p, "id=", &p)) {
  883. char id_str[10], *end_str;
  884. n = strcspn(p, ",");
  885. snprintf(id_str, sizeof(id_str), "%.*s", n, p);
  886. i = strtol(id_str, &end_str, 10);
  887. if (id_str == end_str || i < 0 || i > c->nb_as) {
  888. av_log(s, AV_LOG_ERROR, "\"%s\" is not a valid value for an AdaptationSet id\n", id_str);
  889. return AVERROR(EINVAL);
  890. }
  891. if ((ret = add_adaptation_set(s, &as, AVMEDIA_TYPE_UNKNOWN)) < 0)
  892. return ret;
  893. as->id = i;
  894. p += n;
  895. if (*p)
  896. p++;
  897. state = parse_default;
  898. } else if (state != new_set && av_strstart(p, "seg_duration=", &p)) {
  899. state = parse_seg_duration;
  900. } else if (state != new_set && av_strstart(p, "frag_duration=", &p)) {
  901. state = parse_frag_duration;
  902. } else if (state == parse_seg_duration || state == parse_frag_duration) {
  903. char str[32];
  904. int64_t usecs = 0;
  905. n = strcspn(p, ",");
  906. snprintf(str, sizeof(str), "%.*s", n, p);
  907. p += n;
  908. if (*p)
  909. p++;
  910. ret = av_parse_time(&usecs, str, 1);
  911. if (ret < 0) {
  912. av_log(s, AV_LOG_ERROR, "Unable to parse option value \"%s\" as duration\n", str);
  913. return ret;
  914. }
  915. if (state == parse_seg_duration)
  916. as->seg_duration = usecs;
  917. else
  918. as->frag_duration = usecs;
  919. state = parse_default;
  920. } else if (state != new_set && av_strstart(p, "frag_type=", &p)) {
  921. char type_str[16];
  922. n = strcspn(p, ",");
  923. snprintf(type_str, sizeof(type_str), "%.*s", n, p);
  924. p += n;
  925. if (*p)
  926. p++;
  927. if (!strcmp(type_str, "duration"))
  928. as->frag_type = FRAG_TYPE_DURATION;
  929. else if (!strcmp(type_str, "pframes"))
  930. as->frag_type = FRAG_TYPE_PFRAMES;
  931. else if (!strcmp(type_str, "every_frame"))
  932. as->frag_type = FRAG_TYPE_EVERY_FRAME;
  933. else if (!strcmp(type_str, "none"))
  934. as->frag_type = FRAG_TYPE_NONE;
  935. else {
  936. av_log(s, AV_LOG_ERROR, "Unable to parse option value \"%s\" as fragment type\n", type_str);
  937. return ret;
  938. }
  939. state = parse_default;
  940. } else if (state != new_set && av_strstart(p, "descriptor=", &p)) {
  941. n = strcspn(p, ">") + 1; //followed by one comma, so plus 1
  942. if (n < strlen(p)) {
  943. as->descriptor = av_strndup(p, n);
  944. } else {
  945. av_log(s, AV_LOG_ERROR, "Parse error, descriptor string should be a self-closing xml tag\n");
  946. return AVERROR(EINVAL);
  947. }
  948. p += n;
  949. if (*p)
  950. p++;
  951. state = parse_default;
  952. } else if ((state != new_set) && av_strstart(p, "trick_id=", &p)) {
  953. char trick_id_str[10], *end_str;
  954. n = strcspn(p, ",");
  955. snprintf(trick_id_str, sizeof(trick_id_str), "%.*s", n, p);
  956. p += n;
  957. as->trick_idx = strtol(trick_id_str, &end_str, 10);
  958. if (trick_id_str == end_str || as->trick_idx < 0)
  959. return AVERROR(EINVAL);
  960. if (*p)
  961. p++;
  962. state = parse_default;
  963. } else if ((state != new_set) && av_strstart(p, "streams=", &p)) { //descriptor and durations are optional
  964. state = parsing_streams;
  965. } else if (state == parsing_streams) {
  966. AdaptationSet *as = &c->as[c->nb_as - 1];
  967. char idx_str[8], *end_str;
  968. n = strcspn(p, " ,");
  969. snprintf(idx_str, sizeof(idx_str), "%.*s", n, p);
  970. p += n;
  971. // if value is "a" or "v", map all streams of that type
  972. if (as->media_type == AVMEDIA_TYPE_UNKNOWN && (idx_str[0] == 'v' || idx_str[0] == 'a')) {
  973. enum AVMediaType type = (idx_str[0] == 'v') ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
  974. av_log(s, AV_LOG_DEBUG, "Map all streams of type %s\n", idx_str);
  975. for (i = 0; i < s->nb_streams; i++) {
  976. if (s->streams[i]->codecpar->codec_type != type)
  977. continue;
  978. as->media_type = s->streams[i]->codecpar->codec_type;
  979. if ((ret = adaptation_set_add_stream(s, c->nb_as, i)) < 0)
  980. return ret;
  981. }
  982. } else { // select single stream
  983. i = strtol(idx_str, &end_str, 10);
  984. if (idx_str == end_str || i < 0 || i >= s->nb_streams) {
  985. av_log(s, AV_LOG_ERROR, "Selected stream \"%s\" not found!\n", idx_str);
  986. return AVERROR(EINVAL);
  987. }
  988. av_log(s, AV_LOG_DEBUG, "Map stream %d\n", i);
  989. if (as->media_type == AVMEDIA_TYPE_UNKNOWN) {
  990. as->media_type = s->streams[i]->codecpar->codec_type;
  991. }
  992. if ((ret = adaptation_set_add_stream(s, c->nb_as, i)) < 0)
  993. return ret;
  994. }
  995. if (*p == ' ')
  996. state = new_set;
  997. if (*p)
  998. p++;
  999. } else {
  1000. return AVERROR(EINVAL);
  1001. }
  1002. }
  1003. end:
  1004. // check for unassigned streams
  1005. for (i = 0; i < s->nb_streams; i++) {
  1006. OutputStream *os = &c->streams[i];
  1007. if (!os->as_idx) {
  1008. av_log(s, AV_LOG_ERROR, "Stream %d is not mapped to an AdaptationSet\n", i);
  1009. return AVERROR(EINVAL);
  1010. }
  1011. }
  1012. // check references for trick mode AdaptationSet
  1013. for (i = 0; i < c->nb_as; i++) {
  1014. as = &c->as[i];
  1015. if (as->trick_idx < 0)
  1016. continue;
  1017. for (n = 0; n < c->nb_as; n++) {
  1018. if (c->as[n].id == as->trick_idx)
  1019. break;
  1020. }
  1021. if (n >= c->nb_as) {
  1022. av_log(s, AV_LOG_ERROR, "reference AdaptationSet id \"%d\" not found for trick mode AdaptationSet id \"%d\"\n", as->trick_idx, as->id);
  1023. return AVERROR(EINVAL);
  1024. }
  1025. }
  1026. return 0;
  1027. }
/**
 * Write the MPD manifest to s->url, and, if hls_playlist is enabled,
 * also (re)write the HLS master playlist.
 *
 * When the target is a local file the manifest is written to "<url>.tmp"
 * and atomically renamed into place, to avoid readers seeing partial
 * files. @p final selects a static (VOD) vs dynamic (live) MPD.
 *
 * @return 0 on success, a negative AVERROR code on I/O failure.
 */
static int write_manifest(AVFormatContext *s, int final)
{
    DASHContext *c = s->priv_data;
    AVIOContext *out;
    char temp_filename[1024];
    int ret, i;
    const char *proto = avio_find_protocol_name(s->url);
    int use_rename = proto && !strcmp(proto, "file");
    // Warn about the non-atomic path only once per process.
    static unsigned int warned_non_file = 0;
    AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
    AVDictionary *opts = NULL;

    if (!use_rename && !warned_non_file++)
        av_log(s, AV_LOG_ERROR, "Cannot use rename on non file protocol, this may lead to races and temporary partial files\n");

    // Write to a .tmp sibling when we can rename; otherwise write in place.
    snprintf(temp_filename, sizeof(temp_filename), use_rename ? "%s.tmp" : "%s", s->url);
    set_http_options(&opts, c);
    ret = dashenc_io_open(s, &c->mpd_out, temp_filename, &opts);
    av_dict_free(&opts);
    if (ret < 0) {
        return handle_io_open_error(s, ret, temp_filename);
    }
    out = c->mpd_out;
    avio_printf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    avio_printf(out, "<MPD xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
                "\txmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n"
                "\txmlns:xlink=\"http://www.w3.org/1999/xlink\"\n"
                "\txsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd\"\n"
                "\tprofiles=\"");
    // profiles= may list both; the DASH entry gets a trailing comma only
    // when the DVB entry follows (the follower closes the attribute).
    if (c->profile & MPD_PROFILE_DASH)
        avio_printf(out, "%s%s", "urn:mpeg:dash:profile:isoff-live:2011", c->profile & MPD_PROFILE_DVB ? "," : "\"\n");
    if (c->profile & MPD_PROFILE_DVB)
        avio_printf(out, "%s", "urn:dvb:dash:profile:dvb-dash:2014\"\n");
    avio_printf(out, "\ttype=\"%s\"\n",
                final ? "static" : "dynamic");
    if (final) {
        avio_printf(out, "\tmediaPresentationDuration=\"");
        write_time(out, c->total_duration);
        avio_printf(out, "\"\n");
    } else {
        // Live: advertise how often clients should re-fetch the MPD.
        int64_t update_period = c->last_duration / AV_TIME_BASE;
        char now_str[100];
        if (c->use_template && !c->use_timeline)
            update_period = 500;
        if (c->update_period)
            update_period = c->update_period;
        avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", update_period);
        if (!c->ldash)
            avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE);
        if (c->availability_start_time[0])
            avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time);
        format_date(now_str, sizeof(now_str), av_gettime());
        if (now_str[0])
            avio_printf(out, "\tpublishTime=\"%s\"\n", now_str);
        if (c->window_size && c->use_template) {
            avio_printf(out, "\ttimeShiftBufferDepth=\"");
            write_time(out, c->last_duration * c->window_size);
            avio_printf(out, "\"\n");
        }
    }
    avio_printf(out, "\tmaxSegmentDuration=\"");
    write_time(out, c->max_segment_duration);
    avio_printf(out, "\"\n");
    avio_printf(out, "\tminBufferTime=\"");
    // LL-DASH: buffer one GOP if known; otherwise two segment durations.
    write_time(out, c->ldash && c->max_gop_size ? c->max_gop_size : c->last_duration * 2);
    avio_printf(out, "\">\n");
    avio_printf(out, "\t<ProgramInformation>\n");
    if (title) {
        char *escaped = xmlescape(title->value);
        avio_printf(out, "\t\t<Title>%s</Title>\n", escaped);
        av_free(escaped);
    }
    avio_printf(out, "\t</ProgramInformation>\n");
    avio_printf(out, "\t<ServiceDescription id=\"0\">\n");
    if (!final && c->target_latency && c->target_latency_refid >= 0) {
        avio_printf(out, "\t\t<Latency target=\"%"PRId64"\"", c->target_latency / 1000);
        if (s->nb_streams > 1)
            avio_printf(out, " referenceId=\"%d\"", c->target_latency_refid);
        avio_printf(out, "/>\n");
    }
    // Only emit PlaybackRate if either bound differs from 1/1.
    if (av_cmp_q(c->min_playback_rate, (AVRational) {1, 1}) ||
        av_cmp_q(c->max_playback_rate, (AVRational) {1, 1}))
        avio_printf(out, "\t\t<PlaybackRate min=\"%.2f\" max=\"%.2f\"/>\n",
                    av_q2d(c->min_playback_rate), av_q2d(c->max_playback_rate));
    avio_printf(out, "\t</ServiceDescription>\n");
    // Sliding-window segment lists: Period start follows the oldest
    // retained segment instead of PT0.0S.
    if (c->window_size && s->nb_streams > 0 && c->streams[0].nb_segments > 0 && !c->use_template) {
        OutputStream *os = &c->streams[0];
        int start_index = FFMAX(os->nb_segments - c->window_size, 0);
        int64_t start_time = av_rescale_q(os->segments[start_index]->time, s->streams[0]->time_base, AV_TIME_BASE_Q);
        avio_printf(out, "\t<Period id=\"0\" start=\"");
        write_time(out, start_time);
        avio_printf(out, "\">\n");
    } else {
        avio_printf(out, "\t<Period id=\"0\" start=\"PT0.0S\">\n");
    }

    for (i = 0; i < c->nb_as; i++) {
        if ((ret = write_adaptation_set(s, out, i, final)) < 0)
            return ret;
    }
    avio_printf(out, "\t</Period>\n");

    if (c->utc_timing_url)
        avio_printf(out, "\t<UTCTiming schemeIdUri=\"urn:mpeg:dash:utc:http-xsdate:2014\" value=\"%s\"/>\n", c->utc_timing_url);

    avio_printf(out, "</MPD>\n");
    avio_flush(out);
    dashenc_io_close(s, &c->mpd_out, temp_filename);

    if (use_rename) {
        if ((ret = ff_rename(temp_filename, s->url, s)) < 0)
            return ret;
    }

    if (c->hls_playlist) {
        char filename_hls[1024];
        const char *audio_group = "A1";
        char audio_codec_str[128] = "\0";
        int is_default = 1;
        int max_audio_bitrate = 0;

        // Publish master playlist only the configured rate
        if (c->master_playlist_created && (!c->master_publish_rate ||
             c->streams[0].segment_index % c->master_publish_rate))
            return 0;

        if (*c->dirname)
            snprintf(filename_hls, sizeof(filename_hls), "%s%s", c->dirname, c->hls_master_name);
        else
            snprintf(filename_hls, sizeof(filename_hls), "%s", c->hls_master_name);

        snprintf(temp_filename, sizeof(temp_filename), use_rename ? "%s.tmp" : "%s", filename_hls);

        set_http_options(&opts, c);
        ret = dashenc_io_open(s, &c->m3u8_out, temp_filename, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            return handle_io_open_error(s, ret, temp_filename);
        }

        ff_hls_write_playlist_version(c->m3u8_out, 7);

        // First pass: audio renditions. Only MP4 segments are referenced
        // from the HLS playlist; WebM streams are skipped.
        for (i = 0; i < s->nb_streams; i++) {
            char playlist_file[64];
            AVStream *st = s->streams[i];
            OutputStream *os = &c->streams[i];

            if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;
            if (os->segment_type != SEGMENT_TYPE_MP4)
                continue;
            get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i);
            ff_hls_write_audio_rendition(c->m3u8_out, (char *)audio_group,
                                         playlist_file, NULL, i, is_default);
            max_audio_bitrate = FFMAX(st->codecpar->bit_rate +
                                      os->muxer_overhead, max_audio_bitrate);
            // Accumulate the distinct audio codec strings into a comma list.
            if (!av_strnstr(audio_codec_str, os->codec_str, sizeof(audio_codec_str))) {
                if (strlen(audio_codec_str))
                    av_strlcat(audio_codec_str, ",", sizeof(audio_codec_str));
                av_strlcat(audio_codec_str, os->codec_str, sizeof(audio_codec_str));
            }
            is_default = 0;
        }

        // Second pass: video variant streams, each tied to the audio group.
        for (i = 0; i < s->nb_streams; i++) {
            char playlist_file[64];
            char codec_str[128];
            AVStream *st = s->streams[i];
            OutputStream *os = &c->streams[i];
            char *agroup = NULL;
            char *codec_str_ptr = NULL;
            // Advertised bandwidth: configured rate, else measured average
            // (final) or first-segment estimate, plus muxer overhead.
            int stream_bitrate = os->muxer_overhead;
            if (os->bit_rate > 0)
                stream_bitrate += os->bit_rate;
            else if (final)
                stream_bitrate += os->pos * 8 * AV_TIME_BASE / c->total_duration;
            else if (os->first_segment_bit_rate > 0)
                stream_bitrate += os->first_segment_bit_rate;
            if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                continue;
            if (os->segment_type != SEGMENT_TYPE_MP4)
                continue;
            av_strlcpy(codec_str, os->codec_str, sizeof(codec_str));
            if (max_audio_bitrate) {
                agroup = (char *)audio_group;
                stream_bitrate += max_audio_bitrate;
                av_strlcat(codec_str, ",", sizeof(codec_str));
                av_strlcat(codec_str, audio_codec_str, sizeof(codec_str));
            }
            // NOTE(review): for HEVC the CODECS attribute is suppressed
            // (codec_str_ptr stays NULL) — presumably because the generated
            // string may not be HLS-conformant; confirm before changing.
            if (st->codecpar->codec_id != AV_CODEC_ID_HEVC) {
                codec_str_ptr = codec_str;
            }
            get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i);
            ff_hls_write_stream_info(st, c->m3u8_out, stream_bitrate,
                                     playlist_file, agroup,
                                     codec_str_ptr, NULL, NULL);
        }
        dashenc_io_close(s, &c->m3u8_out, temp_filename);
        if (use_rename)
            if ((ret = ff_rename(temp_filename, filename_hls, s)) < 0)
                return ret;
        c->master_playlist_created = 1;
    }

    return 0;
}
  1218. static int dict_copy_entry(AVDictionary **dst, const AVDictionary *src, const char *key)
  1219. {
  1220. AVDictionaryEntry *entry = av_dict_get(src, key, NULL, 0);
  1221. if (entry)
  1222. av_dict_set(dst, key, entry->value, AV_DICT_DONT_OVERWRITE);
  1223. return 0;
  1224. }
/**
 * Muxer init: validate and reconcile the option set, create the per-stream
 * OutputStream state, and initialize one child muxer (mov/webm) per stream.
 *
 * Called once before any packet is written. Many option combinations are
 * mutually exclusive; conflicts are resolved here with a warning rather
 * than an error where a sensible fallback exists.
 *
 * @return 0 on success, a negative AVERROR code otherwise.
 */
static int dash_init(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int ret = 0, i;
    char *ptr;
    char basename[1024];

    c->nr_of_streams_to_flush = 0;
    // single_file_name implies single_file; single_file rules out templates.
    if (c->single_file_name)
        c->single_file = 1;
    if (c->single_file)
        c->use_template = 0;

    if (!c->profile) {
        av_log(s, AV_LOG_ERROR, "At least one profile must be enabled.\n");
        return AVERROR(EINVAL);
    }
#if FF_API_DASH_MIN_SEG_DURATION
    // Deprecated option: 5000000 is its default, so any other value means
    // the user set it explicitly and it overrides seg_duration.
    if (c->min_seg_duration != 5000000) {
        av_log(s, AV_LOG_WARNING, "The min_seg_duration option is deprecated and will be removed. Please use the -seg_duration\n");
        c->seg_duration = c->min_seg_duration;
    }
#endif
    if (c->lhls && s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(s, AV_LOG_ERROR,
               "LHLS is experimental, Please set -strict experimental in order to enable it.\n");
        return AVERROR_EXPERIMENTAL;
    }

    // The low-latency family of options all require streaming mode.
    if (c->lhls && !c->streaming) {
        av_log(s, AV_LOG_WARNING, "LHLS option will be ignored as streaming is not enabled\n");
        c->lhls = 0;
    }

    if (c->lhls && !c->hls_playlist) {
        av_log(s, AV_LOG_WARNING, "LHLS option will be ignored as hls_playlist is not enabled\n");
        c->lhls = 0;
    }

    if (c->ldash && !c->streaming) {
        av_log(s, AV_LOG_WARNING, "LDash option will be ignored as streaming is not enabled\n");
        c->ldash = 0;
    }

    if (c->target_latency && !c->streaming) {
        av_log(s, AV_LOG_WARNING, "Target latency option will be ignored as streaming is not enabled\n");
        c->target_latency = 0;
    }

    // global_sidx only makes sense for a non-streamed single file.
    if (c->global_sidx && !c->single_file) {
        av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as single_file is not enabled\n");
        c->global_sidx = 0;
    }

    if (c->global_sidx && c->streaming) {
        av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as streaming is enabled\n");
        c->global_sidx = 0;
    }
    if (c->frag_type == FRAG_TYPE_NONE && c->streaming) {
        av_log(s, AV_LOG_VERBOSE, "Changing frag_type from none to every_frame as streaming is enabled\n");
        c->frag_type = FRAG_TYPE_EVERY_FRAME;
    }

    // write_prft < 0 means "auto": enable it exactly when LDASH is on.
    if (c->write_prft < 0) {
        c->write_prft = c->ldash;
        if (c->ldash)
            av_log(s, AV_LOG_VERBOSE, "Enabling Producer Reference Time element for Low Latency mode\n");
    }

    if (c->write_prft && !c->utc_timing_url) {
        av_log(s, AV_LOG_WARNING, "Producer Reference Time element option will be ignored as utc_timing_url is not set\n");
        c->write_prft = 0;
    }

    if (c->write_prft && !c->streaming) {
        av_log(s, AV_LOG_WARNING, "Producer Reference Time element option will be ignored as streaming is not enabled\n");
        c->write_prft = 0;
    }

    if (c->ldash && !c->write_prft) {
        av_log(s, AV_LOG_WARNING, "Low Latency mode enabled without Producer Reference Time element option! Resulting manifest may not be complaint\n");
    }

    if (c->target_latency && !c->write_prft) {
        av_log(s, AV_LOG_WARNING, "Target latency option will be ignored as Producer Reference Time element will not be written\n");
        c->target_latency = 0;
    }

    if (av_cmp_q(c->max_playback_rate, c->min_playback_rate) < 0) {
        av_log(s, AV_LOG_WARNING, "Minimum playback rate value is higer than the Maximum. Both will be ignored\n");
        c->min_playback_rate = c->max_playback_rate = (AVRational) {1, 1};
    }

    // Split s->url into output directory (c->dirname, with trailing '/')
    // and basename without extension (used for default segment names).
    av_strlcpy(c->dirname, s->url, sizeof(c->dirname));
    ptr = strrchr(c->dirname, '/');
    if (ptr) {
        av_strlcpy(basename, &ptr[1], sizeof(basename));
        ptr[1] = '\0';
    } else {
        c->dirname[0] = '\0';
        av_strlcpy(basename, s->url, sizeof(basename));
    }

    ptr = strrchr(basename, '.');
    if (ptr)
        *ptr = '\0';

    c->streams = av_mallocz(sizeof(*c->streams) * s->nb_streams);
    if (!c->streams)
        return AVERROR(ENOMEM);

    if ((ret = parse_adaptation_sets(s)) < 0)
        return ret;

    if ((ret = init_segment_types(s)) < 0)
        return ret;

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        AdaptationSet *as = &c->as[os->as_idx - 1];
        AVFormatContext *ctx;
        AVStream *st;
        AVDictionary *opts = NULL;
        char filename[1024];

        os->bit_rate = s->streams[i]->codecpar->bit_rate;
        if (!os->bit_rate) {
            // Missing bitrate is only fatal in strict compliance mode.
            int level = s->strict_std_compliance >= FF_COMPLIANCE_STRICT ?
                        AV_LOG_ERROR : AV_LOG_WARNING;
            av_log(s, level, "No bit rate set for stream %d\n", i);
            if (s->strict_std_compliance >= FF_COMPLIANCE_STRICT)
                return AVERROR(EINVAL);
        }

        // copy AdaptationSet language and role from stream metadata
        dict_copy_entry(&as->metadata, s->streams[i]->metadata, "language");
        dict_copy_entry(&as->metadata, s->streams[i]->metadata, "role");

        // Expand the $ext$ placeholder in the segment name templates with
        // the per-stream container extension.
        if (c->init_seg_name) {
            os->init_seg_name = av_strireplace(c->init_seg_name, "$ext$", os->extension_name);
            if (!os->init_seg_name)
                return AVERROR(ENOMEM);
        }
        if (c->media_seg_name) {
            os->media_seg_name = av_strireplace(c->media_seg_name, "$ext$", os->extension_name);
            if (!os->media_seg_name)
                return AVERROR(ENOMEM);
        }
        if (c->single_file_name) {
            os->single_file_name = av_strireplace(c->single_file_name, "$ext$", os->extension_name);
            if (!os->single_file_name)
                return AVERROR(ENOMEM);
        }

        if (os->segment_type == SEGMENT_TYPE_WEBM) {
            if ((!c->single_file && check_file_extension(os->init_seg_name, os->format_name) != 0) ||
                (!c->single_file && check_file_extension(os->media_seg_name, os->format_name) != 0) ||
                (c->single_file && check_file_extension(os->single_file_name, os->format_name) != 0)) {
                av_log(s, AV_LOG_WARNING,
                       "One or many segment file names doesn't end with .webm. "
                       "Override -init_seg_name and/or -media_seg_name and/or "
                       "-single_file_name to end with the extension .webm\n");
            }
            if (c->streaming) {
                // Streaming not supported as matroskaenc buffers internally before writing the output
                av_log(s, AV_LOG_WARNING, "One or more streams in WebM output format. Streaming option will be ignored\n");
                c->streaming = 0;
            }
        }

        // Create the child muxer context that produces this stream's
        // segments, mirroring the relevant settings from the parent.
        os->ctx = ctx = avformat_alloc_context();
        if (!ctx)
            return AVERROR(ENOMEM);
        ctx->oformat = av_guess_format(os->format_name, NULL, NULL);
        if (!ctx->oformat)
            return AVERROR_MUXER_NOT_FOUND;
        ctx->interrupt_callback    = s->interrupt_callback;
        ctx->opaque                = s->opaque;
        ctx->io_close              = s->io_close;
        ctx->io_open               = s->io_open;
        ctx->strict_std_compliance = s->strict_std_compliance;

        if (!(st = avformat_new_stream(ctx, NULL)))
            return AVERROR(ENOMEM);
        avcodec_parameters_copy(st->codecpar, s->streams[i]->codecpar);
        st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;
        st->time_base = s->streams[i]->time_base;
        st->avg_frame_rate = s->streams[i]->avg_frame_rate;
        ctx->avoid_negative_ts = s->avoid_negative_ts;
        ctx->flags = s->flags;

        os->parser = av_parser_init(st->codecpar->codec_id);
        if (os->parser) {
            os->parser_avctx = avcodec_alloc_context3(NULL);
            if (!os->parser_avctx)
                return AVERROR(ENOMEM);
            ret = avcodec_parameters_to_context(os->parser_avctx, st->codecpar);
            if (ret < 0)
                return ret;
            // We only want to parse frame headers
            os->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
        }

        if (c->single_file) {
            if (os->single_file_name)
                ff_dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), os->single_file_name, i, 0, os->bit_rate, 0);
            else
                snprintf(os->initfile, sizeof(os->initfile), "%s-stream%d.%s", basename, i, os->format_name);
        } else {
            ff_dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), os->init_seg_name, i, 0, os->bit_rate, 0);
        }
        snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
        set_http_options(&opts, c);
        if (!c->single_file) {
            // Segmented mode: child muxer writes into a dyn buf, which is
            // drained into os->out per segment.
            if ((ret = avio_open_dyn_buf(&ctx->pb)) < 0)
                return ret;
            ret = s->io_open(s, &os->out, filename, AVIO_FLAG_WRITE, &opts);
        } else {
            ctx->url = av_strdup(filename);
            ret = avio_open2(&ctx->pb, filename, AVIO_FLAG_WRITE, NULL, &opts);
        }
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        os->init_start_pos = 0;

        av_dict_copy(&opts, c->format_options, 0);
        // Per-set durations/frag_type fall back to the global options.
        if (!as->seg_duration)
            as->seg_duration = c->seg_duration;
        if (!as->frag_duration)
            as->frag_duration = c->frag_duration;
        if (as->frag_type < 0)
            as->frag_type = c->frag_type;
        os->seg_duration = as->seg_duration;
        os->frag_duration = as->frag_duration;
        os->frag_type = as->frag_type;

        c->max_segment_duration = FFMAX(c->max_segment_duration, as->seg_duration);

        // DVB-DASH constrains segment durations to [0.96s, 15s].
        if (c->profile & MPD_PROFILE_DVB && (os->seg_duration > 15000000 || os->seg_duration < 960000)) {
            av_log(s, AV_LOG_ERROR, "Segment duration %"PRId64" is outside the allowed range for DVB-DASH profile\n", os->seg_duration);
            return AVERROR(EINVAL);
        }

        if (os->frag_type == FRAG_TYPE_DURATION && !os->frag_duration) {
            av_log(s, AV_LOG_WARNING, "frag_type set to duration for stream %d but no frag_duration set\n", i);
            os->frag_type = c->streaming ? FRAG_TYPE_EVERY_FRAME : FRAG_TYPE_NONE;
        }
        if (os->frag_type == FRAG_TYPE_DURATION && os->frag_duration > os->seg_duration) {
            av_log(s, AV_LOG_ERROR, "Fragment duration %"PRId64" is longer than Segment duration %"PRId64"\n", os->frag_duration, os->seg_duration);
            return AVERROR(EINVAL);
        }
        // P-frame fragmentation needs a video stream and a working parser.
        if (os->frag_type == FRAG_TYPE_PFRAMES && (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO || !os->parser)) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && !os->parser)
                av_log(s, AV_LOG_WARNING, "frag_type set to P-Frame reordering, but no parser found for stream %d\n", i);
            os->frag_type = c->streaming ? FRAG_TYPE_EVERY_FRAME : FRAG_TYPE_NONE;
        }
        if (os->frag_type != FRAG_TYPE_PFRAMES && as->trick_idx < 0)
            // Set this now if a parser isn't used
            os->coding_dependency = 1;

        if (os->segment_type == SEGMENT_TYPE_MP4) {
            if (c->streaming)
                // skip_sidx : Reduce bitrate overhead
                // skip_trailer : Avoids growing memory usage with time
                av_dict_set(&opts, "movflags", "+dash+delay_moov+skip_sidx+skip_trailer", AV_DICT_APPEND);
            else {
                if (c->global_sidx)
                    av_dict_set(&opts, "movflags", "+dash+delay_moov+global_sidx+skip_trailer", AV_DICT_APPEND);
                else
                    av_dict_set(&opts, "movflags", "+dash+delay_moov+skip_trailer", AV_DICT_APPEND);
            }
            if (os->frag_type == FRAG_TYPE_EVERY_FRAME)
                av_dict_set(&opts, "movflags", "+frag_every_frame", AV_DICT_APPEND);
            else
                av_dict_set(&opts, "movflags", "+frag_custom", AV_DICT_APPEND);
            if (os->frag_type == FRAG_TYPE_DURATION)
                av_dict_set_int(&opts, "frag_duration", os->frag_duration, 0);
            if (c->write_prft)
                av_dict_set(&opts, "write_prft", "wallclock", 0);
        } else {
            // WebM child muxer options.
            av_dict_set_int(&opts, "cluster_time_limit", c->seg_duration / 1000, 0);
            av_dict_set_int(&opts, "cluster_size_limit", 5 * 1024 * 1024, 0); // set a large cluster size limit
            av_dict_set_int(&opts, "dash", 1, 0);
            av_dict_set_int(&opts, "dash_track_number", i + 1, 0);
            av_dict_set_int(&opts, "live", 1, 0);
        }
        ret = avformat_init_output(ctx, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        os->ctx_inited = 1;
        avio_flush(ctx->pb);

        av_log(s, AV_LOG_VERBOSE, "Representation %d init segment will be written to: %s\n", i, filename);

        s->streams[i]->time_base = st->time_base;
        // If the muxer wants to shift timestamps, request to have them shifted
        // already before being handed to this muxer, so we don't have mismatches
        // between the MPD and the actual segments.
        s->avoid_negative_ts = ctx->avoid_negative_ts;
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVRational avg_frame_rate = s->streams[i]->avg_frame_rate;
            AVRational par;
            if (avg_frame_rate.num > 0) {
                if (av_cmp_q(avg_frame_rate, as->min_frame_rate) < 0)
                    as->min_frame_rate = avg_frame_rate;
                if (av_cmp_q(as->max_frame_rate, avg_frame_rate) < 0)
                    as->max_frame_rate = avg_frame_rate;
            } else {
                as->ambiguous_frame_rate = 1;
            }

            if (st->codecpar->width > as->max_width)
                as->max_width = st->codecpar->width;
            if (st->codecpar->height > as->max_height)
                as->max_height = st->codecpar->height;

            if (st->sample_aspect_ratio.num)
                os->sar = st->sample_aspect_ratio;
            else
                os->sar = (AVRational){1,1};
            av_reduce(&par.num, &par.den,
                      st->codecpar->width  * (int64_t)os->sar.num,
                      st->codecpar->height * (int64_t)os->sar.den,
                      1024 * 1024);

            // All streams in one Adaptation Set must share a display
            // aspect ratio.
            if (as->par.num && av_cmp_q(par, as->par)) {
                av_log(s, AV_LOG_ERROR, "Conflicting stream aspect ratios values in Adaptation Set %d. Please ensure all adaptation sets have the same aspect ratio\n", os->as_idx);
                return AVERROR(EINVAL);
            }
            as->par = par;

            c->has_video = 1;
        }

        set_codec_str(s, st->codecpar, &st->avg_frame_rate, os->codec_str,
                      sizeof(os->codec_str));
        os->first_pts = AV_NOPTS_VALUE;
        os->max_pts = AV_NOPTS_VALUE;
        os->last_dts = AV_NOPTS_VALUE;
        os->segment_index = 1;

        if (s->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            c->nr_of_streams_to_flush++;
    }

    if (!c->has_video && c->seg_duration <= 0) {
        av_log(s, AV_LOG_WARNING, "no video stream and no seg duration set\n");
        return AVERROR(EINVAL);
    }
    if (!c->has_video && c->frag_type == FRAG_TYPE_PFRAMES)
        av_log(s, AV_LOG_WARNING, "no video stream and P-frame fragmentation set\n");

    c->nr_of_streams_flushed = 0;
    c->target_latency_refid = -1;

    return 0;
}
  1540. static int dash_write_header(AVFormatContext *s)
  1541. {
  1542. DASHContext *c = s->priv_data;
  1543. int i, ret;
  1544. for (i = 0; i < s->nb_streams; i++) {
  1545. OutputStream *os = &c->streams[i];
  1546. if ((ret = avformat_write_header(os->ctx, NULL)) < 0)
  1547. return ret;
  1548. // Flush init segment
  1549. // Only for WebM segment, since for mp4 delay_moov is set and
  1550. // the init segment is thus flushed after the first packets.
  1551. if (os->segment_type == SEGMENT_TYPE_WEBM &&
  1552. (ret = flush_init_segment(s, os)) < 0)
  1553. return ret;
  1554. }
  1555. return ret;
  1556. }
  1557. static int add_segment(OutputStream *os, const char *file,
  1558. int64_t time, int64_t duration,
  1559. int64_t start_pos, int64_t range_length,
  1560. int64_t index_length, int next_exp_index)
  1561. {
  1562. int err;
  1563. Segment *seg;
  1564. if (os->nb_segments >= os->segments_size) {
  1565. os->segments_size = (os->segments_size + 1) * 2;
  1566. if ((err = av_reallocp_array(&os->segments, sizeof(*os->segments),
  1567. os->segments_size)) < 0) {
  1568. os->segments_size = 0;
  1569. os->nb_segments = 0;
  1570. return err;
  1571. }
  1572. }
  1573. seg = av_mallocz(sizeof(*seg));
  1574. if (!seg)
  1575. return AVERROR(ENOMEM);
  1576. av_strlcpy(seg->file, file, sizeof(seg->file));
  1577. seg->time = time;
  1578. seg->duration = duration;
  1579. if (seg->time < 0) { // If pts<0, it is expected to be cut away with an edit list
  1580. seg->duration += seg->time;
  1581. seg->time = 0;
  1582. }
  1583. seg->start_pos = start_pos;
  1584. seg->range_length = range_length;
  1585. seg->index_length = index_length;
  1586. os->segments[os->nb_segments++] = seg;
  1587. os->segment_index++;
  1588. //correcting the segment index if it has fallen behind the expected value
  1589. if (os->segment_index < next_exp_index) {
  1590. av_log(NULL, AV_LOG_WARNING, "Correcting the segment index after file %s: current=%d corrected=%d\n",
  1591. file, os->segment_index, next_exp_index);
  1592. os->segment_index = next_exp_index;
  1593. }
  1594. return 0;
  1595. }
/*
 * Write an ISOBMFF 'styp' (segment type) box at the start of a media
 * segment: a fixed 24-byte box with major brand "msdh", minor version 0,
 * and compatible brands "msdh" and "msix".
 */
static void write_styp(AVIOContext *pb)
{
    avio_wb32(pb, 24);          /* box size: size(4)+type(4)+major(4)+minor(4)+2 brands(8) */
    ffio_wfourcc(pb, "styp");
    ffio_wfourcc(pb, "msdh");   /* major brand */
    avio_wb32(pb, 0);           /* minor version */
    ffio_wfourcc(pb, "msdh");   /* compatible brands */
    ffio_wfourcc(pb, "msix");
}
  1605. static void find_index_range(AVFormatContext *s, const char *full_path,
  1606. int64_t pos, int *index_length)
  1607. {
  1608. uint8_t buf[8];
  1609. AVIOContext *pb;
  1610. int ret;
  1611. ret = s->io_open(s, &pb, full_path, AVIO_FLAG_READ, NULL);
  1612. if (ret < 0)
  1613. return;
  1614. if (avio_seek(pb, pos, SEEK_SET) != pos) {
  1615. ff_format_io_close(s, &pb);
  1616. return;
  1617. }
  1618. ret = avio_read(pb, buf, 8);
  1619. ff_format_io_close(s, &pb);
  1620. if (ret < 8)
  1621. return;
  1622. if (AV_RL32(&buf[4]) != MKTAG('s', 'i', 'd', 'x'))
  1623. return;
  1624. *index_length = AV_RB32(&buf[0]);
  1625. }
  1626. static int update_stream_extradata(AVFormatContext *s, OutputStream *os,
  1627. AVPacket *pkt, AVRational *frame_rate)
  1628. {
  1629. AVCodecParameters *par = os->ctx->streams[0]->codecpar;
  1630. uint8_t *extradata;
  1631. int ret, extradata_size;
  1632. if (par->extradata_size)
  1633. return 0;
  1634. extradata = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &extradata_size);
  1635. if (!extradata_size)
  1636. return 0;
  1637. ret = ff_alloc_extradata(par, extradata_size);
  1638. if (ret < 0)
  1639. return ret;
  1640. memcpy(par->extradata, extradata, extradata_size);
  1641. set_codec_str(s, par, frame_rate, os->codec_str, sizeof(os->codec_str));
  1642. return 0;
  1643. }
  1644. static void dashenc_delete_file(AVFormatContext *s, char *filename) {
  1645. DASHContext *c = s->priv_data;
  1646. int http_base_proto = ff_is_http_proto(filename);
  1647. if (http_base_proto) {
  1648. AVIOContext *out = NULL;
  1649. AVDictionary *http_opts = NULL;
  1650. set_http_options(&http_opts, c);
  1651. av_dict_set(&http_opts, "method", "DELETE", 0);
  1652. if (dashenc_io_open(s, &out, filename, &http_opts) < 0) {
  1653. av_log(s, AV_LOG_ERROR, "failed to delete %s\n", filename);
  1654. }
  1655. av_dict_free(&http_opts);
  1656. ff_format_io_close(s, &out);
  1657. } else {
  1658. int res = avpriv_io_delete(filename);
  1659. if (res < 0) {
  1660. char errbuf[AV_ERROR_MAX_STRING_SIZE];
  1661. av_strerror(res, errbuf, sizeof(errbuf));
  1662. av_log(s, (res == AVERROR(ENOENT) ? AV_LOG_WARNING : AV_LOG_ERROR), "failed to delete %s: %s\n", filename, errbuf);
  1663. }
  1664. }
  1665. }
  1666. static int dashenc_delete_segment_file(AVFormatContext *s, const char* file)
  1667. {
  1668. DASHContext *c = s->priv_data;
  1669. AVBPrint buf;
  1670. av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
  1671. av_bprintf(&buf, "%s%s", c->dirname, file);
  1672. if (!av_bprint_is_complete(&buf)) {
  1673. av_bprint_finalize(&buf, NULL);
  1674. av_log(s, AV_LOG_WARNING, "Out of memory for filename\n");
  1675. return AVERROR(ENOMEM);
  1676. }
  1677. dashenc_delete_file(s, buf.str);
  1678. av_bprint_finalize(&buf, NULL);
  1679. return 0;
  1680. }
  1681. static inline void dashenc_delete_media_segments(AVFormatContext *s, OutputStream *os, int remove_count)
  1682. {
  1683. for (int i = 0; i < remove_count; ++i) {
  1684. dashenc_delete_segment_file(s, os->segments[i]->file);
  1685. // Delete the segment regardless of whether the file was successfully deleted
  1686. av_free(os->segments[i]);
  1687. }
  1688. os->nb_segments -= remove_count;
  1689. memmove(os->segments, os->segments + remove_count, os->nb_segments * sizeof(*os->segments));
  1690. }
/**
 * Finalize the currently buffered segment(s) and rewrite the manifest.
 *
 * @param s      muxer context
 * @param final  nonzero on EOF: also write child-muxer trailers and, with
 *               global_sidx, patch the recorded segment offsets
 * @param stream index of the stream whose keyframe triggered the flush,
 *               or -1 to flush everything (used from dash_write_trailer)
 * @return 0 or >=0 on success, negative AVERROR on failure.
 */
static int dash_flush(AVFormatContext *s, int final, int stream)
{
    DASHContext *c = s->priv_data;
    int i, ret = 0;

    const char *proto = avio_find_protocol_name(s->url);
    int use_rename = proto && !strcmp(proto, "file");

    int cur_flush_segment_index = 0, next_exp_index = -1;
    if (stream >= 0) {
        cur_flush_segment_index = c->streams[stream].segment_index;

        //finding the next segment's expected index, based on the current pts value
        if (c->use_template && !c->use_timeline && c->index_correction &&
            c->streams[stream].last_pts != AV_NOPTS_VALUE &&
            c->streams[stream].first_pts != AV_NOPTS_VALUE) {
            int64_t pts_diff = av_rescale_q(c->streams[stream].last_pts -
                                            c->streams[stream].first_pts,
                                            s->streams[stream]->time_base,
                                            AV_TIME_BASE_Q);
            next_exp_index = (pts_diff / c->streams[stream].seg_duration) + 1;
        }
    }

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        AVStream *st = s->streams[i];
        int range_length, index_length = 0;
        int64_t duration;

        /* Nothing buffered for this stream since the last flush. */
        if (!os->packets_written)
            continue;

        // Flush the single stream that got a keyframe right now.
        // Flush all audio streams as well, in sync with video keyframes,
        // but not the other video streams.
        if (stream >= 0 && i != stream) {
            if (s->streams[stream]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO &&
                s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                continue;
            if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;
            // Make sure we don't flush audio streams multiple times, when
            // all video streams are flushed one at a time.
            if (c->has_video && os->segment_index > cur_flush_segment_index)
                continue;
        }

        /* In single-file mode all segments append to the init file. */
        if (c->single_file)
            snprintf(os->full_path, sizeof(os->full_path), "%s%s", c->dirname, os->initfile);

        ret = flush_dynbuf(c, os, &range_length);
        if (ret < 0)
            break;
        os->packets_written = 0;

        if (c->single_file) {
            find_index_range(s, os->full_path, os->pos, &index_length);
        } else {
            dashenc_io_close(s, &os->out, os->temp_path);

            /* Local files are written to a .tmp name first and atomically
             * renamed into place once complete. */
            if (use_rename) {
                ret = ff_rename(os->temp_path, os->full_path, os->ctx);
                if (ret < 0)
                    break;
            }
        }

        duration = av_rescale_q(os->max_pts - os->start_pts, st->time_base, AV_TIME_BASE_Q);
        os->last_duration = FFMAX(os->last_duration, duration);

        /* Container overhead in bits/s, derived once from the first segment
         * with a valid duration: (segment bytes - payload bytes) over time. */
        if (!os->muxer_overhead && os->max_pts > os->start_pts)
            os->muxer_overhead = ((int64_t) (range_length - os->total_pkt_size) *
                                  8 * AV_TIME_BASE) / duration;
        os->total_pkt_size = 0;
        os->total_pkt_duration = 0;

        /* Without a declared bitrate, estimate one from the first segment. */
        if (!os->bit_rate && !os->first_segment_bit_rate) {
            os->first_segment_bit_rate = (int64_t) range_length * 8 * AV_TIME_BASE / duration;
        }
        add_segment(os, os->filename, os->start_pts, os->max_pts - os->start_pts, os->pos, range_length, index_length, next_exp_index);
        av_log(s, AV_LOG_VERBOSE, "Representation %d media segment %d written to: %s\n", i, os->segment_index, os->full_path);

        os->pos += range_length;
    }

    /* Sliding window: remove segments that fell out of
     * window_size + extra_window_size. */
    if (c->window_size) {
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            int remove_count = os->nb_segments - c->window_size - c->extra_window_size;
            if (remove_count > 0)
                dashenc_delete_media_segments(s, os, remove_count);
        }
    }

    if (final) {
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            if (os->ctx && os->ctx_inited) {
                int64_t file_size = avio_tell(os->ctx->pb);
                av_write_trailer(os->ctx);
                if (c->global_sidx) {
                    int j, start_index, start_number;
                    /* The trailer appended a global sidx box; everything
                     * after it shifted by sidx_size bytes, so the recorded
                     * byte ranges must be adjusted to match. */
                    int64_t sidx_size = avio_tell(os->ctx->pb) - file_size;
                    get_start_index_number(os, c, &start_index, &start_number);
                    if (start_index >= os->nb_segments ||
                        os->segment_type != SEGMENT_TYPE_MP4)
                        continue;
                    os->init_range_length += sidx_size;
                    for (j = start_index; j < os->nb_segments; j++) {
                        Segment *seg = os->segments[j];
                        seg->start_pos += sidx_size;
                    }
                }
            }
        }
    }

    if (ret >= 0) {
        /* With video streams, only rewrite the manifest once all streams
         * that flush independently have done so for this round. */
        if (c->has_video && !final) {
            c->nr_of_streams_flushed++;
            if (c->nr_of_streams_flushed != c->nr_of_streams_to_flush)
                return ret;

            c->nr_of_streams_flushed = 0;
        }
        ret = write_manifest(s, final);
    }
    return ret;
}
/**
 * Ensure the packet carries usable AVProducerReferenceTime side data, and
 * capture it for the manifest's ProducerReferenceTime element.
 *
 * If the packet has no PRFT side data (or it has an unexpected size or
 * flags value), a fresh one is attached using the current wallclock time.
 * The flags value 24 mirrors what the generated side data uses below;
 * NOTE(review): its exact semantics come from the mov muxer contract —
 * confirm there before changing.
 *
 * @return 0 on success, AVERROR(ENOMEM) if side data allocation fails.
 */
static int dash_parse_prft(DASHContext *c, AVPacket *pkt)
{
    OutputStream *os = &c->streams[pkt->stream_index];
    AVProducerReferenceTime *prft;
    int side_data_size;

    prft = (AVProducerReferenceTime *)av_packet_get_side_data(pkt, AV_PKT_DATA_PRFT, &side_data_size);
    if (!prft || side_data_size != sizeof(AVProducerReferenceTime) || (prft->flags && prft->flags != 24)) {
        // No encoder generated or user provided capture time AVProducerReferenceTime side data. Instead
        // of letting the mov muxer generate one, do it here so we can also use it for the manifest.
        prft = (AVProducerReferenceTime *)av_packet_new_side_data(pkt, AV_PKT_DATA_PRFT,
                                                                  sizeof(AVProducerReferenceTime));
        if (!prft)
            return AVERROR(ENOMEM);
        prft->wallclock = av_gettime();
        prft->flags = 24;
    }
    /* Remember the PRFT of the stream's very first packet; the first stream
     * to get one becomes the target-latency reference. */
    if (os->first_pts == AV_NOPTS_VALUE) {
        os->producer_reference_time = *prft;
        if (c->target_latency_refid < 0)
            c->target_latency_refid = pkt->stream_index;
    }

    return 0;
}
/**
 * Per-packet entry point: decide segment boundaries, feed the packet to
 * the per-stream child muxer, open new segment files as needed and, in
 * streaming mode, push buffered bytes out immediately.
 *
 * @return 0 on success, negative AVERROR on failure.
 */
static int dash_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    DASHContext *c = s->priv_data;
    AVStream *st = s->streams[pkt->stream_index];
    OutputStream *os = &c->streams[pkt->stream_index];
    AdaptationSet *as = &c->as[os->as_idx - 1];
    int64_t seg_end_duration, elapsed_duration;
    int ret;

    ret = update_stream_extradata(s, os, pkt, &st->avg_frame_rate);
    if (ret < 0)
        return ret;

    // Fill in a heuristic guess of the packet duration, if none is available.
    // The mp4 muxer will do something similar (for the last packet in a fragment)
    // if nothing is set (setting it for the other packets doesn't hurt).
    // By setting a nonzero duration here, we can be sure that the mp4 muxer won't
    // invoke its heuristic (this doesn't have to be identical to that algorithm),
    // so that we know the exact timestamps of fragments.
    if (!pkt->duration && os->last_dts != AV_NOPTS_VALUE)
        pkt->duration = pkt->dts - os->last_dts;
    os->last_dts = pkt->dts;

    // If forcing the stream to start at 0, the mp4 muxer will set the start
    // timestamps to 0. Do the same here, to avoid mismatches in duration/timestamps.
    if (os->first_pts == AV_NOPTS_VALUE &&
        s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
        pkt->pts -= pkt->dts;
        pkt->dts  = 0;
    }

    if (c->write_prft) {
        ret = dash_parse_prft(c, pkt);
        if (ret < 0)
            return ret;
    }

    if (os->first_pts == AV_NOPTS_VALUE) {
        os->first_pts = pkt->pts;
    }
    os->last_pts = pkt->pts;

    /* availabilityStartTime is anchored to the arrival of the very first
     * packet across all streams. */
    if (!c->availability_start_time[0]) {
        int64_t start_time_us = av_gettime();
        c->start_time_s = start_time_us / 1000000;
        format_date(c->availability_start_time,
                    sizeof(c->availability_start_time), start_time_us);
    }

    if (!os->packets_written)
        os->availability_time_offset = 0;

    /* availabilityTimeOffset: how much earlier than the nominal segment
     * availability a client may fetch, derived from the fragment length. */
    if (!os->availability_time_offset &&
        ((os->frag_type == FRAG_TYPE_DURATION && os->seg_duration != os->frag_duration) ||
         (os->frag_type == FRAG_TYPE_EVERY_FRAME && pkt->duration))) {
        AdaptationSet *as = &c->as[os->as_idx - 1];
        int64_t frame_duration = 0;

        switch (os->frag_type) {
        case FRAG_TYPE_DURATION:
            frame_duration = os->frag_duration;
            break;
        case FRAG_TYPE_EVERY_FRAME:
            frame_duration = av_rescale_q(pkt->duration, st->time_base, AV_TIME_BASE_Q);
            break;
        }

        os->availability_time_offset = ((double) os->seg_duration -
                                        frame_duration) / AV_TIME_BASE;
        as->max_frag_duration = FFMAX(frame_duration, as->max_frag_duration);
    }

    /* Segment-cut criterion: with a template but no timeline, segments are
     * cut against the global schedule (first_pts); otherwise against the
     * start of the current segment. */
    if (c->use_template && !c->use_timeline) {
        elapsed_duration = pkt->pts - os->first_pts;
        seg_end_duration = (int64_t) os->segment_index * os->seg_duration;
    } else {
        elapsed_duration = pkt->pts - os->start_pts;
        seg_end_duration = os->seg_duration;
    }

    if (os->parser &&
        (os->frag_type == FRAG_TYPE_PFRAMES ||
         as->trick_idx >= 0)) {
        // Parse the packets only in scenarios where it's needed
        uint8_t *data;
        int size;
        av_parser_parse2(os->parser, os->parser_avctx,
                         &data, &size, pkt->data, pkt->size,
                         pkt->pts, pkt->dts, pkt->pos);

        os->coding_dependency |= os->parser->pict_type != AV_PICTURE_TYPE_I;
    }

    /* Cut a new segment on a keyframe once the target duration is reached. */
    if (pkt->flags & AV_PKT_FLAG_KEY && os->packets_written &&
        av_compare_ts(elapsed_duration, st->time_base,
                      seg_end_duration, AV_TIME_BASE_Q) >= 0) {
        if (!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            c->last_duration = av_rescale_q(pkt->pts - os->start_pts,
                                            st->time_base,
                                            AV_TIME_BASE_Q);
            c->total_duration = av_rescale_q(pkt->pts - os->first_pts,
                                             st->time_base,
                                             AV_TIME_BASE_Q);

            /* Warn when segment durations drift more than +/-10% — the
             * fixed-duration template cannot represent that accurately. */
            if ((!c->use_timeline || !c->use_template) && os->last_duration) {
                if (c->last_duration < os->last_duration*9/10 ||
                    c->last_duration > os->last_duration*11/10) {
                    av_log(s, AV_LOG_WARNING,
                           "Segment durations differ too much, enable use_timeline "
                           "and use_template, or keep a stricter keyframe interval\n");
                }
            }
        }

        if (c->write_prft && os->producer_reference_time.wallclock && !os->producer_reference_time_str[0])
            format_date(os->producer_reference_time_str,
                        sizeof(os->producer_reference_time_str),
                        os->producer_reference_time.wallclock);

        if ((ret = dash_flush(s, 0, pkt->stream_index)) < 0)
            return ret;
    }

    if (!os->packets_written) {
        // If we wrote a previous segment, adjust the start time of the segment
        // to the end of the previous one (which is the same as the mp4 muxer
        // does). This avoids gaps in the timeline.
        if (os->max_pts != AV_NOPTS_VALUE)
            os->start_pts = os->max_pts;
        else
            os->start_pts = pkt->pts;
    }
    if (os->max_pts == AV_NOPTS_VALUE)
        os->max_pts = pkt->pts + pkt->duration;
    else
        os->max_pts = FFMAX(os->max_pts, pkt->pts + pkt->duration);

    /* P-frame fragmentation: close a fragment right before a keyframe, or
     * before a P-frame that ends a B-frame reordering run. */
    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
        os->frag_type == FRAG_TYPE_PFRAMES &&
        os->packets_written) {
        av_assert0(os->parser);
        if ((os->parser->pict_type == AV_PICTURE_TYPE_P &&
             st->codecpar->video_delay &&
             !(os->last_flags & AV_PKT_FLAG_KEY)) ||
            pkt->flags & AV_PKT_FLAG_KEY) {
            /* NULL packet asks the child muxer to flush the open fragment. */
            ret = av_write_frame(os->ctx, NULL);
            if (ret < 0)
                return ret;

            if (!os->availability_time_offset) {
                int64_t frag_duration = av_rescale_q(os->total_pkt_duration, st->time_base,
                                                     AV_TIME_BASE_Q);
                os->availability_time_offset = ((double) os->seg_duration -
                                                frag_duration) / AV_TIME_BASE;
                as->max_frag_duration = FFMAX(frag_duration, as->max_frag_duration);
            }
        }
    }

    /* Record the GOP size (first keyframe after the stream start). */
    if (pkt->flags & AV_PKT_FLAG_KEY && (os->packets_written || os->nb_segments) && !os->gop_size && as->trick_idx < 0) {
        os->gop_size = os->last_duration + av_rescale_q(os->total_pkt_duration, st->time_base, AV_TIME_BASE_Q);
        c->max_gop_size = FFMAX(c->max_gop_size, os->gop_size);
    }

    if ((ret = ff_write_chained(os->ctx, 0, pkt, s, 0)) < 0)
        return ret;

    os->packets_written++;
    os->total_pkt_size += pkt->size;
    os->total_pkt_duration += pkt->duration;
    os->last_flags = pkt->flags;

    if (!os->init_range_length)
        flush_init_segment(s, os);

    //open the output context when the first frame of a segment is ready
    if (!c->single_file && os->packets_written == 1) {
        AVDictionary *opts = NULL;
        const char *proto = avio_find_protocol_name(s->url);
        int use_rename = proto && !strcmp(proto, "file");
        if (os->segment_type == SEGMENT_TYPE_MP4)
            write_styp(os->ctx->pb);
        os->filename[0] = os->full_path[0] = os->temp_path[0] = '\0';
        ff_dash_fill_tmpl_params(os->filename, sizeof(os->filename),
                                 os->media_seg_name, pkt->stream_index,
                                 os->segment_index, os->bit_rate, os->start_pts);
        snprintf(os->full_path, sizeof(os->full_path), "%s%s", c->dirname,
                 os->filename);
        snprintf(os->temp_path, sizeof(os->temp_path),
                 use_rename ? "%s.tmp" : "%s", os->full_path);
        set_http_options(&opts, c);
        ret = dashenc_io_open(s, &os->out, os->temp_path, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            return handle_io_open_error(s, ret, os->temp_path);
        }
        if (c->lhls) {
            char *prefetch_url = use_rename ? NULL : os->filename;
            write_hls_media_playlist(os, s, pkt->stream_index, 0, prefetch_url);
        }
    }

    //write out the data immediately in streaming mode
    if (c->streaming && os->segment_type == SEGMENT_TYPE_MP4) {
        int len = 0;
        uint8_t *buf = NULL;
        avio_flush(os->ctx->pb);
        /* Only the bytes appended since the last call are forwarded. */
        len = avio_get_dyn_buf (os->ctx->pb, &buf);
        if (os->out) {
            avio_write(os->out, buf + os->written_len, len - os->written_len);
            avio_flush(os->out);
        }
        os->written_len = len;
    }

    return ret;
}
/**
 * Finalize the session: flush the last segments and the final manifest,
 * then optionally (remove_at_exit) delete every produced file — segments,
 * init files, HLS playlists, the MPD and the HLS master playlist.
 *
 * Always returns 0; the result of the final dash_flush() is not propagated.
 */
static int dash_write_trailer(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int i;

    if (s->nb_streams > 0) {
        OutputStream *os = &c->streams[0];
        // If no segments have been written so far, try to do a crude
        // guess of the segment duration
        if (!c->last_duration)
            c->last_duration = av_rescale_q(os->max_pts - os->start_pts,
                                            s->streams[0]->time_base,
                                            AV_TIME_BASE_Q);
        c->total_duration = av_rescale_q(os->max_pts - os->first_pts,
                                         s->streams[0]->time_base,
                                         AV_TIME_BASE_Q);
    }
    /* final=1: write child trailers and the last manifest. */
    dash_flush(s, 1, -1);

    if (c->remove_at_exit) {
        for (i = 0; i < s->nb_streams; ++i) {
            OutputStream *os = &c->streams[i];
            dashenc_delete_media_segments(s, os, os->nb_segments);
            dashenc_delete_segment_file(s, os->initfile);
            if (c->hls_playlist && os->segment_type == SEGMENT_TYPE_MP4) {
                char filename[1024];
                get_hls_playlist_name(filename, sizeof(filename), c->dirname, i);
                dashenc_delete_file(s, filename);
            }
        }
        /* s->url is the MPD manifest path itself. */
        dashenc_delete_file(s, s->url);

        if (c->hls_playlist && c->master_playlist_created) {
            char filename[1024];
            snprintf(filename, sizeof(filename), "%s%s", c->dirname, c->hls_master_name);
            dashenc_delete_file(s, filename);
        }
    }

    return 0;
}
/**
 * Forward the check_bitstream callback to the per-stream child muxer, so
 * that e.g. the mp4 muxer can request a bitstream filter (annexb->mp4).
 * When the child installs a BSF (ret == 1), adopt it on the outer stream
 * and clear it on the child so the filtering happens exactly once.
 *
 * @return the child muxer's result, or 1 (no filtering needed) if the
 *         child format has no check_bitstream callback.
 */
static int dash_check_bitstream(struct AVFormatContext *s, const AVPacket *avpkt)
{
    DASHContext *c = s->priv_data;
    OutputStream *os = &c->streams[avpkt->stream_index];
    AVFormatContext *oc = os->ctx;
    if (oc->oformat->check_bitstream) {
        int ret;
        AVPacket pkt = *avpkt;
        /* The child context has a single stream at index 0. */
        pkt.stream_index = 0;
        ret = oc->oformat->check_bitstream(oc, &pkt);
        if (ret == 1) {
            AVStream *st = s->streams[avpkt->stream_index];
            AVStream *ost = oc->streams[0];
            /* Move ownership of the freshly created BSF to the outer stream. */
            st->internal->bsfc = ost->internal->bsfc;
            ost->internal->bsfc = NULL;
        }
        return ret;
    }
    return 1;
}
/* AVOption table: every user-settable -dash_* muxer option. OFFSET maps an
 * option onto its DASHContext field; E marks encoding-side options. */
#define OFFSET(x) offsetof(DASHContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_size", "number of segments kept in the manifest", OFFSET(window_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, E },
    { "extra_window_size", "number of segments kept outside of the manifest before removing from disk", OFFSET(extra_window_size), AV_OPT_TYPE_INT, { .i64 = 5 }, 0, INT_MAX, E },
#if FF_API_DASH_MIN_SEG_DURATION
    { "min_seg_duration", "minimum segment duration (in microseconds) (will be deprecated)", OFFSET(min_seg_duration), AV_OPT_TYPE_INT, { .i64 = 5000000 }, 0, INT_MAX, E },
#endif
    { "seg_duration", "segment duration (in seconds, fractional value can be set)", OFFSET(seg_duration), AV_OPT_TYPE_DURATION, { .i64 = 5000000 }, 0, INT_MAX, E },
    { "frag_duration", "fragment duration (in seconds, fractional value can be set)", OFFSET(frag_duration), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT_MAX, E },
    { "frag_type", "set type of interval for fragments", OFFSET(frag_type), AV_OPT_TYPE_INT, {.i64 = FRAG_TYPE_NONE }, 0, FRAG_TYPE_NB - 1, E, "frag_type"},
    { "none", "one fragment per segment", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_NONE }, 0, UINT_MAX, E, "frag_type"},
    { "every_frame", "fragment at every frame", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_EVERY_FRAME }, 0, UINT_MAX, E, "frag_type"},
    { "duration", "fragment at specific time intervals", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_DURATION }, 0, UINT_MAX, E, "frag_type"},
    { "pframes", "fragment at keyframes and following P-Frame reordering (Video only, experimental)", 0, AV_OPT_TYPE_CONST, {.i64 = FRAG_TYPE_PFRAMES }, 0, UINT_MAX, E, "frag_type"},
    { "remove_at_exit", "remove all segments when finished", OFFSET(remove_at_exit), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "use_template", "Use SegmentTemplate instead of SegmentList", OFFSET(use_template), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
    { "use_timeline", "Use SegmentTimeline in SegmentTemplate", OFFSET(use_timeline), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
    { "single_file", "Store all segments in one file, accessed using byte ranges", OFFSET(single_file), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "single_file_name", "DASH-templated name to be used for baseURL. Implies storing all segments in one file, accessed using byte ranges", OFFSET(single_file_name), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "init_seg_name", "DASH-templated name to used for the initialization segment", OFFSET(init_seg_name), AV_OPT_TYPE_STRING, {.str = "init-stream$RepresentationID$.$ext$"}, 0, 0, E },
    { "media_seg_name", "DASH-templated name to used for the media segments", OFFSET(media_seg_name), AV_OPT_TYPE_STRING, {.str = "chunk-stream$RepresentationID$-$Number%05d$.$ext$"}, 0, 0, E },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, E },
    { "method", "set the HTTP method", OFFSET(method), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
    { "http_user_agent", "override User-Agent field in HTTP header", OFFSET(user_agent), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E},
    { "http_persistent", "Use persistent HTTP connections", OFFSET(http_persistent), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, E },
    { "hls_playlist", "Generate HLS playlist files(master.m3u8, media_%d.m3u8)", OFFSET(hls_playlist), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "hls_master_name", "HLS master playlist name", OFFSET(hls_master_name), AV_OPT_TYPE_STRING, {.str = "master.m3u8"}, 0, 0, E },
    { "streaming", "Enable/Disable streaming mode of output. Each frame will be moof fragment", OFFSET(streaming), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "timeout", "set timeout for socket I/O operations", OFFSET(timeout), AV_OPT_TYPE_DURATION, { .i64 = -1 }, -1, INT_MAX, .flags = E },
    { "index_correction", "Enable/Disable segment index correction logic", OFFSET(index_correction), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "format_options","set list of options for the container format (mp4/webm) used for dash", OFFSET(format_options), AV_OPT_TYPE_DICT, {.str = NULL}, 0, 0, E},
    { "global_sidx", "Write global SIDX atom. Applicable only for single file, mp4 output, non-streaming mode", OFFSET(global_sidx), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "dash_segment_type", "set dash segment files type", OFFSET(segment_type_option), AV_OPT_TYPE_INT, {.i64 = SEGMENT_TYPE_AUTO }, 0, SEGMENT_TYPE_NB - 1, E, "segment_type"},
    { "auto", "select segment file format based on codec", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_AUTO }, 0, UINT_MAX, E, "segment_type"},
    { "mp4", "make segment file in ISOBMFF format", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_MP4 }, 0, UINT_MAX, E, "segment_type"},
    { "webm", "make segment file in WebM format", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_WEBM }, 0, UINT_MAX, E, "segment_type"},
    { "ignore_io_errors", "Ignore IO errors during open and write. Useful for long-duration runs with network output", OFFSET(ignore_io_errors), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "lhls", "Enable Low-latency HLS(Experimental). Adds #EXT-X-PREFETCH tag with current segment's URI", OFFSET(lhls), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "ldash", "Enable Low-latency dash. Constrains the value of a few elements", OFFSET(ldash), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "master_m3u8_publish_rate", "Publish master playlist every after this many segment intervals", OFFSET(master_publish_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT_MAX, E},
    { "write_prft", "Write producer reference time element", OFFSET(write_prft), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, E},
    { "mpd_profile", "Set profiles. Elements and values used in the manifest may be constrained by them", OFFSET(profile), AV_OPT_TYPE_FLAGS, {.i64 = MPD_PROFILE_DASH }, 0, UINT_MAX, E, "mpd_profile"},
    { "dash", "MPEG-DASH ISO Base media file format live profile", 0, AV_OPT_TYPE_CONST, {.i64 = MPD_PROFILE_DASH }, 0, UINT_MAX, E, "mpd_profile"},
    { "dvb_dash", "DVB-DASH profile", 0, AV_OPT_TYPE_CONST, {.i64 = MPD_PROFILE_DVB }, 0, UINT_MAX, E, "mpd_profile"},
    { "http_opts", "HTTP protocol options", OFFSET(http_opts), AV_OPT_TYPE_DICT, { .str = NULL }, 0, 0, E },
    { "target_latency", "Set desired target latency for Low-latency dash", OFFSET(target_latency), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT_MAX, E },
    { "min_playback_rate", "Set desired minimum playback rate", OFFSET(min_playback_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 1.0 }, 0.5, 1.5, E },
    { "max_playback_rate", "Set desired maximum playback rate", OFFSET(max_playback_rate), AV_OPT_TYPE_RATIONAL, { .dbl = 1.0 }, 0.5, 1.5, E },
    { "update_period", "Set the mpd update interval", OFFSET(update_period), AV_OPT_TYPE_INT64, {.i64 = 0}, 0, INT64_MAX, E},
    { NULL },
};
/* AVClass binding the option table above to the muxer's priv_data. */
static const AVClass dash_class = {
    .class_name = "dash muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};
/* Muxer registration. AVFMT_NOFILE: this muxer manages its own output
 * files (segments + manifest) rather than writing to s->pb. */
AVOutputFormat ff_dash_muxer = {
    .name           = "dash",
    .long_name      = NULL_IF_CONFIG_SMALL("DASH Muxer"),
    .extensions     = "mpd",
    .priv_data_size = sizeof(DASHContext),
    .audio_codec    = AV_CODEC_ID_AAC,
    .video_codec    = AV_CODEC_ID_H264,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_NOFILE | AVFMT_TS_NEGATIVE,
    .init           = dash_init,
    .write_header   = dash_write_header,
    .write_packet   = dash_write_packet,
    .write_trailer  = dash_write_trailer,
    .deinit         = dash_free,
    .check_bitstream = dash_check_bitstream,
    .priv_class     = &dash_class,
};