You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1964 lines
73KB

  1. /*
  2. * MPEG-DASH ISO BMFF segmenter
  3. * Copyright (c) 2014 Martin Storsjo
  4. * Copyright (c) 2018 Akamai Technologies, Inc.
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include "config.h"
  23. #if HAVE_UNISTD_H
  24. #include <unistd.h>
  25. #endif
  26. #include "libavutil/avassert.h"
  27. #include "libavutil/avutil.h"
  28. #include "libavutil/avstring.h"
  29. #include "libavutil/intreadwrite.h"
  30. #include "libavutil/mathematics.h"
  31. #include "libavutil/opt.h"
  32. #include "libavutil/rational.h"
  33. #include "libavutil/time.h"
  34. #include "libavutil/time_internal.h"
  35. #include "av1.h"
  36. #include "avc.h"
  37. #include "avformat.h"
  38. #include "avio_internal.h"
  39. #include "hlsplaylist.h"
  40. #if CONFIG_HTTP_PROTOCOL
  41. #include "http.h"
  42. #endif
  43. #include "internal.h"
  44. #include "isom.h"
  45. #include "os_support.h"
  46. #include "url.h"
  47. #include "vpcc.h"
  48. #include "dash.h"
/* Container format used for the generated media segments. */
typedef enum {
    SEGMENT_TYPE_AUTO = 0, /* choose mp4 or webm per stream based on its codec */
    SEGMENT_TYPE_MP4,
    SEGMENT_TYPE_WEBM,
    SEGMENT_TYPE_NB        /* number of valid segment types (array bound) */
} SegmentType;
/* One finished media segment of an output stream. */
typedef struct Segment {
    char file[1024];                /* segment file name */
    int64_t start_pos;              /* byte offset of the segment data */
    int range_length, index_length; /* segment size / index (sidx) size in bytes */
    int64_t time;                   /* start timestamp (stream timescale) */
    double prog_date_time;          /* wallclock time for EXT-X-PROGRAM-DATE-TIME */
    int64_t duration;               /* duration (stream timescale) */
    int n;                          /* segment sequence number */
} Segment;
/* One DASH AdaptationSet: a group of streams sharing a media type. */
typedef struct AdaptationSet {
    char id[10];                            /* AdaptationSet id attribute (string form) */
    char *descriptor;                       /* optional self-closing XML tag inserted verbatim */
    enum AVMediaType media_type;            /* AVMEDIA_TYPE_UNKNOWN until first stream is mapped */
    AVDictionary *metadata;                 /* e.g. "language", "role" entries */
    AVRational min_frame_rate, max_frame_rate;
    int ambiguous_frame_rate;               /* set when member frame rates disagree; suppresses maxFrameRate */
} AdaptationSet;
/* Per-input-stream muxing state: each stream is written by its own child
 * muxer (mp4 or webm) into a dynamic buffer that is flushed per segment. */
typedef struct OutputStream {
    AVFormatContext *ctx;         /* child muxer context */
    int ctx_inited, as_idx;       /* as_idx is 1-based; 0 means not yet assigned */
    AVIOContext *out;             /* destination of flushed segment data */
    int packets_written;
    char initfile[1024];          /* init segment (or single-file) name */
    int64_t init_start_pos, pos;
    int init_range_length;
    int nb_segments, segments_size, segment_index;
    Segment **segments;
    int64_t first_pts, start_pts, max_pts;
    int64_t last_dts, last_pts;
    int bit_rate;
    SegmentType segment_type;     /* segment type selected for this particular stream */
    const char *format_name;      /* "mp4"/"webm", from get_format_str() */
    const char *extension_name;   /* file extension, from get_extension_str() */
    const char *single_file_name; /* file names selected for this particular stream */
    const char *init_seg_name;
    const char *media_seg_name;
    char codec_str[100];          /* RFC 6381 style codecs attribute value */
    int written_len;              /* bytes of the dynamic buffer already written out */
    char filename[1024];
    char full_path[1024];
    char temp_path[1024];
    double availability_time_offset;
    int total_pkt_size;
    int muxer_overhead;
} OutputStream;
/* Private context of the DASH muxer. */
typedef struct DASHContext {
    const AVClass *class; /* Class for private options. */
    char *adaptation_sets;       /* raw "adaptation_sets" option string */
    AdaptationSet *as;
    int nb_as;
    int window_size;             /* max segments kept in the manifest (0 = all) */
    int extra_window_size;       /* extra segments kept on disk beyond the window */
#if FF_API_DASH_MIN_SEG_DURATION
    int min_seg_duration;        /* deprecated; superseded by seg_duration */
#endif
    int64_t seg_duration;        /* target segment duration in AV_TIME_BASE units */
    int remove_at_exit;
    int use_template;            /* emit SegmentTemplate instead of SegmentList */
    int use_timeline;            /* emit SegmentTimeline inside the template */
    int single_file;             /* write all segments into one file, addressed by byte ranges */
    OutputStream *streams;
    int has_video;
    int64_t last_duration;
    int64_t total_duration;
    char availability_start_time[100];
    time_t start_time_s;
    char dirname[1024];
    const char *single_file_name; /* file names as specified in options */
    const char *init_seg_name;
    const char *media_seg_name;
    const char *utc_timing_url;
    const char *method;          /* HTTP method override for uploads */
    const char *user_agent;
    int hls_playlist;            /* additionally write HLS playlists referencing the segments */
    int http_persistent;         /* reuse HTTP connections between requests */
    int master_playlist_created;
    AVIOContext *mpd_out;
    AVIOContext *m3u8_out;
    int streaming;
    int64_t timeout;
    int index_correction;
    AVDictionary *format_options;
    int global_sidx;
    SegmentType segment_type_option; /* segment type as specified in options */
    int ignore_io_errors;        /* log and continue instead of failing on I/O errors */
    int lhls;                    /* low-latency HLS: playlists advertise segments in progress */
    int master_publish_rate;
    int nr_of_streams_to_flush;
    int nr_of_streams_flushed;
} DASHContext;
/* Codec-id → codecs-attribute string for common WebM codecs, which are not
 * covered by RFC 6381. Terminated by a {0, NULL} sentinel. */
static struct codec_string {
    int id;
    const char *str;
} codecs[] = {
    { AV_CODEC_ID_VP8, "vp8" },
    { AV_CODEC_ID_VP9, "vp9" },
    { AV_CODEC_ID_VORBIS, "vorbis" },
    { AV_CODEC_ID_OPUS, "opus" },
    { AV_CODEC_ID_FLAC, "flac" },
    { 0, NULL }
};
/* SegmentType → option-string mapping used by get_format_str().
 * Terminated by a {0, NULL} sentinel. */
static struct format_string {
    SegmentType segment_type;
    const char *str;
} formats[] = {
    { SEGMENT_TYPE_AUTO, "auto" },
    { SEGMENT_TYPE_MP4, "mp4" },
    { SEGMENT_TYPE_WEBM, "webm" },
    { 0, NULL }
};
/* Open an output for writing. When persistent HTTP is enabled and *pb already
 * holds an open HTTP connection, reuse it by issuing a new request on the
 * existing connection instead of opening a fresh one.
 * Returns 0 on success or a negative AVERROR code. */
static int dashenc_io_open(AVFormatContext *s, AVIOContext **pb, char *filename,
                           AVDictionary **options) {
    DASHContext *c = s->priv_data;
    int http_base_proto = filename ? ff_is_http_proto(filename) : 0;
    int err = AVERROR_MUXER_NOT_FOUND;
    if (!*pb || !http_base_proto || !c->http_persistent) {
        err = s->io_open(s, pb, filename, AVIO_FLAG_WRITE, options);
#if CONFIG_HTTP_PROTOCOL
    } else {
        /* Reuse the persistent connection; close it if the new request fails. */
        URLContext *http_url_context = ffio_geturlcontext(*pb);
        av_assert0(http_url_context);
        err = ff_http_do_new_request(http_url_context, filename);
        if (err < 0)
            ff_format_io_close(s, pb);
#endif
    }
    return err;
}
/* Close *pb, or — for a persistent HTTP connection — flush it and shut down
 * only the write side so the connection can be reused for the next request. */
static void dashenc_io_close(AVFormatContext *s, AVIOContext **pb, char *filename) {
    DASHContext *c = s->priv_data;
    int http_base_proto = filename ? ff_is_http_proto(filename) : 0;
    if (!*pb)
        return;
    if (!http_base_proto || !c->http_persistent) {
        ff_format_io_close(s, pb);
#if CONFIG_HTTP_PROTOCOL
    } else {
        /* Keep the connection open; just terminate the current request body. */
        URLContext *http_url_context = ffio_geturlcontext(*pb);
        av_assert0(http_url_context);
        avio_flush(*pb);
        ffurl_shutdown(http_url_context, AVIO_FLAG_WRITE);
#endif
    }
}
  199. static const char *get_format_str(SegmentType segment_type) {
  200. int i;
  201. for (i = 0; i < SEGMENT_TYPE_NB; i++)
  202. if (formats[i].segment_type == segment_type)
  203. return formats[i].str;
  204. return NULL;
  205. }
  206. static const char *get_extension_str(SegmentType type, int single_file)
  207. {
  208. switch (type) {
  209. case SEGMENT_TYPE_MP4: return single_file ? "mp4" : "m4s";
  210. case SEGMENT_TYPE_WEBM: return "webm";
  211. default: return NULL;
  212. }
  213. }
  214. static int handle_io_open_error(AVFormatContext *s, int err, char *url) {
  215. DASHContext *c = s->priv_data;
  216. char errbuf[AV_ERROR_MAX_STRING_SIZE];
  217. av_strerror(err, errbuf, sizeof(errbuf));
  218. av_log(s, c->ignore_io_errors ? AV_LOG_WARNING : AV_LOG_ERROR,
  219. "Unable to open %s for writing: %s\n", url, errbuf);
  220. return c->ignore_io_errors ? 0 : err;
  221. }
  222. static inline SegmentType select_segment_type(SegmentType segment_type, enum AVCodecID codec_id)
  223. {
  224. if (segment_type == SEGMENT_TYPE_AUTO) {
  225. if (codec_id == AV_CODEC_ID_OPUS || codec_id == AV_CODEC_ID_VORBIS ||
  226. codec_id == AV_CODEC_ID_VP8 || codec_id == AV_CODEC_ID_VP9) {
  227. segment_type = SEGMENT_TYPE_WEBM;
  228. } else {
  229. segment_type = SEGMENT_TYPE_MP4;
  230. }
  231. }
  232. return segment_type;
  233. }
/* Decide the segment container for every stream (resolving "auto" by codec)
 * and cache the matching format and extension strings on each OutputStream.
 * If HLS output was requested but no stream produces mp4 segments, HLS is
 * disabled, since the HLS playlists here are only written for mp4 streams. */
static int init_segment_types(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int has_mp4_streams = 0;
    for (int i = 0; i < s->nb_streams; ++i) {
        OutputStream *os = &c->streams[i];
        SegmentType segment_type = select_segment_type(
            c->segment_type_option, s->streams[i]->codecpar->codec_id);
        os->segment_type = segment_type;
        os->format_name = get_format_str(segment_type);
        if (!os->format_name) {
            av_log(s, AV_LOG_ERROR, "Could not select DASH segment type for stream %d\n", i);
            return AVERROR_MUXER_NOT_FOUND;
        }
        os->extension_name = get_extension_str(segment_type, c->single_file);
        if (!os->extension_name) {
            av_log(s, AV_LOG_ERROR, "Could not get extension type for stream %d\n", i);
            return AVERROR_MUXER_NOT_FOUND;
        }
        has_mp4_streams |= segment_type == SEGMENT_TYPE_MP4;
    }
    if (c->hls_playlist && !has_mp4_streams) {
        av_log(s, AV_LOG_WARNING, "No mp4 streams, disabling HLS manifest generation\n");
        c->hls_playlist = 0;
    }
    return 0;
}
  261. static int check_file_extension(const char *filename, const char *extension) {
  262. char *dot;
  263. if (!filename || !extension)
  264. return -1;
  265. dot = strrchr(filename, '.');
  266. if (dot && !strcmp(dot + 1, extension))
  267. return 0;
  268. return -1;
  269. }
  270. static void set_vp9_codec_str(AVFormatContext *s, AVCodecParameters *par,
  271. AVRational *frame_rate, char *str, int size) {
  272. VPCC vpcc;
  273. int ret = ff_isom_get_vpcc_features(s, par, frame_rate, &vpcc);
  274. if (ret == 0) {
  275. av_strlcatf(str, size, "vp09.%02d.%02d.%02d",
  276. vpcc.profile, vpcc.level, vpcc.bitdepth);
  277. } else {
  278. // Default to just vp9 in case of error while finding out profile or level
  279. av_log(s, AV_LOG_WARNING, "Could not find VP9 profile and/or level\n");
  280. av_strlcpy(str, "vp9", size);
  281. }
  282. return;
  283. }
/* Build the "codecs" attribute value for a stream into str.
 * WebM-family codecs use the fixed strings from codecs[]; everything else is
 * derived from the 4CC tag per RFC 6381 (mp4a/mp4v object type + AAC AOT,
 * avc1 profile/compat/level bytes, av01 sequence header fields). On any
 * failure the function returns early, leaving str as-is. */
static void set_codec_str(AVFormatContext *s, AVCodecParameters *par,
                          AVRational *frame_rate, char *str, int size)
{
    const AVCodecTag *tags[2] = { NULL, NULL };
    uint32_t tag;
    int i;

    // common Webm codecs are not part of RFC 6381
    for (i = 0; codecs[i].id; i++)
        if (codecs[i].id == par->codec_id) {
            if (codecs[i].id == AV_CODEC_ID_VP9) {
                set_vp9_codec_str(s, par, frame_rate, str, size);
            } else {
                av_strlcpy(str, codecs[i].str, size);
            }
            return;
        }

    // for codecs part of RFC 6381
    if (par->codec_type == AVMEDIA_TYPE_VIDEO)
        tags[0] = ff_codec_movvideo_tags;
    else if (par->codec_type == AVMEDIA_TYPE_AUDIO)
        tags[0] = ff_codec_movaudio_tags;
    else
        return;

    /* Prefer the stream's own tag; otherwise look it up from the mov tables. */
    tag = par->codec_tag;
    if (!tag)
        tag = av_codec_get_tag(tags, par->codec_id);
    if (!tag)
        return;
    if (size < 5)
        return;

    /* The 4CC itself is the first part of the codec string. */
    AV_WL32(str, tag);
    str[4] = '\0';
    if (!strcmp(str, "mp4a") || !strcmp(str, "mp4v")) {
        uint32_t oti;
        tags[0] = ff_mp4_obj_type;
        oti = av_codec_get_tag(tags, par->codec_id);
        if (oti)
            av_strlcatf(str, size, ".%02"PRIx32, oti);
        else
            return;

        if (tag == MKTAG('m', 'p', '4', 'a')) {
            if (par->extradata_size >= 2) {
                /* AAC audio object type; 31 escapes to an extended 6-bit value. */
                int aot = par->extradata[0] >> 3;
                if (aot == 31)
                    aot = ((AV_RB16(par->extradata) >> 5) & 0x3f) + 32;
                av_strlcatf(str, size, ".%d", aot);
            }
        } else if (tag == MKTAG('m', 'p', '4', 'v')) {
            // Unimplemented, should output ProfileLevelIndication as a decimal number
            av_log(s, AV_LOG_WARNING, "Incomplete RFC 6381 codec string for mp4v\n");
        }
    } else if (!strcmp(str, "avc1")) {
        uint8_t *tmpbuf = NULL;
        uint8_t *extradata = par->extradata;
        int extradata_size = par->extradata_size;
        if (!extradata_size)
            return;
        if (extradata[0] != 1) {
            /* Not avcC-formatted: convert via ff_isom_write_avcc so the
             * profile/compat/level bytes can be read below. */
            AVIOContext *pb;
            if (avio_open_dyn_buf(&pb) < 0)
                return;
            if (ff_isom_write_avcc(pb, extradata, extradata_size) < 0) {
                ffio_free_dyn_buf(&pb);
                return;
            }
            extradata_size = avio_close_dyn_buf(pb, &extradata);
            tmpbuf = extradata;
        }

        if (extradata_size >= 4)
            av_strlcatf(str, size, ".%02x%02x%02x",
                        extradata[1], extradata[2], extradata[3]);
        av_free(tmpbuf);
    } else if (!strcmp(str, "av01")) {
        AV1SequenceParameters seq;
        if (!par->extradata_size)
            return;
        if (ff_av1_parse_seq_header(&seq, par->extradata, par->extradata_size) < 0)
            return;

        av_strlcatf(str, size, ".%01u.%02u%s.%02u",
                    seq.profile, seq.level, seq.tier ? "H" : "M", seq.bitdepth);
        if (seq.color_description_present_flag)
            av_strlcatf(str, size, ".%01u.%01u%01u%01u.%02u.%02u.%02u.%01u",
                        seq.monochrome,
                        seq.chroma_subsampling_x, seq.chroma_subsampling_y, seq.chroma_sample_position,
                        seq.color_primaries, seq.transfer_characteristics, seq.matrix_coefficients,
                        seq.color_range);
    }
}
/* Flush os's child muxer. In segmented mode the dynamic buffer is closed,
 * the not-yet-written tail (past os->written_len) is copied to os->out, and
 * a fresh dynamic buffer is opened; *range_length receives the full buffered
 * size. In single-file mode nothing is written and *range_length is the byte
 * count produced since os->pos. Returns 0 or a negative AVERROR code. */
static int flush_dynbuf(DASHContext *c, OutputStream *os, int *range_length)
{
    uint8_t *buffer;
    if (!os->ctx->pb) {
        return AVERROR(EINVAL);
    }
    // flush
    av_write_frame(os->ctx, NULL);
    avio_flush(os->ctx->pb);
    if (!c->single_file) {
        // write out to file
        *range_length = avio_close_dyn_buf(os->ctx->pb, &buffer);
        os->ctx->pb = NULL;
        if (os->out)
            avio_write(os->out, buffer + os->written_len, *range_length - os->written_len);
        os->written_len = 0;
        av_free(buffer);
        // re-open buffer
        return avio_open_dyn_buf(&os->ctx->pb);
    } else {
        *range_length = avio_tell(os->ctx->pb) - os->pos;
        return 0;
    }
}
  396. static void set_http_options(AVDictionary **options, DASHContext *c)
  397. {
  398. if (c->method)
  399. av_dict_set(options, "method", c->method, 0);
  400. if (c->user_agent)
  401. av_dict_set(options, "user_agent", c->user_agent, 0);
  402. if (c->http_persistent)
  403. av_dict_set_int(options, "multiple_requests", 1, 0);
  404. if (c->timeout >= 0)
  405. av_dict_set_int(options, "timeout", c->timeout, 0);
  406. }
  407. static void get_hls_playlist_name(char *playlist_name, int string_size,
  408. const char *base_url, int id) {
  409. if (base_url)
  410. snprintf(playlist_name, string_size, "%smedia_%d.m3u8", base_url, id);
  411. else
  412. snprintf(playlist_name, string_size, "media_%d.m3u8", id);
  413. }
  414. static void get_start_index_number(OutputStream *os, DASHContext *c,
  415. int *start_index, int *start_number) {
  416. *start_index = 0;
  417. *start_number = 1;
  418. if (c->window_size) {
  419. *start_index = FFMAX(os->nb_segments - c->window_size, 0);
  420. *start_number = FFMAX(os->segment_index - c->window_size, 1);
  421. }
  422. }
/* Write the HLS media playlist (media_<id>.m3u8) for one representation.
 * Only mp4-segment streams get a playlist. The file is written to a .tmp
 * name and renamed into place when the destination is a local file, to avoid
 * readers seeing a partially written playlist. prefetch_url, when non-NULL,
 * is advertised via #EXT-X-PREFETCH (LHLS). I/O errors are logged and the
 * playlist update is skipped. */
static void write_hls_media_playlist(OutputStream *os, AVFormatContext *s,
                                     int representation_id, int final,
                                     char *prefetch_url) {
    DASHContext *c = s->priv_data;
    int timescale = os->ctx->streams[0]->time_base.den;
    char temp_filename_hls[1024];
    char filename_hls[1024];
    AVDictionary *http_opts = NULL;
    int target_duration = 0;
    int ret = 0;
    const char *proto = avio_find_protocol_name(c->dirname);
    int use_rename = proto && !strcmp(proto, "file");
    int i, start_index, start_number;
    double prog_date_time = 0;

    get_start_index_number(os, c, &start_index, &start_number);

    if (!c->hls_playlist || start_index >= os->nb_segments ||
        os->segment_type != SEGMENT_TYPE_MP4)
        return;

    get_hls_playlist_name(filename_hls, sizeof(filename_hls),
                          c->dirname, representation_id);

    snprintf(temp_filename_hls, sizeof(temp_filename_hls), use_rename ? "%s.tmp" : "%s", filename_hls);

    set_http_options(&http_opts, c);
    ret = dashenc_io_open(s, &c->m3u8_out, temp_filename_hls, &http_opts);
    av_dict_free(&http_opts);
    if (ret < 0) {
        handle_io_open_error(s, ret, temp_filename_hls);
        return;
    }
    /* Target duration is the rounded maximum segment duration in the window. */
    for (i = start_index; i < os->nb_segments; i++) {
        Segment *seg = os->segments[i];
        double duration = (double) seg->duration / timescale;
        if (target_duration <= duration)
            target_duration = lrint(duration);
    }

    ff_hls_write_playlist_header(c->m3u8_out, 6, -1, target_duration,
                                 start_number, PLAYLIST_TYPE_NONE, 0);

    ff_hls_write_init_file(c->m3u8_out, os->initfile, c->single_file,
                           os->init_range_length, os->init_start_pos);

    for (i = start_index; i < os->nb_segments; i++) {
        Segment *seg = os->segments[i];

        /* Seed the program date time from the first visible segment (or the
         * muxer start time when only one segment exists). */
        if (prog_date_time == 0) {
            if (os->nb_segments == 1)
                prog_date_time = c->start_time_s;
            else
                prog_date_time = seg->prog_date_time;
        }
        seg->prog_date_time = prog_date_time;

        ret = ff_hls_write_file_entry(c->m3u8_out, 0, c->single_file,
                                      (double) seg->duration / timescale, 0,
                                      seg->range_length, seg->start_pos, NULL,
                                      c->single_file ? os->initfile : seg->file,
                                      &prog_date_time, 0, 0, 0);
        if (ret < 0) {
            av_log(os->ctx, AV_LOG_WARNING, "ff_hls_write_file_entry get error\n");
        }
    }

    if (prefetch_url)
        avio_printf(c->m3u8_out, "#EXT-X-PREFETCH:%s\n", prefetch_url);

    if (final)
        ff_hls_write_end_list(c->m3u8_out);

    dashenc_io_close(s, &c->m3u8_out, temp_filename_hls);

    if (use_rename)
        if (avpriv_io_move(temp_filename_hls, filename_hls) < 0) {
            av_log(os->ctx, AV_LOG_WARNING, "renaming file %s to %s failed\n\n", temp_filename_hls, filename_hls);
        }
}
  489. static int flush_init_segment(AVFormatContext *s, OutputStream *os)
  490. {
  491. DASHContext *c = s->priv_data;
  492. int ret, range_length;
  493. ret = flush_dynbuf(c, os, &range_length);
  494. if (ret < 0)
  495. return ret;
  496. os->pos = os->init_range_length = range_length;
  497. if (!c->single_file) {
  498. char filename[1024];
  499. snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
  500. dashenc_io_close(s, &os->out, filename);
  501. }
  502. return 0;
  503. }
/* Release everything owned by the muxer: adaptation sets (metadata and
 * descriptors), each stream's child muxer, I/O contexts, segment list and
 * per-stream name strings, and finally the manifest/playlist outputs. */
static void dash_free(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int i, j;

    if (c->as) {
        for (i = 0; i < c->nb_as; i++) {
            av_dict_free(&c->as[i].metadata);
            av_freep(&c->as[i].descriptor);
        }
        av_freep(&c->as);
        c->nb_as = 0;
    }

    if (!c->streams)
        return;
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        if (os->ctx && os->ctx->pb) {
            /* Segmented mode uses a dynamic buffer; single-file mode writes
             * straight to the output file. */
            if (!c->single_file)
                ffio_free_dyn_buf(&os->ctx->pb);
            else
                avio_close(os->ctx->pb);
        }
        ff_format_io_close(s, &os->out);
        avformat_free_context(os->ctx);
        for (j = 0; j < os->nb_segments; j++)
            av_free(os->segments[j]);
        av_free(os->segments);
        av_freep(&os->single_file_name);
        av_freep(&os->init_seg_name);
        av_freep(&os->media_seg_name);
    }
    av_freep(&c->streams);

    ff_format_io_close(s, &c->mpd_out);
    ff_format_io_close(s, &c->m3u8_out);
}
/* Emit the segment addressing section of one Representation into the MPD:
 * SegmentTemplate (optionally with a SegmentTimeline), or a SegmentList with
 * byte ranges (single-file mode) or media URLs. Also triggers the HLS media
 * playlist update unless LHLS writes it elsewhere before finalization. */
static void output_segment_list(OutputStream *os, AVIOContext *out, AVFormatContext *s,
                                int representation_id, int final)
{
    DASHContext *c = s->priv_data;
    int i, start_index, start_number;
    get_start_index_number(os, c, &start_index, &start_number);

    if (c->use_template) {
        /* With a timeline, durations are in the stream timescale; without,
         * the fixed duration is expressed in AV_TIME_BASE units. */
        int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : AV_TIME_BASE;
        avio_printf(out, "\t\t\t\t<SegmentTemplate timescale=\"%d\" ", timescale);
        if (!c->use_timeline) {
            avio_printf(out, "duration=\"%"PRId64"\" ", c->seg_duration);
            if (c->streaming && os->availability_time_offset)
                avio_printf(out, "availabilityTimeOffset=\"%.3f\" ",
                            os->availability_time_offset);
        }
        avio_printf(out, "initialization=\"%s\" media=\"%s\" startNumber=\"%d\">\n", os->init_seg_name, os->media_seg_name, c->use_timeline ? start_number : 1);
        if (c->use_timeline) {
            int64_t cur_time = 0;
            avio_printf(out, "\t\t\t\t\t<SegmentTimeline>\n");
            for (i = start_index; i < os->nb_segments; ) {
                Segment *seg = os->segments[i];
                int repeat = 0;
                avio_printf(out, "\t\t\t\t\t\t<S ");
                /* t= only where the timeline is discontinuous (or at start). */
                if (i == start_index || seg->time != cur_time) {
                    cur_time = seg->time;
                    avio_printf(out, "t=\"%"PRId64"\" ", seg->time);
                }
                avio_printf(out, "d=\"%"PRId64"\" ", seg->duration);
                /* Collapse runs of contiguous, equal-duration segments into
                 * a single <S> with a repeat count. */
                while (i + repeat + 1 < os->nb_segments &&
                       os->segments[i + repeat + 1]->duration == seg->duration &&
                       os->segments[i + repeat + 1]->time == os->segments[i + repeat]->time + os->segments[i + repeat]->duration)
                    repeat++;
                if (repeat > 0)
                    avio_printf(out, "r=\"%d\" ", repeat);
                avio_printf(out, "/>\n");
                i += 1 + repeat;
                cur_time += (1 + repeat) * seg->duration;
            }
            avio_printf(out, "\t\t\t\t\t</SegmentTimeline>\n");
        }
        avio_printf(out, "\t\t\t\t</SegmentTemplate>\n");
    } else if (c->single_file) {
        avio_printf(out, "\t\t\t\t<BaseURL>%s</BaseURL>\n", os->initfile);
        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
        avio_printf(out, "\t\t\t\t\t<Initialization range=\"%"PRId64"-%"PRId64"\" />\n", os->init_start_pos, os->init_start_pos + os->init_range_length - 1);
        for (i = start_index; i < os->nb_segments; i++) {
            Segment *seg = os->segments[i];
            avio_printf(out, "\t\t\t\t\t<SegmentURL mediaRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->range_length - 1);
            if (seg->index_length)
                avio_printf(out, "indexRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->index_length - 1);
            avio_printf(out, "/>\n");
        }
        avio_printf(out, "\t\t\t\t</SegmentList>\n");
    } else {
        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
        avio_printf(out, "\t\t\t\t\t<Initialization sourceURL=\"%s\" />\n", os->initfile);
        for (i = start_index; i < os->nb_segments; i++) {
            Segment *seg = os->segments[i];
            avio_printf(out, "\t\t\t\t\t<SegmentURL media=\"%s\" />\n", seg->file);
        }
        avio_printf(out, "\t\t\t\t</SegmentList>\n");
    }
    if (!c->lhls || final) {
        write_hls_media_playlist(os, s, representation_id, final, NULL);
    }
}
  605. static char *xmlescape(const char *str) {
  606. int outlen = strlen(str)*3/2 + 6;
  607. char *out = av_realloc(NULL, outlen + 1);
  608. int pos = 0;
  609. if (!out)
  610. return NULL;
  611. for (; *str; str++) {
  612. if (pos + 6 > outlen) {
  613. char *tmp;
  614. outlen = 2 * outlen + 6;
  615. tmp = av_realloc(out, outlen + 1);
  616. if (!tmp) {
  617. av_free(out);
  618. return NULL;
  619. }
  620. out = tmp;
  621. }
  622. if (*str == '&') {
  623. memcpy(&out[pos], "&amp;", 5);
  624. pos += 5;
  625. } else if (*str == '<') {
  626. memcpy(&out[pos], "&lt;", 4);
  627. pos += 4;
  628. } else if (*str == '>') {
  629. memcpy(&out[pos], "&gt;", 4);
  630. pos += 4;
  631. } else if (*str == '\'') {
  632. memcpy(&out[pos], "&apos;", 6);
  633. pos += 6;
  634. } else if (*str == '\"') {
  635. memcpy(&out[pos], "&quot;", 6);
  636. pos += 6;
  637. } else {
  638. out[pos++] = *str;
  639. }
  640. }
  641. out[pos] = '\0';
  642. return out;
  643. }
  644. static void write_time(AVIOContext *out, int64_t time)
  645. {
  646. int seconds = time / AV_TIME_BASE;
  647. int fractions = time % AV_TIME_BASE;
  648. int minutes = seconds / 60;
  649. int hours = minutes / 60;
  650. seconds %= 60;
  651. minutes %= 60;
  652. avio_printf(out, "PT");
  653. if (hours)
  654. avio_printf(out, "%dH", hours);
  655. if (hours || minutes)
  656. avio_printf(out, "%dM", minutes);
  657. avio_printf(out, "%d.%dS", seconds, fractions / (AV_TIME_BASE / 10));
  658. }
  659. static void format_date_now(char *buf, int size)
  660. {
  661. struct tm *ptm, tmbuf;
  662. int64_t time_us = av_gettime();
  663. int64_t time_ms = time_us / 1000;
  664. const time_t time_s = time_ms / 1000;
  665. int millisec = time_ms - (time_s * 1000);
  666. ptm = gmtime_r(&time_s, &tmbuf);
  667. if (ptm) {
  668. int len;
  669. if (!strftime(buf, size, "%Y-%m-%dT%H:%M:%S", ptm)) {
  670. buf[0] = '\0';
  671. return;
  672. }
  673. len = strlen(buf);
  674. snprintf(buf + len, size - len, ".%03dZ", millisec);
  675. }
  676. }
/* Write one <AdaptationSet> element (attributes, optional Role/descriptor,
 * and a <Representation> per member stream) to the MPD. Streams belong to
 * this set when their 1-based os->as_idx matches as_index + 1. Always
 * returns 0. */
static int write_adaptation_set(AVFormatContext *s, AVIOContext *out, int as_index,
                                int final)
{
    DASHContext *c = s->priv_data;
    AdaptationSet *as = &c->as[as_index];
    AVDictionaryEntry *lang, *role;
    int i;

    avio_printf(out, "\t\t<AdaptationSet id=\"%s\" contentType=\"%s\" segmentAlignment=\"true\" bitstreamSwitching=\"true\"",
                as->id, as->media_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    /* Only advertise maxFrameRate when the member rates are consistent. */
    if (as->media_type == AVMEDIA_TYPE_VIDEO && as->max_frame_rate.num && !as->ambiguous_frame_rate && av_cmp_q(as->min_frame_rate, as->max_frame_rate) < 0)
        avio_printf(out, " maxFrameRate=\"%d/%d\"", as->max_frame_rate.num, as->max_frame_rate.den);
    lang = av_dict_get(as->metadata, "language", NULL, 0);
    if (lang)
        avio_printf(out, " lang=\"%s\"", lang->value);
    avio_printf(out, ">\n");

    role = av_dict_get(as->metadata, "role", NULL, 0);
    if (role)
        avio_printf(out, "\t\t\t<Role schemeIdUri=\"urn:mpeg:dash:role:2011\" value=\"%s\"/>\n", role->value);

    /* User-supplied descriptor tag is inserted verbatim. */
    if (as->descriptor)
        avio_printf(out, "\t\t\t%s\n", as->descriptor);
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        char bandwidth_str[64] = {'\0'};

        if (os->as_idx - 1 != as_index)
            continue;

        if (os->bit_rate > 0)
            snprintf(bandwidth_str, sizeof(bandwidth_str), " bandwidth=\"%d\"",
                     os->bit_rate);

        if (as->media_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *st = s->streams[i];
            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/%s\" codecs=\"%s\"%s width=\"%d\" height=\"%d\"",
                        i, os->format_name, os->codec_str, bandwidth_str, s->streams[i]->codecpar->width, s->streams[i]->codecpar->height);
            if (st->avg_frame_rate.num)
                avio_printf(out, " frameRate=\"%d/%d\"", st->avg_frame_rate.num, st->avg_frame_rate.den);
            avio_printf(out, ">\n");
        } else {
            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/%s\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n",
                        i, os->format_name, os->codec_str, bandwidth_str, s->streams[i]->codecpar->sample_rate);
            avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n",
                        s->streams[i]->codecpar->channels);
        }
        output_segment_list(os, out, s, i, final);
        avio_printf(out, "\t\t\t</Representation>\n");
    }
    avio_printf(out, "\t\t</AdaptationSet>\n");

    return 0;
}
  724. static int add_adaptation_set(AVFormatContext *s, AdaptationSet **as, enum AVMediaType type)
  725. {
  726. DASHContext *c = s->priv_data;
  727. void *mem = av_realloc(c->as, sizeof(*c->as) * (c->nb_as + 1));
  728. if (!mem)
  729. return AVERROR(ENOMEM);
  730. c->as = mem;
  731. ++c->nb_as;
  732. *as = &c->as[c->nb_as - 1];
  733. memset(*as, 0, sizeof(**as));
  734. (*as)->media_type = type;
  735. return 0;
  736. }
  737. static int adaptation_set_add_stream(AVFormatContext *s, int as_idx, int i)
  738. {
  739. DASHContext *c = s->priv_data;
  740. AdaptationSet *as = &c->as[as_idx - 1];
  741. OutputStream *os = &c->streams[i];
  742. if (as->media_type != s->streams[i]->codecpar->codec_type) {
  743. av_log(s, AV_LOG_ERROR, "Codec type of stream %d doesn't match AdaptationSet's media type\n", i);
  744. return AVERROR(EINVAL);
  745. } else if (os->as_idx) {
  746. av_log(s, AV_LOG_ERROR, "Stream %d is already assigned to an AdaptationSet\n", i);
  747. return AVERROR(EINVAL);
  748. }
  749. os->as_idx = as_idx;
  750. return 0;
  751. }
/* Parse the "adaptation_sets" option with a small state machine and build
 * c->as, mapping every stream to exactly one AdaptationSet. With no option
 * set, each stream gets its own set. Returns 0 on success, EINVAL on syntax
 * errors or unmapped streams, or ENOMEM. */
static int parse_adaptation_sets(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    const char *p = c->adaptation_sets;
    enum { new_set, parse_id, parsing_streams, parse_descriptor } state;
    AdaptationSet *as;
    int i, n, ret;

    // default: one AdaptationSet for each stream
    if (!p) {
        for (i = 0; i < s->nb_streams; i++) {
            if ((ret = add_adaptation_set(s, &as, s->streams[i]->codecpar->codec_type)) < 0)
                return ret;
            snprintf(as->id, sizeof(as->id), "%d", i);

            c->streams[i].as_idx = c->nb_as;
        }
        goto end;
    }

    // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
    // option id=0,descriptor=descriptor_str,streams=0,1,2 and so on
    // descriptor is useful to the scheme defined by ISO/IEC 23009-1:2014/Amd.2:2015
    // descriptor_str should be a self-closing xml tag.
    state = new_set;
    while (*p) {
        if (*p == ' ') {
            p++;
            continue;
        } else if (state == new_set && av_strstart(p, "id=", &p)) {
            /* New set: media type is unknown until a stream is mapped. */
            if ((ret = add_adaptation_set(s, &as, AVMEDIA_TYPE_UNKNOWN)) < 0)
                return ret;

            n = strcspn(p, ",");
            snprintf(as->id, sizeof(as->id), "%.*s", n, p);

            p += n;
            if (*p)
                p++;
            state = parse_id;
        } else if (state == parse_id && av_strstart(p, "descriptor=", &p)) {
            n = strcspn(p, ">") + 1; //followed by one comma, so plus 1
            if (n < strlen(p)) {
                as->descriptor = av_strndup(p, n);
            } else {
                av_log(s, AV_LOG_ERROR, "Parse error, descriptor string should be a self-closing xml tag\n");
                return AVERROR(EINVAL);
            }
            p += n;
            if (*p)
                p++;
            state = parse_descriptor;
        } else if ((state == parse_id || state == parse_descriptor) && av_strstart(p, "streams=", &p)) { //descriptor is optional
            state = parsing_streams;
        } else if (state == parsing_streams) {
            AdaptationSet *as = &c->as[c->nb_as - 1];
            char idx_str[8], *end_str;

            n = strcspn(p, " ,");
            snprintf(idx_str, sizeof(idx_str), "%.*s", n, p);
            p += n;

            // if value is "a" or "v", map all streams of that type
            if (as->media_type == AVMEDIA_TYPE_UNKNOWN && (idx_str[0] == 'v' || idx_str[0] == 'a')) {
                enum AVMediaType type = (idx_str[0] == 'v') ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
                av_log(s, AV_LOG_DEBUG, "Map all streams of type %s\n", idx_str);

                for (i = 0; i < s->nb_streams; i++) {
                    if (s->streams[i]->codecpar->codec_type != type)
                        continue;

                    as->media_type = s->streams[i]->codecpar->codec_type;

                    if ((ret = adaptation_set_add_stream(s, c->nb_as, i)) < 0)
                        return ret;
                }
            } else { // select single stream
                i = strtol(idx_str, &end_str, 10);
                if (idx_str == end_str || i < 0 || i >= s->nb_streams) {
                    av_log(s, AV_LOG_ERROR, "Selected stream \"%s\" not found!\n", idx_str);
                    return AVERROR(EINVAL);
                }
                av_log(s, AV_LOG_DEBUG, "Map stream %d\n", i);

                /* First mapped stream fixes the set's media type. */
                if (as->media_type == AVMEDIA_TYPE_UNKNOWN) {
                    as->media_type = s->streams[i]->codecpar->codec_type;
                }

                if ((ret = adaptation_set_add_stream(s, c->nb_as, i)) < 0)
                    return ret;
            }

            /* A space ends the stream list and starts the next set. */
            if (*p == ' ')
                state = new_set;
            if (*p)
                p++;
        } else {
            return AVERROR(EINVAL);
        }
    }

end:
    // check for unassigned streams
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        if (!os->as_idx) {
            av_log(s, AV_LOG_ERROR, "Stream %d is not mapped to an AdaptationSet\n", i);
            return AVERROR(EINVAL);
        }
    }
    return 0;
}
/* Write the DASH MPD manifest to s->url, and additionally an HLS master
 * playlist ("master.m3u8") when the hls_playlist option is enabled.
 *
 * @param s     parent muxer context
 * @param final nonzero when writing the last (static) manifest at trailer
 *              time; zero for periodic (dynamic/live) updates
 * @return 0 on success, a negative AVERROR on failure
 *
 * On a "file" protocol the manifest is written to "<url>.tmp" and renamed
 * into place so readers never observe a partially written manifest. */
static int write_manifest(AVFormatContext *s, int final)
{
    DASHContext *c = s->priv_data;
    AVIOContext *out;
    char temp_filename[1024];
    int ret, i;
    const char *proto = avio_find_protocol_name(s->url);
    int use_rename = proto && !strcmp(proto, "file");
    // static so the non-file-protocol warning is only printed once per process
    static unsigned int warned_non_file = 0;
    AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
    AVDictionary *opts = NULL;

    if (!use_rename && !warned_non_file++)
        av_log(s, AV_LOG_ERROR, "Cannot use rename on non file protocol, this may lead to races and temporary partial files\n");

    snprintf(temp_filename, sizeof(temp_filename), use_rename ? "%s.tmp" : "%s", s->url);
    set_http_options(&opts, c);
    ret = dashenc_io_open(s, &c->mpd_out, temp_filename, &opts);
    av_dict_free(&opts);
    if (ret < 0) {
        return handle_io_open_error(s, ret, temp_filename);
    }
    out = c->mpd_out;
    avio_printf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    avio_printf(out, "<MPD xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
                "\txmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n"
                "\txmlns:xlink=\"http://www.w3.org/1999/xlink\"\n"
                "\txsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd\"\n"
                "\tprofiles=\"urn:mpeg:dash:profile:isoff-live:2011\"\n"
                "\ttype=\"%s\"\n", final ? "static" : "dynamic");
    if (final) {
        // Static MPD: total duration is known, no update/publish attributes.
        avio_printf(out, "\tmediaPresentationDuration=\"");
        write_time(out, c->total_duration);
        avio_printf(out, "\"\n");
    } else {
        // Dynamic (live) MPD: advertise update cadence and availability info.
        int64_t update_period = c->last_duration / AV_TIME_BASE;
        char now_str[100];
        if (c->use_template && !c->use_timeline)
            update_period = 500; // with a pure template the MPD rarely changes
        avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", update_period);
        avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE);
        if (c->availability_start_time[0])
            avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time);
        format_date_now(now_str, sizeof(now_str));
        if (now_str[0])
            avio_printf(out, "\tpublishTime=\"%s\"\n", now_str);
        if (c->window_size && c->use_template) {
            avio_printf(out, "\ttimeShiftBufferDepth=\"");
            write_time(out, c->last_duration * c->window_size);
            avio_printf(out, "\"\n");
        }
    }
    avio_printf(out, "\tminBufferTime=\"");
    write_time(out, c->last_duration * 2);
    avio_printf(out, "\">\n");
    avio_printf(out, "\t<ProgramInformation>\n");
    if (title) {
        char *escaped = xmlescape(title->value);
        avio_printf(out, "\t\t<Title>%s</Title>\n", escaped);
        av_free(escaped);
    }
    avio_printf(out, "\t</ProgramInformation>\n");
    if (c->window_size && s->nb_streams > 0 && c->streams[0].nb_segments > 0 && !c->use_template) {
        // Sliding window without templates: the Period start moves forward
        // to the oldest segment still in the window.
        OutputStream *os = &c->streams[0];
        int start_index = FFMAX(os->nb_segments - c->window_size, 0);
        int64_t start_time = av_rescale_q(os->segments[start_index]->time, s->streams[0]->time_base, AV_TIME_BASE_Q);
        avio_printf(out, "\t<Period id=\"0\" start=\"");
        write_time(out, start_time);
        avio_printf(out, "\">\n");
    } else {
        avio_printf(out, "\t<Period id=\"0\" start=\"PT0.0S\">\n");
    }
    for (i = 0; i < c->nb_as; i++) {
        if ((ret = write_adaptation_set(s, out, i, final)) < 0)
            return ret;
    }
    avio_printf(out, "\t</Period>\n");
    if (c->utc_timing_url)
        avio_printf(out, "\t<UTCTiming schemeIdUri=\"urn:mpeg:dash:utc:http-xsdate:2014\" value=\"%s\"/>\n", c->utc_timing_url);
    avio_printf(out, "</MPD>\n");
    avio_flush(out);
    dashenc_io_close(s, &c->mpd_out, temp_filename);

    if (use_rename) {
        if ((ret = avpriv_io_move(temp_filename, s->url)) < 0)
            return ret;
    }

    if (c->hls_playlist) {
        char filename_hls[1024];
        const char *audio_group = "A1";
        char audio_codec_str[128] = "\0";
        int is_default = 1;
        int max_audio_bitrate = 0;

        // Publish master playlist only the configured rate
        if (c->master_playlist_created && (!c->master_publish_rate ||
             c->streams[0].segment_index % c->master_publish_rate))
            return 0;

        if (*c->dirname)
            snprintf(filename_hls, sizeof(filename_hls), "%smaster.m3u8", c->dirname);
        else
            snprintf(filename_hls, sizeof(filename_hls), "master.m3u8");

        snprintf(temp_filename, sizeof(temp_filename), use_rename ? "%s.tmp" : "%s", filename_hls);

        set_http_options(&opts, c);
        ret = dashenc_io_open(s, &c->m3u8_out, temp_filename, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            return handle_io_open_error(s, ret, temp_filename);
        }

        ff_hls_write_playlist_version(c->m3u8_out, 7);

        // First pass: audio renditions. Only mp4 segments are listed in the
        // HLS master playlist; the first audio stream becomes the default.
        for (i = 0; i < s->nb_streams; i++) {
            char playlist_file[64];
            AVStream *st = s->streams[i];
            OutputStream *os = &c->streams[i];
            if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;
            if (os->segment_type != SEGMENT_TYPE_MP4)
                continue;
            get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i);
            ff_hls_write_audio_rendition(c->m3u8_out, (char *)audio_group,
                                         playlist_file, NULL, i, is_default);
            max_audio_bitrate = FFMAX(st->codecpar->bit_rate +
                                      os->muxer_overhead, max_audio_bitrate);
            // Collect the union of audio codec strings, comma separated,
            // skipping duplicates.
            if (!av_strnstr(audio_codec_str, os->codec_str, sizeof(audio_codec_str))) {
                if (strlen(audio_codec_str))
                    av_strlcat(audio_codec_str, ",", sizeof(audio_codec_str));
                av_strlcat(audio_codec_str, os->codec_str, sizeof(audio_codec_str));
            }
            is_default = 0;
        }

        // Second pass: video variant streams, each referencing the audio
        // group and advertising combined video+audio bandwidth.
        for (i = 0; i < s->nb_streams; i++) {
            char playlist_file[64];
            char codec_str[128];
            AVStream *st = s->streams[i];
            OutputStream *os = &c->streams[i];
            char *agroup = NULL;
            char *codec_str_ptr = NULL;
            int stream_bitrate = st->codecpar->bit_rate + os->muxer_overhead;
            if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                continue;
            if (os->segment_type != SEGMENT_TYPE_MP4)
                continue;
            av_strlcpy(codec_str, os->codec_str, sizeof(codec_str));
            if (max_audio_bitrate) {
                agroup = (char *)audio_group;
                stream_bitrate += max_audio_bitrate;
                av_strlcat(codec_str, ",", sizeof(codec_str));
                av_strlcat(codec_str, audio_codec_str, sizeof(codec_str));
            }
            // NOTE(review): CODECS is omitted for HEVC here — presumably the
            // generated tag would be unreliable; confirm before changing.
            if (st->codecpar->codec_id != AV_CODEC_ID_HEVC) {
                codec_str_ptr = codec_str;
            }
            get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i);
            ff_hls_write_stream_info(st, c->m3u8_out, stream_bitrate,
                                     playlist_file, agroup,
                                     codec_str_ptr, NULL);
        }
        dashenc_io_close(s, &c->m3u8_out, temp_filename);
        if (use_rename)
            if ((ret = avpriv_io_move(temp_filename, filename_hls)) < 0)
                return ret;
        c->master_playlist_created = 1;
    }

    return 0;
}
  1011. static int dict_copy_entry(AVDictionary **dst, const AVDictionary *src, const char *key)
  1012. {
  1013. AVDictionaryEntry *entry = av_dict_get(src, key, NULL, 0);
  1014. if (entry)
  1015. av_dict_set(dst, key, entry->value, AV_DICT_DONT_OVERWRITE);
  1016. return 0;
  1017. }
/* Initialize the DASH muxer: validate and reconcile options, split the
 * output URL into directory + basename, parse the adaptation-set mapping,
 * and create one child muxer context (mp4 or webm) per input stream,
 * writing each representation's init segment location.
 *
 * @return 0 on success, a negative AVERROR on failure
 *
 * NOTE(review): on mid-loop failure the already-allocated child contexts
 * are not freed here — presumably cleaned up by the deinit path; confirm. */
static int dash_init(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int ret = 0, i;
    char *ptr;
    char basename[1024];

    c->nr_of_streams_to_flush = 0;
    // single_file_name implies single_file; single_file excludes templates
    if (c->single_file_name)
        c->single_file = 1;
    if (c->single_file)
        c->use_template = 0;

#if FF_API_DASH_MIN_SEG_DURATION
    // 5000000 is the old default; any other value means the user set the
    // deprecated option explicitly.
    if (c->min_seg_duration != 5000000) {
        av_log(s, AV_LOG_WARNING, "The min_seg_duration option is deprecated and will be removed. Please use the -seg_duration\n");
        c->seg_duration = c->min_seg_duration;
    }
#endif
    if (c->lhls && s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(s, AV_LOG_ERROR,
               "LHLS is experimental, Please set -strict experimental in order to enable it.\n");
        return AVERROR_EXPERIMENTAL;
    }

    // LHLS only makes sense with streaming and an HLS playlist.
    if (c->lhls && !c->streaming) {
        av_log(s, AV_LOG_WARNING, "LHLS option will be ignored as streaming is not enabled\n");
        c->lhls = 0;
    }

    if (c->lhls && !c->hls_playlist) {
        av_log(s, AV_LOG_WARNING, "LHLS option will be ignored as hls_playlist is not enabled\n");
        c->lhls = 0;
    }

    // A global sidx requires a single seekable file and no streaming.
    if (c->global_sidx && !c->single_file) {
        av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as single_file is not enabled\n");
        c->global_sidx = 0;
    }

    if (c->global_sidx && c->streaming) {
        av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as streaming is enabled\n");
        c->global_sidx = 0;
    }

    // Split s->url into c->dirname (up to and including the last '/')
    // and basename (filename without extension).
    av_strlcpy(c->dirname, s->url, sizeof(c->dirname));
    ptr = strrchr(c->dirname, '/');
    if (ptr) {
        av_strlcpy(basename, &ptr[1], sizeof(basename));
        ptr[1] = '\0';
    } else {
        c->dirname[0] = '\0';
        av_strlcpy(basename, s->url, sizeof(basename));
    }

    ptr = strrchr(basename, '.');
    if (ptr)
        *ptr = '\0';

    c->streams = av_mallocz(sizeof(*c->streams) * s->nb_streams);
    if (!c->streams)
        return AVERROR(ENOMEM);

    if ((ret = parse_adaptation_sets(s)) < 0)
        return ret;

    if ((ret = init_segment_types(s)) < 0)
        return ret;

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        AdaptationSet *as = &c->as[os->as_idx - 1];
        AVFormatContext *ctx;
        AVStream *st;
        AVDictionary *opts = NULL;
        char filename[1024];

        os->bit_rate = s->streams[i]->codecpar->bit_rate;
        if (!os->bit_rate) {
            // Missing bitrate is fatal only in strict mode; otherwise it is
            // estimated later from the first segment (see dash_flush()).
            int level = s->strict_std_compliance >= FF_COMPLIANCE_STRICT ?
                        AV_LOG_ERROR : AV_LOG_WARNING;
            av_log(s, level, "No bit rate set for stream %d\n", i);
            if (s->strict_std_compliance >= FF_COMPLIANCE_STRICT)
                return AVERROR(EINVAL);
        }

        // copy AdaptationSet language and role from stream metadata
        dict_copy_entry(&as->metadata, s->streams[i]->metadata, "language");
        dict_copy_entry(&as->metadata, s->streams[i]->metadata, "role");

        // Expand the "$ext$" placeholder in the segment name templates with
        // the per-stream extension (mp4/webm).
        if (c->init_seg_name) {
            os->init_seg_name = av_strireplace(c->init_seg_name, "$ext$", os->extension_name);
            if (!os->init_seg_name)
                return AVERROR(ENOMEM);
        }
        if (c->media_seg_name) {
            os->media_seg_name = av_strireplace(c->media_seg_name, "$ext$", os->extension_name);
            if (!os->media_seg_name)
                return AVERROR(ENOMEM);
        }
        if (c->single_file_name) {
            os->single_file_name = av_strireplace(c->single_file_name, "$ext$", os->extension_name);
            if (!os->single_file_name)
                return AVERROR(ENOMEM);
        }

        if (os->segment_type == SEGMENT_TYPE_WEBM) {
            if ((!c->single_file && check_file_extension(os->init_seg_name, os->format_name) != 0) ||
                (!c->single_file && check_file_extension(os->media_seg_name, os->format_name) != 0) ||
                (c->single_file && check_file_extension(os->single_file_name, os->format_name) != 0)) {
                av_log(s, AV_LOG_WARNING,
                       "One or many segment file names doesn't end with .webm. "
                       "Override -init_seg_name and/or -media_seg_name and/or "
                       "-single_file_name to end with the extension .webm\n");
            }
            if (c->streaming) {
                // Streaming not supported as matroskaenc buffers internally before writing the output
                av_log(s, AV_LOG_WARNING, "One or more streams in WebM output format. Streaming option will be ignored\n");
                c->streaming = 0;
            }
        }

        // Create the per-representation child muxer and mirror the relevant
        // settings from the parent context.
        os->ctx = ctx = avformat_alloc_context();
        if (!ctx)
            return AVERROR(ENOMEM);

        ctx->oformat = av_guess_format(os->format_name, NULL, NULL);
        if (!ctx->oformat)
            return AVERROR_MUXER_NOT_FOUND;
        ctx->interrupt_callback    = s->interrupt_callback;
        ctx->opaque                = s->opaque;
        ctx->io_close              = s->io_close;
        ctx->io_open               = s->io_open;
        ctx->strict_std_compliance = s->strict_std_compliance;

        if (!(st = avformat_new_stream(ctx, NULL)))
            return AVERROR(ENOMEM);
        avcodec_parameters_copy(st->codecpar, s->streams[i]->codecpar);
        st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;
        st->time_base = s->streams[i]->time_base;
        st->avg_frame_rate = s->streams[i]->avg_frame_rate;
        ctx->avoid_negative_ts = s->avoid_negative_ts;
        ctx->flags = s->flags;

        // Determine the init segment (or single-file) name for this stream.
        if (c->single_file) {
            if (os->single_file_name)
                ff_dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), os->single_file_name, i, 0, os->bit_rate, 0);
            else
                snprintf(os->initfile, sizeof(os->initfile), "%s-stream%d.%s", basename, i, os->format_name);
        } else {
            ff_dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), os->init_seg_name, i, 0, os->bit_rate, 0);
        }
        snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
        set_http_options(&opts, c);
        if (!c->single_file) {
            // Segmented output: muxer writes into a dynamic buffer which is
            // flushed to per-segment files; os->out is the init segment.
            if ((ret = avio_open_dyn_buf(&ctx->pb)) < 0)
                return ret;
            ret = s->io_open(s, &os->out, filename, AVIO_FLAG_WRITE, &opts);
        } else {
            ctx->url = av_strdup(filename);
            ret = avio_open2(&ctx->pb, filename, AVIO_FLAG_WRITE, NULL, &opts);
        }
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        os->init_start_pos = 0;

        av_dict_copy(&opts, c->format_options, 0);
        if (os->segment_type == SEGMENT_TYPE_MP4) {
            if (c->streaming)
                // frag_every_frame : Allows lower latency streaming
                // skip_sidx : Reduce bitrate overhead
                // skip_trailer : Avoids growing memory usage with time
                av_dict_set(&opts, "movflags", "frag_every_frame+dash+delay_moov+skip_sidx+skip_trailer", 0);
            else {
                if (c->global_sidx)
                    av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov+global_sidx+skip_trailer", 0);
                else
                    av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov+skip_trailer", 0);
            }
        } else {
            // WebM (matroskaenc) live options; cluster boundaries stand in
            // for fragment boundaries.
            av_dict_set_int(&opts, "cluster_time_limit", c->seg_duration / 1000, 0);
            av_dict_set_int(&opts, "cluster_size_limit", 5 * 1024 * 1024, 0); // set a large cluster size limit
            av_dict_set_int(&opts, "dash", 1, 0);
            av_dict_set_int(&opts, "dash_track_number", i + 1, 0);
            av_dict_set_int(&opts, "live", 1, 0);
        }
        ret = avformat_init_output(ctx, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        os->ctx_inited = 1;
        avio_flush(ctx->pb);

        av_log(s, AV_LOG_VERBOSE, "Representation %d init segment will be written to: %s\n", i, filename);

        // Propagate any time base change made by the child muxer.
        s->streams[i]->time_base = st->time_base;
        // If the muxer wants to shift timestamps, request to have them shifted
        // already before being handed to this muxer, so we don't have mismatches
        // between the MPD and the actual segments.
        s->avoid_negative_ts = ctx->avoid_negative_ts;
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            // Track min/max frame rate across the adaptation set for the MPD.
            AVRational avg_frame_rate = s->streams[i]->avg_frame_rate;
            if (avg_frame_rate.num > 0) {
                if (av_cmp_q(avg_frame_rate, as->min_frame_rate) < 0)
                    as->min_frame_rate = avg_frame_rate;
                if (av_cmp_q(as->max_frame_rate, avg_frame_rate) < 0)
                    as->max_frame_rate = avg_frame_rate;
            } else {
                as->ambiguous_frame_rate = 1;
            }
            c->has_video = 1;
        }

        set_codec_str(s, st->codecpar, &st->avg_frame_rate, os->codec_str,
                      sizeof(os->codec_str));
        os->first_pts = AV_NOPTS_VALUE;
        os->max_pts = AV_NOPTS_VALUE;
        os->last_dts = AV_NOPTS_VALUE;
        os->segment_index = 1;

        // Segments are cut on video keyframes, so only video streams
        // trigger flushes (see dash_flush()).
        if (s->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            c->nr_of_streams_to_flush++;
    }

    if (!c->has_video && c->seg_duration <= 0) {
        av_log(s, AV_LOG_WARNING, "no video stream and no seg duration set\n");
        return AVERROR(EINVAL);
    }

    c->nr_of_streams_flushed = 0;

    return 0;
}
  1224. static int dash_write_header(AVFormatContext *s)
  1225. {
  1226. DASHContext *c = s->priv_data;
  1227. int i, ret;
  1228. for (i = 0; i < s->nb_streams; i++) {
  1229. OutputStream *os = &c->streams[i];
  1230. if ((ret = avformat_write_header(os->ctx, NULL)) < 0)
  1231. return ret;
  1232. // Flush init segment
  1233. // Only for WebM segment, since for mp4 delay_moov is set and
  1234. // the init segment is thus flushed after the first packets.
  1235. if (os->segment_type == SEGMENT_TYPE_WEBM &&
  1236. (ret = flush_init_segment(s, os)) < 0)
  1237. return ret;
  1238. }
  1239. return ret;
  1240. }
  1241. static int add_segment(OutputStream *os, const char *file,
  1242. int64_t time, int64_t duration,
  1243. int64_t start_pos, int64_t range_length,
  1244. int64_t index_length, int next_exp_index)
  1245. {
  1246. int err;
  1247. Segment *seg;
  1248. if (os->nb_segments >= os->segments_size) {
  1249. os->segments_size = (os->segments_size + 1) * 2;
  1250. if ((err = av_reallocp(&os->segments, sizeof(*os->segments) *
  1251. os->segments_size)) < 0) {
  1252. os->segments_size = 0;
  1253. os->nb_segments = 0;
  1254. return err;
  1255. }
  1256. }
  1257. seg = av_mallocz(sizeof(*seg));
  1258. if (!seg)
  1259. return AVERROR(ENOMEM);
  1260. av_strlcpy(seg->file, file, sizeof(seg->file));
  1261. seg->time = time;
  1262. seg->duration = duration;
  1263. if (seg->time < 0) { // If pts<0, it is expected to be cut away with an edit list
  1264. seg->duration += seg->time;
  1265. seg->time = 0;
  1266. }
  1267. seg->start_pos = start_pos;
  1268. seg->range_length = range_length;
  1269. seg->index_length = index_length;
  1270. os->segments[os->nb_segments++] = seg;
  1271. os->segment_index++;
  1272. //correcting the segment index if it has fallen behind the expected value
  1273. if (os->segment_index < next_exp_index) {
  1274. av_log(NULL, AV_LOG_WARNING, "Correcting the segment index after file %s: current=%d corrected=%d\n",
  1275. file, os->segment_index, next_exp_index);
  1276. os->segment_index = next_exp_index;
  1277. }
  1278. return 0;
  1279. }
/* Write a fixed 24-byte ISO BMFF "styp" (segment type) box: size, box
 * fourcc, major brand "msdh", minor version 0, and the compatible brands
 * "msdh" and "msix". Prepended to each mp4 media segment. */
static void write_styp(AVIOContext *pb)
{
    avio_wb32(pb, 24);        // box size: 6 x 4 bytes
    ffio_wfourcc(pb, "styp");
    ffio_wfourcc(pb, "msdh"); // major brand
    avio_wb32(pb, 0); /* minor */
    ffio_wfourcc(pb, "msdh"); // compatible brands
    ffio_wfourcc(pb, "msix");
}
  1289. static void find_index_range(AVFormatContext *s, const char *full_path,
  1290. int64_t pos, int *index_length)
  1291. {
  1292. uint8_t buf[8];
  1293. AVIOContext *pb;
  1294. int ret;
  1295. ret = s->io_open(s, &pb, full_path, AVIO_FLAG_READ, NULL);
  1296. if (ret < 0)
  1297. return;
  1298. if (avio_seek(pb, pos, SEEK_SET) != pos) {
  1299. ff_format_io_close(s, &pb);
  1300. return;
  1301. }
  1302. ret = avio_read(pb, buf, 8);
  1303. ff_format_io_close(s, &pb);
  1304. if (ret < 8)
  1305. return;
  1306. if (AV_RL32(&buf[4]) != MKTAG('s', 'i', 'd', 'x'))
  1307. return;
  1308. *index_length = AV_RB32(&buf[0]);
  1309. }
  1310. static int update_stream_extradata(AVFormatContext *s, OutputStream *os,
  1311. AVPacket *pkt, AVRational *frame_rate)
  1312. {
  1313. AVCodecParameters *par = os->ctx->streams[0]->codecpar;
  1314. uint8_t *extradata;
  1315. int ret, extradata_size;
  1316. if (par->extradata_size)
  1317. return 0;
  1318. extradata = av_packet_get_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA, &extradata_size);
  1319. if (!extradata_size)
  1320. return 0;
  1321. ret = ff_alloc_extradata(par, extradata_size);
  1322. if (ret < 0)
  1323. return ret;
  1324. memcpy(par->extradata, extradata, extradata_size);
  1325. set_codec_str(s, par, frame_rate, os->codec_str, sizeof(os->codec_str));
  1326. return 0;
  1327. }
  1328. static void dashenc_delete_file(AVFormatContext *s, char *filename) {
  1329. DASHContext *c = s->priv_data;
  1330. int http_base_proto = ff_is_http_proto(filename);
  1331. if (http_base_proto) {
  1332. AVIOContext *out = NULL;
  1333. AVDictionary *http_opts = NULL;
  1334. set_http_options(&http_opts, c);
  1335. av_dict_set(&http_opts, "method", "DELETE", 0);
  1336. if (dashenc_io_open(s, &out, filename, &http_opts) < 0) {
  1337. av_log(s, AV_LOG_ERROR, "failed to delete %s\n", filename);
  1338. }
  1339. av_dict_free(&http_opts);
  1340. ff_format_io_close(s, &out);
  1341. } else {
  1342. int res = avpriv_io_delete(filename);
  1343. if (res < 0) {
  1344. char errbuf[AV_ERROR_MAX_STRING_SIZE];
  1345. av_strerror(res, errbuf, sizeof(errbuf));
  1346. av_log(s, (res == AVERROR(ENOENT) ? AV_LOG_WARNING : AV_LOG_ERROR), "failed to delete %s: %s\n", filename, errbuf);
  1347. }
  1348. }
  1349. }
  1350. static int dashenc_delete_segment_file(AVFormatContext *s, const char* file)
  1351. {
  1352. DASHContext *c = s->priv_data;
  1353. size_t dirname_len, file_len;
  1354. char filename[1024];
  1355. dirname_len = strlen(c->dirname);
  1356. if (dirname_len >= sizeof(filename)) {
  1357. av_log(s, AV_LOG_WARNING, "Cannot delete segments as the directory path is too long: %"PRIu64" characters: %s\n",
  1358. (uint64_t)dirname_len, c->dirname);
  1359. return AVERROR(ENAMETOOLONG);
  1360. }
  1361. memcpy(filename, c->dirname, dirname_len);
  1362. file_len = strlen(file);
  1363. if ((dirname_len + file_len) >= sizeof(filename)) {
  1364. av_log(s, AV_LOG_WARNING, "Cannot delete segments as the path is too long: %"PRIu64" characters: %s%s\n",
  1365. (uint64_t)(dirname_len + file_len), c->dirname, file);
  1366. return AVERROR(ENAMETOOLONG);
  1367. }
  1368. memcpy(filename + dirname_len, file, file_len + 1); // include the terminating zero
  1369. dashenc_delete_file(s, filename);
  1370. return 0;
  1371. }
  1372. static inline void dashenc_delete_media_segments(AVFormatContext *s, OutputStream *os, int remove_count)
  1373. {
  1374. for (int i = 0; i < remove_count; ++i) {
  1375. dashenc_delete_segment_file(s, os->segments[i]->file);
  1376. // Delete the segment regardless of whether the file was successfully deleted
  1377. av_free(os->segments[i]);
  1378. }
  1379. os->nb_segments -= remove_count;
  1380. memmove(os->segments, os->segments + remove_count, os->nb_segments * sizeof(*os->segments));
  1381. }
/* Finish the current media segment(s) and rewrite the manifest.
 *
 * @param s      parent muxer context
 * @param final  nonzero at trailer time: write trailers and, for
 *               global_sidx, fix up segment offsets by the sidx size
 * @param stream index of the (video) stream whose keyframe triggered the
 *               flush, or -1 to flush everything
 * @return 0 on success, a negative AVERROR on failure
 *
 * Audio streams are flushed together with the triggering video stream so
 * their segments stay aligned with video keyframes. */
static int dash_flush(AVFormatContext *s, int final, int stream)
{
    DASHContext *c = s->priv_data;
    int i, ret = 0;

    const char *proto = avio_find_protocol_name(s->url);
    int use_rename = proto && !strcmp(proto, "file");

    int cur_flush_segment_index = 0, next_exp_index = -1;
    if (stream >= 0) {
        cur_flush_segment_index = c->streams[stream].segment_index;

        //finding the next segment's expected index, based on the current pts value
        if (c->use_template && !c->use_timeline && c->index_correction &&
            c->streams[stream].last_pts != AV_NOPTS_VALUE &&
            c->streams[stream].first_pts != AV_NOPTS_VALUE) {
            int64_t pts_diff = av_rescale_q(c->streams[stream].last_pts -
                                            c->streams[stream].first_pts,
                                            s->streams[stream]->time_base,
                                            AV_TIME_BASE_Q);
            next_exp_index = (pts_diff / c->seg_duration) + 1;
        }
    }

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        AVStream *st = s->streams[i];
        int range_length, index_length = 0;

        if (!os->packets_written)
            continue;

        // Flush the single stream that got a keyframe right now.
        // Flush all audio streams as well, in sync with video keyframes,
        // but not the other video streams.
        if (stream >= 0 && i != stream) {
            if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;
            // Make sure we don't flush audio streams multiple times, when
            // all video streams are flushed one at a time.
            if (c->has_video && os->segment_index > cur_flush_segment_index)
                continue;
        }

        if (!c->single_file) {
            // Per-segment files: mp4 segments start with a styp box unless
            // streaming mode already wrote one incrementally.
            if (os->segment_type == SEGMENT_TYPE_MP4 && !os->written_len)
                write_styp(os->ctx->pb);
        } else {
            snprintf(os->full_path, sizeof(os->full_path), "%s%s", c->dirname, os->initfile);
        }

        ret = flush_dynbuf(c, os, &range_length);
        if (ret < 0)
            break;
        os->packets_written = 0;

        if (c->single_file) {
            // Record the sidx length so byte ranges can be published.
            find_index_range(s, os->full_path, os->pos, &index_length);
        } else {
            dashenc_io_close(s, &os->out, os->temp_path);

            if (use_rename) {
                ret = avpriv_io_move(os->temp_path, os->full_path);
                if (ret < 0)
                    break;
            }
        }

        // Estimate container overhead in bits/s from the first segment:
        // (container bytes - payload bytes) over the segment duration.
        if (!os->muxer_overhead)
            os->muxer_overhead = ((int64_t) (range_length - os->total_pkt_size) *
                                  8 * AV_TIME_BASE) /
                                 av_rescale_q(os->max_pts - os->start_pts,
                                              st->time_base, AV_TIME_BASE_Q);
        os->total_pkt_size = 0;

        if (!os->bit_rate) {
            // calculate average bitrate of first segment
            int64_t bitrate = (int64_t) range_length * 8 * AV_TIME_BASE / av_rescale_q(os->max_pts - os->start_pts,
                                                                                      st->time_base,
                                                                                      AV_TIME_BASE_Q);
            if (bitrate >= 0)
                os->bit_rate = bitrate;
        }
        add_segment(os, os->filename, os->start_pts, os->max_pts - os->start_pts, os->pos, range_length, index_length, next_exp_index);
        av_log(s, AV_LOG_VERBOSE, "Representation %d media segment %d written to: %s\n", i, os->segment_index, os->full_path);

        os->pos += range_length;
    }

    // Sliding window: delete segments that fell out of window + extra slack.
    if (c->window_size) {
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            int remove_count = os->nb_segments - c->window_size - c->extra_window_size;
            if (remove_count > 0)
                dashenc_delete_media_segments(s, os, remove_count);
        }
    }

    if (final) {
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            if (os->ctx && os->ctx_inited) {
                int64_t file_size = avio_tell(os->ctx->pb);
                av_write_trailer(os->ctx);
                if (c->global_sidx) {
                    // The trailer inserted a global sidx before the media
                    // data; shift recorded byte offsets accordingly.
                    int j, start_index, start_number;
                    int64_t sidx_size = avio_tell(os->ctx->pb) - file_size;
                    get_start_index_number(os, c, &start_index, &start_number);
                    if (start_index >= os->nb_segments ||
                        os->segment_type != SEGMENT_TYPE_MP4)
                        continue;
                    os->init_range_length += sidx_size;
                    for (j = start_index; j < os->nb_segments; j++) {
                        Segment *seg = os->segments[j];
                        seg->start_pos += sidx_size;
                    }
                }
            }
        }
    }
    if (ret >= 0) {
        // With several video streams, wait until all of them were flushed
        // before rewriting the manifest (except at the end of the stream).
        if (c->has_video && !final) {
            c->nr_of_streams_flushed++;
            if (c->nr_of_streams_flushed != c->nr_of_streams_to_flush)
                return ret;

            c->nr_of_streams_flushed = 0;
        }
        ret = write_manifest(s, final);
    }
    return ret;
}
/* Per-packet entry point: track timestamps, decide whether the current
 * segment must be cut (on a keyframe once the target duration is reached),
 * forward the packet to the per-stream child muxer, and in streaming mode
 * push the buffered bytes out immediately.
 *
 * @return 0 on success, a negative AVERROR on failure */
static int dash_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    DASHContext *c = s->priv_data;
    AVStream *st = s->streams[pkt->stream_index];
    OutputStream *os = &c->streams[pkt->stream_index];
    int64_t seg_end_duration, elapsed_duration;
    int ret;

    ret = update_stream_extradata(s, os, pkt, &st->avg_frame_rate);
    if (ret < 0)
        return ret;

    // Fill in a heuristic guess of the packet duration, if none is available.
    // The mp4 muxer will do something similar (for the last packet in a fragment)
    // if nothing is set (setting it for the other packets doesn't hurt).
    // By setting a nonzero duration here, we can be sure that the mp4 muxer won't
    // invoke its heuristic (this doesn't have to be identical to that algorithm),
    // so that we know the exact timestamps of fragments.
    if (!pkt->duration && os->last_dts != AV_NOPTS_VALUE)
        pkt->duration = pkt->dts - os->last_dts;
    os->last_dts = pkt->dts;

    // If forcing the stream to start at 0, the mp4 muxer will set the start
    // timestamps to 0. Do the same here, to avoid mismatches in duration/timestamps.
    if (os->first_pts == AV_NOPTS_VALUE &&
        s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
        pkt->pts -= pkt->dts;
        pkt->dts  = 0;
    }

    if (os->first_pts == AV_NOPTS_VALUE)
        os->first_pts = pkt->pts;
    os->last_pts = pkt->pts;

    // First packet overall: record the wall-clock availabilityStartTime
    // published in the dynamic MPD.
    if (!c->availability_start_time[0]) {
        int64_t start_time_us = av_gettime();
        c->start_time_s = start_time_us / 1000000;
        format_date_now(c->availability_start_time,
                        sizeof(c->availability_start_time));
    }

    if (!os->availability_time_offset && pkt->duration) {
        int64_t frame_duration = av_rescale_q(pkt->duration, st->time_base,
                                              AV_TIME_BASE_Q);
        // availabilityTimeOffset in seconds: the segment minus one frame.
        os->availability_time_offset = ((double) c->seg_duration -
                                        frame_duration) / AV_TIME_BASE;
    }

    if (c->use_template && !c->use_timeline) {
        // Template w/o timeline: segments must stay on a fixed grid, so cut
        // relative to the very first pts.
        elapsed_duration = pkt->pts - os->first_pts;
        seg_end_duration = (int64_t) os->segment_index * c->seg_duration;
    } else {
        elapsed_duration = pkt->pts - os->start_pts;
        seg_end_duration = c->seg_duration;
    }

    // Cut a segment on a keyframe of the segmentation-driving stream (video
    // when present) once the target duration has elapsed.
    if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
        pkt->flags & AV_PKT_FLAG_KEY && os->packets_written &&
        av_compare_ts(elapsed_duration, st->time_base,
                      seg_end_duration, AV_TIME_BASE_Q) >= 0) {
        int64_t prev_duration = c->last_duration;

        c->last_duration = av_rescale_q(pkt->pts - os->start_pts,
                                        st->time_base,
                                        AV_TIME_BASE_Q);
        c->total_duration = av_rescale_q(pkt->pts - os->first_pts,
                                         st->time_base,
                                         AV_TIME_BASE_Q);

        // Warn when consecutive segment durations differ by more than 10%.
        if ((!c->use_timeline || !c->use_template) && prev_duration) {
            if (c->last_duration < prev_duration*9/10 ||
                c->last_duration > prev_duration*11/10) {
                av_log(s, AV_LOG_WARNING,
                       "Segment durations differ too much, enable use_timeline "
                       "and use_template, or keep a stricter keyframe interval\n");
            }
        }

        if ((ret = dash_flush(s, 0, pkt->stream_index)) < 0)
            return ret;
    }

    if (!os->packets_written) {
        // If we wrote a previous segment, adjust the start time of the segment
        // to the end of the previous one (which is the same as the mp4 muxer
        // does). This avoids gaps in the timeline.
        if (os->max_pts != AV_NOPTS_VALUE)
            os->start_pts = os->max_pts;
        else
            os->start_pts = pkt->pts;
    }

    if (os->max_pts == AV_NOPTS_VALUE)
        os->max_pts = pkt->pts + pkt->duration;
    else
        os->max_pts = FFMAX(os->max_pts, pkt->pts + pkt->duration);
    os->packets_written++;
    os->total_pkt_size += pkt->size;
    if ((ret = ff_write_chained(os->ctx, 0, pkt, s, 0)) < 0)
        return ret;

    // For mp4 the delayed moov is only available after the first packets.
    if (!os->init_range_length)
        flush_init_segment(s, os);

    //open the output context when the first frame of a segment is ready
    if (!c->single_file && os->packets_written == 1) {
        AVDictionary *opts = NULL;
        const char *proto = avio_find_protocol_name(s->url);
        int use_rename = proto && !strcmp(proto, "file");
        os->filename[0] = os->full_path[0] = os->temp_path[0] = '\0';
        ff_dash_fill_tmpl_params(os->filename, sizeof(os->filename),
                                 os->media_seg_name, pkt->stream_index,
                                 os->segment_index, os->bit_rate, os->start_pts);
        snprintf(os->full_path, sizeof(os->full_path), "%s%s", c->dirname,
                 os->filename);
        snprintf(os->temp_path, sizeof(os->temp_path),
                 use_rename ? "%s.tmp" : "%s", os->full_path);
        set_http_options(&opts, c);
        ret = dashenc_io_open(s, &os->out, os->temp_path, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            return handle_io_open_error(s, ret, os->temp_path);
        }

        // LHLS: announce the upcoming segment in the media playlist so
        // players can request it before it is complete.
        if (c->lhls) {
            char *prefetch_url = use_rename ? NULL : os->filename;
            write_hls_media_playlist(os, s, pkt->stream_index, 0, prefetch_url);
        }
    }

    //write out the data immediately in streaming mode
    if (c->streaming && os->segment_type == SEGMENT_TYPE_MP4) {
        int len = 0;
        uint8_t *buf = NULL;
        if (!os->written_len)
            write_styp(os->ctx->pb);
        avio_flush(os->ctx->pb);
        len = avio_get_dyn_buf (os->ctx->pb, &buf);
        if (os->out) {
            // Only the bytes not yet pushed in a previous call are written.
            avio_write(os->out, buf + os->written_len, len - os->written_len);
            avio_flush(os->out);
        }
        os->written_len = len;
    }

    return ret;
}
  1627. static int dash_write_trailer(AVFormatContext *s)
  1628. {
  1629. DASHContext *c = s->priv_data;
  1630. int i;
  1631. if (s->nb_streams > 0) {
  1632. OutputStream *os = &c->streams[0];
  1633. // If no segments have been written so far, try to do a crude
  1634. // guess of the segment duration
  1635. if (!c->last_duration)
  1636. c->last_duration = av_rescale_q(os->max_pts - os->start_pts,
  1637. s->streams[0]->time_base,
  1638. AV_TIME_BASE_Q);
  1639. c->total_duration = av_rescale_q(os->max_pts - os->first_pts,
  1640. s->streams[0]->time_base,
  1641. AV_TIME_BASE_Q);
  1642. }
  1643. dash_flush(s, 1, -1);
  1644. if (c->remove_at_exit) {
  1645. for (i = 0; i < s->nb_streams; ++i) {
  1646. OutputStream *os = &c->streams[i];
  1647. dashenc_delete_media_segments(s, os, os->nb_segments);
  1648. dashenc_delete_segment_file(s, os->initfile);
  1649. if (c->hls_playlist && os->segment_type == SEGMENT_TYPE_MP4) {
  1650. char filename[1024];
  1651. get_hls_playlist_name(filename, sizeof(filename), c->dirname, i);
  1652. dashenc_delete_file(s, filename);
  1653. }
  1654. }
  1655. dashenc_delete_file(s, s->url);
  1656. if (c->hls_playlist && c->master_playlist_created) {
  1657. char filename[1024];
  1658. snprintf(filename, sizeof(filename), "%smaster.m3u8", c->dirname);
  1659. dashenc_delete_file(s, filename);
  1660. }
  1661. }
  1662. return 0;
  1663. }
  1664. static int dash_check_bitstream(struct AVFormatContext *s, const AVPacket *avpkt)
  1665. {
  1666. DASHContext *c = s->priv_data;
  1667. OutputStream *os = &c->streams[avpkt->stream_index];
  1668. AVFormatContext *oc = os->ctx;
  1669. if (oc->oformat->check_bitstream) {
  1670. int ret;
  1671. AVPacket pkt = *avpkt;
  1672. pkt.stream_index = 0;
  1673. ret = oc->oformat->check_bitstream(oc, &pkt);
  1674. if (ret == 1) {
  1675. AVStream *st = s->streams[avpkt->stream_index];
  1676. AVStream *ost = oc->streams[0];
  1677. st->internal->bsfcs = ost->internal->bsfcs;
  1678. st->internal->nb_bsfcs = ost->internal->nb_bsfcs;
  1679. ost->internal->bsfcs = NULL;
  1680. ost->internal->nb_bsfcs = 0;
  1681. }
  1682. return ret;
  1683. }
  1684. return 1;
  1685. }
  1686. #define OFFSET(x) offsetof(DASHContext, x)
  1687. #define E AV_OPT_FLAG_ENCODING_PARAM
  1688. static const AVOption options[] = {
  1689. { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
  1690. { "window_size", "number of segments kept in the manifest", OFFSET(window_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, E },
  1691. { "extra_window_size", "number of segments kept outside of the manifest before removing from disk", OFFSET(extra_window_size), AV_OPT_TYPE_INT, { .i64 = 5 }, 0, INT_MAX, E },
  1692. #if FF_API_DASH_MIN_SEG_DURATION
  1693. { "min_seg_duration", "minimum segment duration (in microseconds) (will be deprecated)", OFFSET(min_seg_duration), AV_OPT_TYPE_INT, { .i64 = 5000000 }, 0, INT_MAX, E },
  1694. #endif
  1695. { "seg_duration", "segment duration (in seconds, fractional value can be set)", OFFSET(seg_duration), AV_OPT_TYPE_DURATION, { .i64 = 5000000 }, 0, INT_MAX, E },
  1696. { "remove_at_exit", "remove all segments when finished", OFFSET(remove_at_exit), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1697. { "use_template", "Use SegmentTemplate instead of SegmentList", OFFSET(use_template), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
  1698. { "use_timeline", "Use SegmentTimeline in SegmentTemplate", OFFSET(use_timeline), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
  1699. { "single_file", "Store all segments in one file, accessed using byte ranges", OFFSET(single_file), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1700. { "single_file_name", "DASH-templated name to be used for baseURL. Implies storing all segments in one file, accessed using byte ranges", OFFSET(single_file_name), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
  1701. { "init_seg_name", "DASH-templated name to used for the initialization segment", OFFSET(init_seg_name), AV_OPT_TYPE_STRING, {.str = "init-stream$RepresentationID$.$ext$"}, 0, 0, E },
  1702. { "media_seg_name", "DASH-templated name to used for the media segments", OFFSET(media_seg_name), AV_OPT_TYPE_STRING, {.str = "chunk-stream$RepresentationID$-$Number%05d$.$ext$"}, 0, 0, E },
  1703. { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, E },
  1704. { "method", "set the HTTP method", OFFSET(method), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
  1705. { "http_user_agent", "override User-Agent field in HTTP header", OFFSET(user_agent), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E},
  1706. { "http_persistent", "Use persistent HTTP connections", OFFSET(http_persistent), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, E },
  1707. { "hls_playlist", "Generate HLS playlist files(master.m3u8, media_%d.m3u8)", OFFSET(hls_playlist), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1708. { "streaming", "Enable/Disable streaming mode of output. Each frame will be moof fragment", OFFSET(streaming), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1709. { "timeout", "set timeout for socket I/O operations", OFFSET(timeout), AV_OPT_TYPE_DURATION, { .i64 = -1 }, -1, INT_MAX, .flags = E },
  1710. { "index_correction", "Enable/Disable segment index correction logic", OFFSET(index_correction), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1711. { "format_options","set list of options for the container format (mp4/webm) used for dash", OFFSET(format_options), AV_OPT_TYPE_DICT, {.str = NULL}, 0, 0, E},
  1712. { "global_sidx", "Write global SIDX atom. Applicable only for single file, mp4 output, non-streaming mode", OFFSET(global_sidx), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1713. { "dash_segment_type", "set dash segment files type", OFFSET(segment_type_option), AV_OPT_TYPE_INT, {.i64 = SEGMENT_TYPE_AUTO }, 0, SEGMENT_TYPE_NB - 1, E, "segment_type"},
  1714. { "auto", "select segment file format based on codec", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_AUTO }, 0, UINT_MAX, E, "segment_type"},
  1715. { "mp4", "make segment file in ISOBMFF format", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_MP4 }, 0, UINT_MAX, E, "segment_type"},
  1716. { "webm", "make segment file in WebM format", 0, AV_OPT_TYPE_CONST, {.i64 = SEGMENT_TYPE_WEBM }, 0, UINT_MAX, E, "segment_type"},
  1717. { "ignore_io_errors", "Ignore IO errors during open and write. Useful for long-duration runs with network output", OFFSET(ignore_io_errors), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1718. { "lhls", "Enable Low-latency HLS(Experimental). Adds #EXT-X-PREFETCH tag with current segment's URI", OFFSET(lhls), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
  1719. { "master_m3u8_publish_rate", "Publish master playlist every after this many segment intervals", OFFSET(master_publish_rate), AV_OPT_TYPE_INT, {.i64 = 0}, 0, UINT_MAX, E},
  1720. { NULL },
  1721. };
/* AVClass binding the private options table above to DASHContext so the
 * generic AVOption machinery (av_opt_set etc.) can find and log them. */
static const AVClass dash_class = {
    .class_name = "dash muxer",
    .item_name = av_default_item_name,
    .option = options,
    .version = LIBAVUTIL_VERSION_INT,
};
/* Registration record for the "dash" output format. AVFMT_NOFILE: the muxer
 * opens its own segment files rather than writing to a caller-supplied pb;
 * AVFMT_GLOBALHEADER: codecs must put extradata in the stream header, as
 * required for the fMP4/WebM init segments written by this muxer. */
AVOutputFormat ff_dash_muxer = {
    .name = "dash",
    .long_name = NULL_IF_CONFIG_SMALL("DASH Muxer"),
    .extensions = "mpd",
    .priv_data_size = sizeof(DASHContext),
    .audio_codec = AV_CODEC_ID_AAC,      /* default codecs; others may be mapped in */
    .video_codec = AV_CODEC_ID_H264,
    .flags = AVFMT_GLOBALHEADER | AVFMT_NOFILE | AVFMT_TS_NEGATIVE,
    .init = dash_init,
    .write_header = dash_write_header,
    .write_packet = dash_write_packet,
    .write_trailer = dash_write_trailer,
    .deinit = dash_free,
    .check_bitstream = dash_check_bitstream,
    .priv_class = &dash_class,
};