/*
 * MPEG-DASH ISO BMFF segmenter
 * Copyright (c) 2014 Martin Storsjo
 * Copyright (c) 2018 Akamai Technologies, Inc.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_UNISTD_H
#include <unistd.h>
#endif

#include "libavutil/avassert.h"
#include "libavutil/avutil.h"
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/rational.h"
#include "libavutil/time.h"
#include "libavutil/time_internal.h"

#include "avc.h"
#include "avformat.h"
#include "avio_internal.h"
#include "hlsplaylist.h"
#if CONFIG_HTTP_PROTOCOL
#include "http.h"
#endif
#include "internal.h"
#include "isom.h"
#include "os_support.h"
#include "url.h"
#include "vpcc.h"
#include "dash.h"
typedef enum {
    SEGMENT_TYPE_AUTO = 0,
    SEGMENT_TYPE_MP4,
    SEGMENT_TYPE_WEBM,
    SEGMENT_TYPE_NB
} SegmentType;

typedef struct Segment {
    char file[1024];
    int64_t start_pos;
    int range_length, index_length;
    int64_t time;
    double prog_date_time;
    int64_t duration;
    int n;
} Segment;

typedef struct AdaptationSet {
    char id[10];
    enum AVMediaType media_type;
    AVDictionary *metadata;
    AVRational min_frame_rate, max_frame_rate;
    int ambiguous_frame_rate;
} AdaptationSet;

typedef struct OutputStream {
    AVFormatContext *ctx;
    int ctx_inited, as_idx;
    AVIOContext *out;
    int packets_written;
    char initfile[1024];
    int64_t init_start_pos, pos;
    int init_range_length;
    int nb_segments, segments_size, segment_index;
    Segment **segments;
    int64_t first_pts, start_pts, max_pts;
    int64_t last_dts, last_pts;
    int bit_rate;
    SegmentType segment_type; /* segment type selected for this particular stream */
    const char *format_name;
    const char *extension_name;
    const char *single_file_name; /* file names selected for this particular stream */
    const char *init_seg_name;
    const char *media_seg_name;
    char codec_str[100];
    int written_len;
    char filename[1024];
    char full_path[1024];
    char temp_path[1024];
    double availability_time_offset;
    int total_pkt_size;
    int muxer_overhead;
} OutputStream;

typedef struct DASHContext {
    const AVClass *class; /* Class for private options. */
    char *adaptation_sets;
    AdaptationSet *as;
    int nb_as;
    int window_size;
    int extra_window_size;
#if FF_API_DASH_MIN_SEG_DURATION
    int min_seg_duration;
#endif
    int64_t seg_duration;
    int remove_at_exit;
    int use_template;
    int use_timeline;
    int single_file;
    OutputStream *streams;
    int has_video;
    int64_t last_duration;
    int64_t total_duration;
    char availability_start_time[100];
    time_t start_time_s;
    char dirname[1024];
    const char *single_file_name; /* file names as specified in options */
    const char *init_seg_name;
    const char *media_seg_name;
    const char *utc_timing_url;
    const char *method;
    const char *user_agent;
    int hls_playlist;
    int http_persistent;
    int master_playlist_created;
    AVIOContext *mpd_out;
    AVIOContext *m3u8_out;
    int streaming;
    int64_t timeout;
    int index_correction;
    char *format_options_str;
    int global_sidx;
    SegmentType segment_type_option; /* segment type as specified in options */
    int ignore_io_errors;
    int lhls;
    int master_publish_rate;
    int nr_of_streams_to_flush;
    int nr_of_streams_flushed;
} DASHContext;

static struct codec_string {
    int id;
    const char *str;
} codecs[] = {
    { AV_CODEC_ID_VP8, "vp8" },
    { AV_CODEC_ID_VP9, "vp9" },
    { AV_CODEC_ID_VORBIS, "vorbis" },
    { AV_CODEC_ID_OPUS, "opus" },
    { AV_CODEC_ID_FLAC, "flac" },
    { 0, NULL }
};

static struct format_string {
    SegmentType segment_type;
    const char *str;
} formats[] = {
    { SEGMENT_TYPE_AUTO, "auto" },
    { SEGMENT_TYPE_MP4, "mp4" },
    { SEGMENT_TYPE_WEBM, "webm" },
    { 0, NULL }
};
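
/* Open (or, with http_persistent, reuse) the output AVIOContext for a segment
 * or playlist file. For persistent HTTP connections a new request is issued
 * on the already open URLContext instead of reconnecting. */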
static int dashenc_io_open(AVFormatContext *s, AVIOContext **pb, char *filename,
                           AVDictionary **options) {
    DASHContext *c = s->priv_data;
    int http_base_proto = filename ? ff_is_http_proto(filename) : 0;
    int err = AVERROR_MUXER_NOT_FOUND;
    if (!*pb || !http_base_proto || !c->http_persistent) {
        err = s->io_open(s, pb, filename, AVIO_FLAG_WRITE, options);
#if CONFIG_HTTP_PROTOCOL
    } else {
        URLContext *http_url_context = ffio_geturlcontext(*pb);
        av_assert0(http_url_context);
        err = ff_http_do_new_request(http_url_context, filename);
        if (err < 0)
            ff_format_io_close(s, pb);
#endif
    }
    return err;
}

static void dashenc_io_close(AVFormatContext *s, AVIOContext **pb, char *filename) {
    DASHContext *c = s->priv_data;
    int http_base_proto = filename ? ff_is_http_proto(filename) : 0;

    if (!*pb)
        return;

    if (!http_base_proto || !c->http_persistent) {
        ff_format_io_close(s, pb);
#if CONFIG_HTTP_PROTOCOL
    } else {
        URLContext *http_url_context = ffio_geturlcontext(*pb);
        av_assert0(http_url_context);
        avio_flush(*pb);
        ffurl_shutdown(http_url_context, AVIO_FLAG_WRITE);
#endif
    }
}

static const char *get_format_str(SegmentType segment_type) {
    int i;
    for (i = 0; i < SEGMENT_TYPE_NB; i++)
        if (formats[i].segment_type == segment_type)
            return formats[i].str;
    return NULL;
}

static const char *get_extension_str(SegmentType type, int single_file)
{
    switch (type) {
    case SEGMENT_TYPE_MP4:  return single_file ? "mp4" : "m4s";
    case SEGMENT_TYPE_WEBM: return "webm";
    default: return NULL;
    }
}

static int handle_io_open_error(AVFormatContext *s, int err, char *url) {
    DASHContext *c = s->priv_data;
    char errbuf[AV_ERROR_MAX_STRING_SIZE];
    av_strerror(err, errbuf, sizeof(errbuf));
    av_log(s, c->ignore_io_errors ? AV_LOG_WARNING : AV_LOG_ERROR,
           "Unable to open %s for writing: %s\n", url, errbuf);
    return c->ignore_io_errors ? 0 : err;
}

static inline SegmentType select_segment_type(SegmentType segment_type, enum AVCodecID codec_id)
{
    if (segment_type == SEGMENT_TYPE_AUTO) {
        if (codec_id == AV_CODEC_ID_OPUS || codec_id == AV_CODEC_ID_VORBIS ||
            codec_id == AV_CODEC_ID_VP8 || codec_id == AV_CODEC_ID_VP9) {
            segment_type = SEGMENT_TYPE_WEBM;
        } else {
            segment_type = SEGMENT_TYPE_MP4;
        }
    }
    return segment_type;
}
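
/* Pick the container (mp4 or WebM) for each stream based on its codec and the
 * segment_type option, and disable HLS manifest output if no mp4 streams remain. */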
static int init_segment_types(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int has_mp4_streams = 0;
    for (int i = 0; i < s->nb_streams; ++i) {
        OutputStream *os = &c->streams[i];
        SegmentType segment_type = select_segment_type(
            c->segment_type_option, s->streams[i]->codecpar->codec_id);
        os->segment_type = segment_type;
        os->format_name = get_format_str(segment_type);
        if (!os->format_name) {
            av_log(s, AV_LOG_ERROR, "Could not select DASH segment type for stream %d\n", i);
            return AVERROR_MUXER_NOT_FOUND;
        }
        os->extension_name = get_extension_str(segment_type, c->single_file);
        if (!os->extension_name) {
            av_log(s, AV_LOG_ERROR, "Could not get extension type for stream %d\n", i);
            return AVERROR_MUXER_NOT_FOUND;
        }
        has_mp4_streams |= segment_type == SEGMENT_TYPE_MP4;
    }

    if (c->hls_playlist && !has_mp4_streams) {
        av_log(s, AV_LOG_WARNING, "No mp4 streams, disabling HLS manifest generation\n");
        c->hls_playlist = 0;
    }

    return 0;
}

static int check_file_extension(const char *filename, const char *extension) {
    char *dot;
    if (!filename || !extension)
        return -1;
    dot = strrchr(filename, '.');
    if (dot && !strcmp(dot + 1, extension))
        return 0;
    return -1;
}

static void set_vp9_codec_str(AVFormatContext *s, AVCodecParameters *par,
                              AVRational *frame_rate, char *str, int size) {
    VPCC vpcc;
    int ret = ff_isom_get_vpcc_features(s, par, frame_rate, &vpcc);
    if (ret == 0) {
        av_strlcatf(str, size, "vp09.%02d.%02d.%02d",
                    vpcc.profile, vpcc.level, vpcc.bitdepth);
    } else {
        // Default to just vp9 in case of error while finding out profile or level
        av_log(s, AV_LOG_WARNING, "Could not find VP9 profile and/or level\n");
        av_strlcpy(str, "vp9", size);
    }
    return;
}
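
/* Build the RFC 6381 "codecs" string for a stream: common WebM codecs are
 * mapped directly, while avc1/mp4a/mp4v get a profile/level suffix appended. */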
static void set_codec_str(AVFormatContext *s, AVCodecParameters *par,
                          AVRational *frame_rate, char *str, int size)
{
    const AVCodecTag *tags[2] = { NULL, NULL };
    uint32_t tag;
    int i;

    // common Webm codecs are not part of RFC 6381
    for (i = 0; codecs[i].id; i++)
        if (codecs[i].id == par->codec_id) {
            if (codecs[i].id == AV_CODEC_ID_VP9) {
                set_vp9_codec_str(s, par, frame_rate, str, size);
            } else {
                av_strlcpy(str, codecs[i].str, size);
            }
            return;
        }

    // for codecs part of RFC 6381
    if (par->codec_type == AVMEDIA_TYPE_VIDEO)
        tags[0] = ff_codec_movvideo_tags;
    else if (par->codec_type == AVMEDIA_TYPE_AUDIO)
        tags[0] = ff_codec_movaudio_tags;
    else
        return;

    tag = par->codec_tag;
    if (!tag)
        tag = av_codec_get_tag(tags, par->codec_id);
    if (!tag)
        return;
    if (size < 5)
        return;

    AV_WL32(str, tag);
    str[4] = '\0';
    if (!strcmp(str, "mp4a") || !strcmp(str, "mp4v")) {
        uint32_t oti;
        tags[0] = ff_mp4_obj_type;
        oti = av_codec_get_tag(tags, par->codec_id);
        if (oti)
            av_strlcatf(str, size, ".%02"PRIx32, oti);
        else
            return;

        if (tag == MKTAG('m', 'p', '4', 'a')) {
            if (par->extradata_size >= 2) {
                int aot = par->extradata[0] >> 3;
                if (aot == 31)
                    aot = ((AV_RB16(par->extradata) >> 5) & 0x3f) + 32;
                av_strlcatf(str, size, ".%d", aot);
            }
        } else if (tag == MKTAG('m', 'p', '4', 'v')) {
            // Unimplemented, should output ProfileLevelIndication as a decimal number
            av_log(s, AV_LOG_WARNING, "Incomplete RFC 6381 codec string for mp4v\n");
        }
    } else if (!strcmp(str, "avc1")) {
        uint8_t *tmpbuf = NULL;
        uint8_t *extradata = par->extradata;
        int extradata_size = par->extradata_size;
        if (!extradata_size)
            return;
        if (extradata[0] != 1) {
            AVIOContext *pb;
            if (avio_open_dyn_buf(&pb) < 0)
                return;
            if (ff_isom_write_avcc(pb, extradata, extradata_size) < 0) {
                ffio_free_dyn_buf(&pb);
                return;
            }
            extradata_size = avio_close_dyn_buf(pb, &extradata);
            tmpbuf = extradata;
        }

        if (extradata_size >= 4)
            av_strlcatf(str, size, ".%02x%02x%02x",
                        extradata[1], extradata[2], extradata[3]);
        av_free(tmpbuf);
    }
}
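
/* Flush the buffered data of one output stream: drain the child muxer, then
 * either append the dynamic buffer to the currently open segment file
 * (normal mode) or report the byte range written so far (single_file mode). */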
static int flush_dynbuf(DASHContext *c, OutputStream *os, int *range_length)
{
    uint8_t *buffer;

    if (!os->ctx->pb) {
        return AVERROR(EINVAL);
    }

    // flush
    av_write_frame(os->ctx, NULL);
    avio_flush(os->ctx->pb);

    if (!c->single_file) {
        // write out to file
        *range_length = avio_close_dyn_buf(os->ctx->pb, &buffer);
        os->ctx->pb = NULL;
        if (os->out)
            avio_write(os->out, buffer + os->written_len, *range_length - os->written_len);
        os->written_len = 0;
        av_free(buffer);

        // re-open buffer
        return avio_open_dyn_buf(&os->ctx->pb);
    } else {
        *range_length = avio_tell(os->ctx->pb) - os->pos;
        return 0;
    }
}

static void set_http_options(AVDictionary **options, DASHContext *c)
{
    if (c->method)
        av_dict_set(options, "method", c->method, 0);
    if (c->user_agent)
        av_dict_set(options, "user_agent", c->user_agent, 0);
    if (c->http_persistent)
        av_dict_set_int(options, "multiple_requests", 1, 0);
    if (c->timeout >= 0)
        av_dict_set_int(options, "timeout", c->timeout, 0);
}

static void get_hls_playlist_name(char *playlist_name, int string_size,
                                  const char *base_url, int id) {
    if (base_url)
        snprintf(playlist_name, string_size, "%smedia_%d.m3u8", base_url, id);
    else
        snprintf(playlist_name, string_size, "media_%d.m3u8", id);
}

static void get_start_index_number(OutputStream *os, DASHContext *c,
                                   int *start_index, int *start_number) {
    *start_index = 0;
    *start_number = 1;
    if (c->window_size) {
        *start_index = FFMAX(os->nb_segments - c->window_size, 0);
        *start_number = FFMAX(os->segment_index - c->window_size, 1);
    }
}
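
/* Write the per-representation HLS media playlist (media_<id>.m3u8); only mp4
 * segments are listed. With LHLS, prefetch_url adds an #EXT-X-PREFETCH tag for
 * the segment that is still being written. */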
static void write_hls_media_playlist(OutputStream *os, AVFormatContext *s,
                                     int representation_id, int final,
                                     char *prefetch_url) {
    DASHContext *c = s->priv_data;
    int timescale = os->ctx->streams[0]->time_base.den;
    char temp_filename_hls[1024];
    char filename_hls[1024];
    AVDictionary *http_opts = NULL;
    int target_duration = 0;
    int ret = 0;
    const char *proto = avio_find_protocol_name(c->dirname);
    int use_rename = proto && !strcmp(proto, "file");
    int i, start_index, start_number;
    double prog_date_time = 0;

    get_start_index_number(os, c, &start_index, &start_number);

    if (!c->hls_playlist || start_index >= os->nb_segments ||
        os->segment_type != SEGMENT_TYPE_MP4)
        return;

    get_hls_playlist_name(filename_hls, sizeof(filename_hls),
                          c->dirname, representation_id);

    snprintf(temp_filename_hls, sizeof(temp_filename_hls), use_rename ? "%s.tmp" : "%s", filename_hls);

    set_http_options(&http_opts, c);
    ret = dashenc_io_open(s, &c->m3u8_out, temp_filename_hls, &http_opts);
    av_dict_free(&http_opts);
    if (ret < 0) {
        handle_io_open_error(s, ret, temp_filename_hls);
        return;
    }
    for (i = start_index; i < os->nb_segments; i++) {
        Segment *seg = os->segments[i];
        double duration = (double) seg->duration / timescale;
        if (target_duration <= duration)
            target_duration = lrint(duration);
    }

    ff_hls_write_playlist_header(c->m3u8_out, 6, -1, target_duration,
                                 start_number, PLAYLIST_TYPE_NONE, 0);

    ff_hls_write_init_file(c->m3u8_out, os->initfile, c->single_file,
                           os->init_range_length, os->init_start_pos);

    for (i = start_index; i < os->nb_segments; i++) {
        Segment *seg = os->segments[i];

        if (prog_date_time == 0) {
            if (os->nb_segments == 1)
                prog_date_time = c->start_time_s;
            else
                prog_date_time = seg->prog_date_time;
        }
        seg->prog_date_time = prog_date_time;

        ret = ff_hls_write_file_entry(c->m3u8_out, 0, c->single_file,
                                      (double) seg->duration / timescale, 0,
                                      seg->range_length, seg->start_pos, NULL,
                                      c->single_file ? os->initfile : seg->file,
                                      &prog_date_time, 0, 0, 0);
        if (ret < 0) {
            av_log(os->ctx, AV_LOG_WARNING, "ff_hls_write_file_entry get error\n");
        }
    }

    if (prefetch_url)
        avio_printf(c->m3u8_out, "#EXT-X-PREFETCH:%s\n", prefetch_url);

    if (final)
        ff_hls_write_end_list(c->m3u8_out);

    dashenc_io_close(s, &c->m3u8_out, temp_filename_hls);

    if (use_rename)
        if (avpriv_io_move(temp_filename_hls, filename_hls) < 0) {
            av_log(os->ctx, AV_LOG_WARNING, "renaming file %s to %s failed\n\n", temp_filename_hls, filename_hls);
        }
}

static int flush_init_segment(AVFormatContext *s, OutputStream *os)
{
    DASHContext *c = s->priv_data;
    int ret, range_length;

    ret = flush_dynbuf(c, os, &range_length);
    if (ret < 0)
        return ret;

    os->pos = os->init_range_length = range_length;
    if (!c->single_file) {
        char filename[1024];
        snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
        dashenc_io_close(s, &os->out, filename);
    }
    return 0;
}

static void dash_free(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int i, j;

    if (c->as) {
        for (i = 0; i < c->nb_as; i++)
            av_dict_free(&c->as[i].metadata);
        av_freep(&c->as);
        c->nb_as = 0;
    }

    if (!c->streams)
        return;
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        if (os->ctx && os->ctx->pb) {
            if (!c->single_file)
                ffio_free_dyn_buf(&os->ctx->pb);
            else
                avio_close(os->ctx->pb);
        }
        ff_format_io_close(s, &os->out);
        if (os->ctx)
            avformat_free_context(os->ctx);
        for (j = 0; j < os->nb_segments; j++)
            av_free(os->segments[j]);
        av_free(os->segments);
        av_freep(&os->single_file_name);
        av_freep(&os->init_seg_name);
        av_freep(&os->media_seg_name);
    }
    av_freep(&c->streams);

    ff_format_io_close(s, &c->mpd_out);
    ff_format_io_close(s, &c->m3u8_out);
}
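
/* Emit the segment index of one representation in the MPD: either a
 * SegmentTemplate (optionally with a SegmentTimeline) or a SegmentList, and
 * refresh the matching HLS media playlist unless LHLS defers that. */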
static void output_segment_list(OutputStream *os, AVIOContext *out, AVFormatContext *s,
                                int representation_id, int final)
{
    DASHContext *c = s->priv_data;
    int i, start_index, start_number;
    get_start_index_number(os, c, &start_index, &start_number);

    if (c->use_template) {
        int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : AV_TIME_BASE;
        avio_printf(out, "\t\t\t\t<SegmentTemplate timescale=\"%d\" ", timescale);
        if (!c->use_timeline) {
            avio_printf(out, "duration=\"%"PRId64"\" ", c->seg_duration);
            if (c->streaming && os->availability_time_offset)
                avio_printf(out, "availabilityTimeOffset=\"%.3f\" ",
                            os->availability_time_offset);
        }
        avio_printf(out, "initialization=\"%s\" media=\"%s\" startNumber=\"%d\">\n", os->init_seg_name, os->media_seg_name, c->use_timeline ? start_number : 1);
        if (c->use_timeline) {
            int64_t cur_time = 0;
            avio_printf(out, "\t\t\t\t\t<SegmentTimeline>\n");
            for (i = start_index; i < os->nb_segments; ) {
                Segment *seg = os->segments[i];
                int repeat = 0;
                avio_printf(out, "\t\t\t\t\t\t<S ");
                if (i == start_index || seg->time != cur_time) {
                    cur_time = seg->time;
                    avio_printf(out, "t=\"%"PRId64"\" ", seg->time);
                }
                avio_printf(out, "d=\"%"PRId64"\" ", seg->duration);
                while (i + repeat + 1 < os->nb_segments &&
                       os->segments[i + repeat + 1]->duration == seg->duration &&
                       os->segments[i + repeat + 1]->time == os->segments[i + repeat]->time + os->segments[i + repeat]->duration)
                    repeat++;
                if (repeat > 0)
                    avio_printf(out, "r=\"%d\" ", repeat);
                avio_printf(out, "/>\n");
                i += 1 + repeat;
                cur_time += (1 + repeat) * seg->duration;
            }
            avio_printf(out, "\t\t\t\t\t</SegmentTimeline>\n");
        }
        avio_printf(out, "\t\t\t\t</SegmentTemplate>\n");
    } else if (c->single_file) {
        avio_printf(out, "\t\t\t\t<BaseURL>%s</BaseURL>\n", os->initfile);
        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
        avio_printf(out, "\t\t\t\t\t<Initialization range=\"%"PRId64"-%"PRId64"\" />\n", os->init_start_pos, os->init_start_pos + os->init_range_length - 1);
        for (i = start_index; i < os->nb_segments; i++) {
            Segment *seg = os->segments[i];
            avio_printf(out, "\t\t\t\t\t<SegmentURL mediaRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->range_length - 1);
            if (seg->index_length)
                avio_printf(out, "indexRange=\"%"PRId64"-%"PRId64"\" ", seg->start_pos, seg->start_pos + seg->index_length - 1);
            avio_printf(out, "/>\n");
        }
        avio_printf(out, "\t\t\t\t</SegmentList>\n");
    } else {
        avio_printf(out, "\t\t\t\t<SegmentList timescale=\"%d\" duration=\"%"PRId64"\" startNumber=\"%d\">\n", AV_TIME_BASE, c->last_duration, start_number);
        avio_printf(out, "\t\t\t\t\t<Initialization sourceURL=\"%s\" />\n", os->initfile);
        for (i = start_index; i < os->nb_segments; i++) {
            Segment *seg = os->segments[i];
            avio_printf(out, "\t\t\t\t\t<SegmentURL media=\"%s\" />\n", seg->file);
        }
        avio_printf(out, "\t\t\t\t</SegmentList>\n");
    }
    if (!c->lhls || final) {
        write_hls_media_playlist(os, s, representation_id, final, NULL);
    }
}
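
/* Return a newly allocated copy of str with the XML special characters
 * (&, <, >, ', ") replaced by their entities; the caller frees the result. */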
static char *xmlescape(const char *str) {
    int outlen = strlen(str)*3/2 + 6;
    char *out = av_realloc(NULL, outlen + 1);
    int pos = 0;
    if (!out)
        return NULL;
    for (; *str; str++) {
        if (pos + 6 > outlen) {
            char *tmp;
            outlen = 2 * outlen + 6;
            tmp = av_realloc(out, outlen + 1);
            if (!tmp) {
                av_free(out);
                return NULL;
            }
            out = tmp;
        }
        if (*str == '&') {
            memcpy(&out[pos], "&amp;", 5);
            pos += 5;
        } else if (*str == '<') {
            memcpy(&out[pos], "&lt;", 4);
            pos += 4;
        } else if (*str == '>') {
            memcpy(&out[pos], "&gt;", 4);
            pos += 4;
        } else if (*str == '\'') {
            memcpy(&out[pos], "&apos;", 6);
            pos += 6;
        } else if (*str == '\"') {
            memcpy(&out[pos], "&quot;", 6);
            pos += 6;
        } else {
            out[pos++] = *str;
        }
    }
    out[pos] = '\0';
    return out;
}
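
/* Write an AV_TIME_BASE duration as an ISO 8601 / MPD duration with one
 * decimal of seconds, e.g. "PT1M3.5S". */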
static void write_time(AVIOContext *out, int64_t time)
{
    int seconds = time / AV_TIME_BASE;
    int fractions = time % AV_TIME_BASE;
    int minutes = seconds / 60;
    int hours = minutes / 60;
    seconds %= 60;
    minutes %= 60;
    avio_printf(out, "PT");
    if (hours)
        avio_printf(out, "%dH", hours);
    if (hours || minutes)
        avio_printf(out, "%dM", minutes);
    avio_printf(out, "%d.%dS", seconds, fractions / (AV_TIME_BASE / 10));
}

static void format_date_now(char *buf, int size)
{
    struct tm *ptm, tmbuf;
    int64_t time_us = av_gettime();
    int64_t time_ms = time_us / 1000;
    const time_t time_s = time_ms / 1000;
    int millisec = time_ms - (time_s * 1000);
    ptm = gmtime_r(&time_s, &tmbuf);
    if (ptm) {
        int len;
        if (!strftime(buf, size, "%Y-%m-%dT%H:%M:%S", ptm)) {
            buf[0] = '\0';
            return;
        }
        len = strlen(buf);
        snprintf(buf + len, size - len, ".%03dZ", millisec);
    }
}
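
/* Write one AdaptationSet element and a Representation for every stream
 * assigned to it, including the per-stream segment list or template. */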
static int write_adaptation_set(AVFormatContext *s, AVIOContext *out, int as_index,
                                int final)
{
    DASHContext *c = s->priv_data;
    AdaptationSet *as = &c->as[as_index];
    AVDictionaryEntry *lang, *role;
    int i;

    avio_printf(out, "\t\t<AdaptationSet id=\"%s\" contentType=\"%s\" segmentAlignment=\"true\" bitstreamSwitching=\"true\"",
                as->id, as->media_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    if (as->media_type == AVMEDIA_TYPE_VIDEO && as->max_frame_rate.num && !as->ambiguous_frame_rate && av_cmp_q(as->min_frame_rate, as->max_frame_rate) < 0)
        avio_printf(out, " maxFrameRate=\"%d/%d\"", as->max_frame_rate.num, as->max_frame_rate.den);
    lang = av_dict_get(as->metadata, "language", NULL, 0);
    if (lang)
        avio_printf(out, " lang=\"%s\"", lang->value);
    avio_printf(out, ">\n");

    role = av_dict_get(as->metadata, "role", NULL, 0);
    if (role)
        avio_printf(out, "\t\t\t<Role schemeIdUri=\"urn:mpeg:dash:role:2011\" value=\"%s\"/>\n", role->value);

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        char bandwidth_str[64] = {'\0'};

        if (os->as_idx - 1 != as_index)
            continue;

        if (os->bit_rate > 0)
            snprintf(bandwidth_str, sizeof(bandwidth_str), " bandwidth=\"%d\"",
                     os->bit_rate);

        if (as->media_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *st = s->streams[i];
            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"video/%s\" codecs=\"%s\"%s width=\"%d\" height=\"%d\"",
                        i, os->format_name, os->codec_str, bandwidth_str, s->streams[i]->codecpar->width, s->streams[i]->codecpar->height);
            if (st->avg_frame_rate.num)
                avio_printf(out, " frameRate=\"%d/%d\"", st->avg_frame_rate.num, st->avg_frame_rate.den);
            avio_printf(out, ">\n");
        } else {
            avio_printf(out, "\t\t\t<Representation id=\"%d\" mimeType=\"audio/%s\" codecs=\"%s\"%s audioSamplingRate=\"%d\">\n",
                        i, os->format_name, os->codec_str, bandwidth_str, s->streams[i]->codecpar->sample_rate);
            avio_printf(out, "\t\t\t\t<AudioChannelConfiguration schemeIdUri=\"urn:mpeg:dash:23003:3:audio_channel_configuration:2011\" value=\"%d\" />\n",
                        s->streams[i]->codecpar->channels);
        }
        output_segment_list(os, out, s, i, final);
        avio_printf(out, "\t\t\t</Representation>\n");
    }
    avio_printf(out, "\t\t</AdaptationSet>\n");

    return 0;
}

static int add_adaptation_set(AVFormatContext *s, AdaptationSet **as, enum AVMediaType type)
{
    DASHContext *c = s->priv_data;

    void *mem = av_realloc(c->as, sizeof(*c->as) * (c->nb_as + 1));
    if (!mem)
        return AVERROR(ENOMEM);
    c->as = mem;
    ++c->nb_as;

    *as = &c->as[c->nb_as - 1];
    memset(*as, 0, sizeof(**as));
    (*as)->media_type = type;

    return 0;
}

static int adaptation_set_add_stream(AVFormatContext *s, int as_idx, int i)
{
    DASHContext *c = s->priv_data;
    AdaptationSet *as = &c->as[as_idx - 1];
    OutputStream *os = &c->streams[i];

    if (as->media_type != s->streams[i]->codecpar->codec_type) {
        av_log(s, AV_LOG_ERROR, "Codec type of stream %d doesn't match AdaptationSet's media type\n", i);
        return AVERROR(EINVAL);
    } else if (os->as_idx) {
        av_log(s, AV_LOG_ERROR, "Stream %d is already assigned to an AdaptationSet\n", i);
        return AVERROR(EINVAL);
    }
    os->as_idx = as_idx;

    return 0;
}
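
/* Parse the adaptation_sets option, e.g. "id=0,streams=0,1 id=1,streams=v"
 * ("v"/"a" map all video/audio streams). Without the option, every stream
 * gets its own AdaptationSet. */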
static int parse_adaptation_sets(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    const char *p = c->adaptation_sets;
    enum { new_set, parse_id, parsing_streams } state;
    AdaptationSet *as;
    int i, n, ret;

    // default: one AdaptationSet for each stream
    if (!p) {
        for (i = 0; i < s->nb_streams; i++) {
            if ((ret = add_adaptation_set(s, &as, s->streams[i]->codecpar->codec_type)) < 0)
                return ret;
            snprintf(as->id, sizeof(as->id), "%d", i);

            c->streams[i].as_idx = c->nb_as;
        }
        goto end;
    }

    // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
    state = new_set;
    while (*p) {
        if (*p == ' ') {
            p++;
            continue;
        } else if (state == new_set && av_strstart(p, "id=", &p)) {
            if ((ret = add_adaptation_set(s, &as, AVMEDIA_TYPE_UNKNOWN)) < 0)
                return ret;

            n = strcspn(p, ",");
            snprintf(as->id, sizeof(as->id), "%.*s", n, p);

            p += n;
            if (*p)
                p++;
            state = parse_id;
        } else if (state == parse_id && av_strstart(p, "streams=", &p)) {
            state = parsing_streams;
        } else if (state == parsing_streams) {
            AdaptationSet *as = &c->as[c->nb_as - 1];
            char idx_str[8], *end_str;

            n = strcspn(p, " ,");
            snprintf(idx_str, sizeof(idx_str), "%.*s", n, p);
            p += n;

            // if value is "a" or "v", map all streams of that type
            if (as->media_type == AVMEDIA_TYPE_UNKNOWN && (idx_str[0] == 'v' || idx_str[0] == 'a')) {
                enum AVMediaType type = (idx_str[0] == 'v') ? AVMEDIA_TYPE_VIDEO : AVMEDIA_TYPE_AUDIO;
                av_log(s, AV_LOG_DEBUG, "Map all streams of type %s\n", idx_str);

                for (i = 0; i < s->nb_streams; i++) {
                    if (s->streams[i]->codecpar->codec_type != type)
                        continue;

                    as->media_type = s->streams[i]->codecpar->codec_type;

                    if ((ret = adaptation_set_add_stream(s, c->nb_as, i)) < 0)
                        return ret;
                }
            } else { // select single stream
                i = strtol(idx_str, &end_str, 10);
                if (idx_str == end_str || i < 0 || i >= s->nb_streams) {
                    av_log(s, AV_LOG_ERROR, "Selected stream \"%s\" not found!\n", idx_str);
                    return AVERROR(EINVAL);
                }
                av_log(s, AV_LOG_DEBUG, "Map stream %d\n", i);

                if (as->media_type == AVMEDIA_TYPE_UNKNOWN) {
                    as->media_type = s->streams[i]->codecpar->codec_type;
                }

                if ((ret = adaptation_set_add_stream(s, c->nb_as, i)) < 0)
                    return ret;
            }

            if (*p == ' ')
                state = new_set;
            if (*p)
                p++;
        } else {
            return AVERROR(EINVAL);
        }
    }

end:
    // check for unassigned streams
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        if (!os->as_idx) {
            av_log(s, AV_LOG_ERROR, "Stream %d is not mapped to an AdaptationSet\n", i);
            return AVERROR(EINVAL);
        }
    }
    return 0;
}
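
/* Write the MPD and, when hls_playlist is set, the HLS master playlist,
 * going through a ".tmp" file plus rename whenever the output is a plain file. */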
static int write_manifest(AVFormatContext *s, int final)
{
    DASHContext *c = s->priv_data;
    AVIOContext *out;
    char temp_filename[1024];
    int ret, i;
    const char *proto = avio_find_protocol_name(s->url);
    int use_rename = proto && !strcmp(proto, "file");
    static unsigned int warned_non_file = 0;
    AVDictionaryEntry *title = av_dict_get(s->metadata, "title", NULL, 0);
    AVDictionary *opts = NULL;

    if (!use_rename && !warned_non_file++)
        av_log(s, AV_LOG_ERROR, "Cannot use rename on non file protocol, this may lead to races and temporary partial files\n");

    snprintf(temp_filename, sizeof(temp_filename), use_rename ? "%s.tmp" : "%s", s->url);
    set_http_options(&opts, c);
    ret = dashenc_io_open(s, &c->mpd_out, temp_filename, &opts);
    av_dict_free(&opts);
    if (ret < 0) {
        return handle_io_open_error(s, ret, temp_filename);
    }
    out = c->mpd_out;
    avio_printf(out, "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n");
    avio_printf(out, "<MPD xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n"
                "\txmlns=\"urn:mpeg:dash:schema:mpd:2011\"\n"
                "\txmlns:xlink=\"http://www.w3.org/1999/xlink\"\n"
                "\txsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011 http://standards.iso.org/ittf/PubliclyAvailableStandards/MPEG-DASH_schema_files/DASH-MPD.xsd\"\n"
                "\tprofiles=\"urn:mpeg:dash:profile:isoff-live:2011\"\n"
                "\ttype=\"%s\"\n", final ? "static" : "dynamic");
    if (final) {
        avio_printf(out, "\tmediaPresentationDuration=\"");
        write_time(out, c->total_duration);
        avio_printf(out, "\"\n");
    } else {
        int64_t update_period = c->last_duration / AV_TIME_BASE;
        char now_str[100];
        if (c->use_template && !c->use_timeline)
            update_period = 500;
        avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", update_period);
        avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", c->last_duration / AV_TIME_BASE);
        if (c->availability_start_time[0])
            avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", c->availability_start_time);
        format_date_now(now_str, sizeof(now_str));
        if (now_str[0])
            avio_printf(out, "\tpublishTime=\"%s\"\n", now_str);
        if (c->window_size && c->use_template) {
            avio_printf(out, "\ttimeShiftBufferDepth=\"");
            write_time(out, c->last_duration * c->window_size);
            avio_printf(out, "\"\n");
        }
    }
    avio_printf(out, "\tminBufferTime=\"");
    write_time(out, c->last_duration * 2);
    avio_printf(out, "\">\n");
    avio_printf(out, "\t<ProgramInformation>\n");
    if (title) {
        char *escaped = xmlescape(title->value);
        avio_printf(out, "\t\t<Title>%s</Title>\n", escaped);
        av_free(escaped);
    }
    avio_printf(out, "\t</ProgramInformation>\n");

    if (c->window_size && s->nb_streams > 0 && c->streams[0].nb_segments > 0 && !c->use_template) {
        OutputStream *os = &c->streams[0];
        int start_index = FFMAX(os->nb_segments - c->window_size, 0);
        int64_t start_time = av_rescale_q(os->segments[start_index]->time, s->streams[0]->time_base, AV_TIME_BASE_Q);
        avio_printf(out, "\t<Period id=\"0\" start=\"");
        write_time(out, start_time);
        avio_printf(out, "\">\n");
    } else {
        avio_printf(out, "\t<Period id=\"0\" start=\"PT0.0S\">\n");
    }

    for (i = 0; i < c->nb_as; i++) {
        if ((ret = write_adaptation_set(s, out, i, final)) < 0)
            return ret;
    }
    avio_printf(out, "\t</Period>\n");

    if (c->utc_timing_url)
        avio_printf(out, "\t<UTCTiming schemeIdUri=\"urn:mpeg:dash:utc:http-xsdate:2014\" value=\"%s\"/>\n", c->utc_timing_url);

    avio_printf(out, "</MPD>\n");
    avio_flush(out);
    dashenc_io_close(s, &c->mpd_out, temp_filename);

    if (use_rename) {
        if ((ret = avpriv_io_move(temp_filename, s->url)) < 0)
            return ret;
    }

    if (c->hls_playlist) {
        char filename_hls[1024];
        const char *audio_group = "A1";
        char audio_codec_str[128] = "\0";
        int is_default = 1;
        int max_audio_bitrate = 0;

        // Publish master playlist only the configured rate
        if (c->master_playlist_created && (!c->master_publish_rate ||
            c->streams[0].segment_index % c->master_publish_rate))
            return 0;

        if (*c->dirname)
            snprintf(filename_hls, sizeof(filename_hls), "%smaster.m3u8", c->dirname);
        else
            snprintf(filename_hls, sizeof(filename_hls), "master.m3u8");

        snprintf(temp_filename, sizeof(temp_filename), use_rename ? "%s.tmp" : "%s", filename_hls);

        set_http_options(&opts, c);
        ret = dashenc_io_open(s, &c->m3u8_out, temp_filename, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            return handle_io_open_error(s, ret, temp_filename);
        }

        ff_hls_write_playlist_version(c->m3u8_out, 7);

        for (i = 0; i < s->nb_streams; i++) {
            char playlist_file[64];
            AVStream *st = s->streams[i];
            OutputStream *os = &c->streams[i];
            if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
                continue;
            if (os->segment_type != SEGMENT_TYPE_MP4)
                continue;
            get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i);
            ff_hls_write_audio_rendition(c->m3u8_out, (char *)audio_group,
                                         playlist_file, NULL, i, is_default);
            max_audio_bitrate = FFMAX(st->codecpar->bit_rate +
                                      os->muxer_overhead, max_audio_bitrate);
            if (!av_strnstr(audio_codec_str, os->codec_str, sizeof(audio_codec_str))) {
                if (strlen(audio_codec_str))
                    av_strlcat(audio_codec_str, ",", sizeof(audio_codec_str));
                av_strlcat(audio_codec_str, os->codec_str, sizeof(audio_codec_str));
            }
            is_default = 0;
        }

        for (i = 0; i < s->nb_streams; i++) {
            char playlist_file[64];
            char codec_str[128];
            AVStream *st = s->streams[i];
            OutputStream *os = &c->streams[i];
            char *agroup = NULL;
            char *codec_str_ptr = NULL;
            int stream_bitrate = st->codecpar->bit_rate + os->muxer_overhead;
            if (st->codecpar->codec_type != AVMEDIA_TYPE_VIDEO)
                continue;
            if (os->segment_type != SEGMENT_TYPE_MP4)
                continue;
            av_strlcpy(codec_str, os->codec_str, sizeof(codec_str));
            if (max_audio_bitrate) {
                agroup = (char *)audio_group;
                stream_bitrate += max_audio_bitrate;
                av_strlcat(codec_str, ",", sizeof(codec_str));
                av_strlcat(codec_str, audio_codec_str, sizeof(codec_str));
            }
            if (st->codecpar->codec_id != AV_CODEC_ID_HEVC) {
                codec_str_ptr = codec_str;
            }
            get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, i);
            ff_hls_write_stream_info(st, c->m3u8_out, stream_bitrate,
                                     playlist_file, agroup,
                                     codec_str_ptr, NULL);
        }
        dashenc_io_close(s, &c->m3u8_out, temp_filename);
        if (use_rename)
            if ((ret = avpriv_io_move(temp_filename, filename_hls)) < 0)
                return ret;
        c->master_playlist_created = 1;
    }

    return 0;
}

static int dict_copy_entry(AVDictionary **dst, const AVDictionary *src, const char *key)
{
    AVDictionaryEntry *entry = av_dict_get(src, key, NULL, 0);
    if (entry)
        av_dict_set(dst, key, entry->value, AV_DICT_DONT_OVERWRITE);
    return 0;
}
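
/* Muxer init: validate options, create one child muxer (mov/mp4 or WebM) per
 * stream, open its init segment and pass down the relevant movflags or
 * matroska "dash" options. */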
static int dash_init(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int ret = 0, i;
    char *ptr;
    char basename[1024];

    c->nr_of_streams_to_flush = 0;
    if (c->single_file_name)
        c->single_file = 1;
    if (c->single_file)
        c->use_template = 0;

#if FF_API_DASH_MIN_SEG_DURATION
    if (c->min_seg_duration != 5000000) {
        av_log(s, AV_LOG_WARNING, "The min_seg_duration option is deprecated and will be removed. Please use the -seg_duration\n");
        c->seg_duration = c->min_seg_duration;
    }
#endif
    if (c->lhls && s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(s, AV_LOG_ERROR,
               "LHLS is experimental, Please set -strict experimental in order to enable it.\n");
        return AVERROR_EXPERIMENTAL;
    }

    if (c->lhls && !c->streaming) {
        av_log(s, AV_LOG_WARNING, "LHLS option will be ignored as streaming is not enabled\n");
        c->lhls = 0;
    }

    if (c->lhls && !c->hls_playlist) {
        av_log(s, AV_LOG_WARNING, "LHLS option will be ignored as hls_playlist is not enabled\n");
        c->lhls = 0;
    }

    if (c->global_sidx && !c->single_file) {
        av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as single_file is not enabled\n");
        c->global_sidx = 0;
    }

    if (c->global_sidx && c->streaming) {
        av_log(s, AV_LOG_WARNING, "Global SIDX option will be ignored as streaming is enabled\n");
        c->global_sidx = 0;
    }

    av_strlcpy(c->dirname, s->url, sizeof(c->dirname));
    ptr = strrchr(c->dirname, '/');
    if (ptr) {
        av_strlcpy(basename, &ptr[1], sizeof(basename));
        ptr[1] = '\0';
    } else {
        c->dirname[0] = '\0';
        av_strlcpy(basename, s->url, sizeof(basename));
    }

    ptr = strrchr(basename, '.');
    if (ptr)
        *ptr = '\0';

    c->streams = av_mallocz(sizeof(*c->streams) * s->nb_streams);
    if (!c->streams)
        return AVERROR(ENOMEM);

    if ((ret = parse_adaptation_sets(s)) < 0)
        return ret;

    if ((ret = init_segment_types(s)) < 0)
        return ret;

    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        AdaptationSet *as = &c->as[os->as_idx - 1];
        AVFormatContext *ctx;
        AVStream *st;
        AVDictionary *opts = NULL;
        char filename[1024];

        os->bit_rate = s->streams[i]->codecpar->bit_rate;
        if (!os->bit_rate) {
            int level = s->strict_std_compliance >= FF_COMPLIANCE_STRICT ?
                        AV_LOG_ERROR : AV_LOG_WARNING;
            av_log(s, level, "No bit rate set for stream %d\n", i);
            if (s->strict_std_compliance >= FF_COMPLIANCE_STRICT)
                return AVERROR(EINVAL);
        }

        // copy AdaptationSet language and role from stream metadata
        dict_copy_entry(&as->metadata, s->streams[i]->metadata, "language");
        dict_copy_entry(&as->metadata, s->streams[i]->metadata, "role");

        ctx = avformat_alloc_context();
        if (!ctx)
            return AVERROR(ENOMEM);

        if (c->init_seg_name) {
            os->init_seg_name = av_strireplace(c->init_seg_name, "$ext$", os->extension_name);
            if (!os->init_seg_name)
                return AVERROR(ENOMEM);
        }
        if (c->media_seg_name) {
            os->media_seg_name = av_strireplace(c->media_seg_name, "$ext$", os->extension_name);
            if (!os->media_seg_name)
                return AVERROR(ENOMEM);
        }
        if (c->single_file_name) {
            os->single_file_name = av_strireplace(c->single_file_name, "$ext$", os->extension_name);
            if (!os->single_file_name)
                return AVERROR(ENOMEM);
        }

        if (os->segment_type == SEGMENT_TYPE_WEBM) {
            if ((!c->single_file && check_file_extension(os->init_seg_name, os->format_name) != 0) ||
                (!c->single_file && check_file_extension(os->media_seg_name, os->format_name) != 0) ||
                (c->single_file && check_file_extension(os->single_file_name, os->format_name) != 0)) {
                av_log(s, AV_LOG_WARNING,
                       "One or many segment file names doesn't end with .webm. "
                       "Override -init_seg_name and/or -media_seg_name and/or "
                       "-single_file_name to end with the extension .webm\n");
            }
            if (c->streaming) {
                // Streaming not supported as matroskaenc buffers internally before writing the output
                av_log(s, AV_LOG_WARNING, "One or more streams in WebM output format. Streaming option will be ignored\n");
                c->streaming = 0;
            }
        }

        ctx->oformat = av_guess_format(os->format_name, NULL, NULL);
        if (!ctx->oformat)
            return AVERROR_MUXER_NOT_FOUND;
        os->ctx = ctx;
        ctx->interrupt_callback = s->interrupt_callback;
        ctx->opaque = s->opaque;
        ctx->io_close = s->io_close;
        ctx->io_open = s->io_open;
        ctx->strict_std_compliance = s->strict_std_compliance;

        if (!(st = avformat_new_stream(ctx, NULL)))
            return AVERROR(ENOMEM);
        avcodec_parameters_copy(st->codecpar, s->streams[i]->codecpar);
        st->sample_aspect_ratio = s->streams[i]->sample_aspect_ratio;
        st->time_base = s->streams[i]->time_base;
        st->avg_frame_rate = s->streams[i]->avg_frame_rate;
        ctx->avoid_negative_ts = s->avoid_negative_ts;
        ctx->flags = s->flags;

        if (c->single_file) {
            if (os->single_file_name)
                ff_dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), os->single_file_name, i, 0, os->bit_rate, 0);
            else
                snprintf(os->initfile, sizeof(os->initfile), "%s-stream%d.%s", basename, i, os->format_name);
        } else {
            ff_dash_fill_tmpl_params(os->initfile, sizeof(os->initfile), os->init_seg_name, i, 0, os->bit_rate, 0);
        }
        snprintf(filename, sizeof(filename), "%s%s", c->dirname, os->initfile);
        set_http_options(&opts, c);
        if (!c->single_file) {
            if ((ret = avio_open_dyn_buf(&ctx->pb)) < 0)
                return ret;
            ret = s->io_open(s, &os->out, filename, AVIO_FLAG_WRITE, &opts);
        } else {
            ctx->url = av_strdup(filename);
            ret = avio_open2(&ctx->pb, filename, AVIO_FLAG_WRITE, NULL, &opts);
        }
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        os->init_start_pos = 0;

        if (c->format_options_str) {
            ret = av_dict_parse_string(&opts, c->format_options_str, "=", ":", 0);
            if (ret < 0)
                return ret;
        }

        if (os->segment_type == SEGMENT_TYPE_MP4) {
            if (c->streaming)
                // frag_every_frame : Allows lower latency streaming
                // skip_sidx : Reduce bitrate overhead
                // skip_trailer : Avoids growing memory usage with time
                av_dict_set(&opts, "movflags", "frag_every_frame+dash+delay_moov+skip_sidx+skip_trailer", 0);
            else {
                if (c->global_sidx)
                    av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov+global_sidx+skip_trailer", 0);
                else
                    av_dict_set(&opts, "movflags", "frag_custom+dash+delay_moov+skip_trailer", 0);
            }
        } else {
            av_dict_set_int(&opts, "cluster_time_limit", c->seg_duration / 1000, 0);
            av_dict_set_int(&opts, "cluster_size_limit", 5 * 1024 * 1024, 0); // set a large cluster size limit
            av_dict_set_int(&opts, "dash", 1, 0);
            av_dict_set_int(&opts, "dash_track_number", i + 1, 0);
            av_dict_set_int(&opts, "live", 1, 0);
        }
        ret = avformat_init_output(ctx, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        os->ctx_inited = 1;
        avio_flush(ctx->pb);

        av_log(s, AV_LOG_VERBOSE, "Representation %d init segment will be written to: %s\n", i, filename);

        s->streams[i]->time_base = st->time_base;
        // If the muxer wants to shift timestamps, request to have them shifted
        // already before being handed to this muxer, so we don't have mismatches
        // between the MPD and the actual segments.
        s->avoid_negative_ts = ctx->avoid_negative_ts;
        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVRational avg_frame_rate = s->streams[i]->avg_frame_rate;
            if (avg_frame_rate.num > 0) {
                if (av_cmp_q(avg_frame_rate, as->min_frame_rate) < 0)
                    as->min_frame_rate = avg_frame_rate;
                if (av_cmp_q(as->max_frame_rate, avg_frame_rate) < 0)
                    as->max_frame_rate = avg_frame_rate;
            } else {
                as->ambiguous_frame_rate = 1;
            }
            c->has_video = 1;
        }

        set_codec_str(s, st->codecpar, &st->avg_frame_rate, os->codec_str,
                      sizeof(os->codec_str));
        os->first_pts = AV_NOPTS_VALUE;
        os->max_pts = AV_NOPTS_VALUE;
        os->last_dts = AV_NOPTS_VALUE;
        os->segment_index = 1;

        if (s->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            c->nr_of_streams_to_flush++;
    }

    if (!c->has_video && c->seg_duration <= 0) {
        av_log(s, AV_LOG_WARNING, "no video stream and no seg duration set\n");
        return AVERROR(EINVAL);
    }

    c->nr_of_streams_flushed = 0;

    return 0;
}

static int dash_write_header(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int i, ret;
    for (i = 0; i < s->nb_streams; i++) {
        OutputStream *os = &c->streams[i];
        if ((ret = avformat_write_header(os->ctx, NULL)) < 0)
            return ret;

        // Flush init segment
        // Only for WebM segment, since for mp4 delay_moov is set and
        // the init segment is thus flushed after the first packets.
        if (os->segment_type == SEGMENT_TYPE_WEBM &&
            (ret = flush_init_segment(s, os)) < 0)
            return ret;
    }
    return ret;
}
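
/* Append a finished segment to the stream's segment list, growing the array
 * as needed and optionally correcting a lagging segment index. */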
static int add_segment(OutputStream *os, const char *file,
                       int64_t time, int64_t duration,
                       int64_t start_pos, int64_t range_length,
                       int64_t index_length, int next_exp_index)
{
    int err;
    Segment *seg;
    if (os->nb_segments >= os->segments_size) {
        os->segments_size = (os->segments_size + 1) * 2;
        if ((err = av_reallocp(&os->segments, sizeof(*os->segments) *
                               os->segments_size)) < 0) {
            os->segments_size = 0;
            os->nb_segments = 0;
            return err;
        }
    }
    seg = av_mallocz(sizeof(*seg));
    if (!seg)
        return AVERROR(ENOMEM);
    av_strlcpy(seg->file, file, sizeof(seg->file));
    seg->time = time;
    seg->duration = duration;
    if (seg->time < 0) { // If pts<0, it is expected to be cut away with an edit list
        seg->duration += seg->time;
        seg->time = 0;
    }
    seg->start_pos = start_pos;
    seg->range_length = range_length;
    seg->index_length = index_length;
    os->segments[os->nb_segments++] = seg;
    os->segment_index++;
    //correcting the segment index if it has fallen behind the expected value
    if (os->segment_index < next_exp_index) {
        av_log(NULL, AV_LOG_WARNING, "Correcting the segment index after file %s: current=%d corrected=%d\n",
               file, os->segment_index, next_exp_index);
        os->segment_index = next_exp_index;
    }
    return 0;
}
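
/* Write the 24-byte 'styp' box (brands msdh/msix) that starts each mp4 media
 * segment. */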
static void write_styp(AVIOContext *pb)
{
    avio_wb32(pb, 24);
    ffio_wfourcc(pb, "styp");
    ffio_wfourcc(pb, "msdh");
    avio_wb32(pb, 0); /* minor */
    ffio_wfourcc(pb, "msdh");
    ffio_wfourcc(pb, "msix");
}

static void find_index_range(AVFormatContext *s, const char *full_path,
                             int64_t pos, int *index_length)
{
    uint8_t buf[8];
    AVIOContext *pb;
    int ret;

    ret = s->io_open(s, &pb, full_path, AVIO_FLAG_READ, NULL);
    if (ret < 0)
        return;
    if (avio_seek(pb, pos, SEEK_SET) != pos) {
        ff_format_io_close(s, &pb);
        return;
    }
    ret = avio_read(pb, buf, 8);
    ff_format_io_close(s, &pb);
    if (ret < 8)
        return;
    if (AV_RL32(&buf[4]) != MKTAG('s', 'i', 'd', 'x'))
        return;
    *index_length = AV_RB32(&buf[0]);
}

static int update_stream_extradata(AVFormatContext *s, OutputStream *os,
                                   AVCodecParameters *par,
                                   AVRational *frame_rate)
{
    uint8_t *extradata;

    if (os->ctx->streams[0]->codecpar->extradata_size || !par->extradata_size)
        return 0;

    extradata = av_malloc(par->extradata_size);
    if (!extradata)
        return AVERROR(ENOMEM);

    memcpy(extradata, par->extradata, par->extradata_size);

    os->ctx->streams[0]->codecpar->extradata = extradata;
    os->ctx->streams[0]->codecpar->extradata_size = par->extradata_size;

    set_codec_str(s, par, frame_rate, os->codec_str, sizeof(os->codec_str));

    return 0;
}

static void dashenc_delete_file(AVFormatContext *s, char *filename) {
    DASHContext *c = s->priv_data;
    int http_base_proto = ff_is_http_proto(filename);

    if (http_base_proto) {
        AVIOContext *out = NULL;
        AVDictionary *http_opts = NULL;

        set_http_options(&http_opts, c);
        av_dict_set(&http_opts, "method", "DELETE", 0);

        if (dashenc_io_open(s, &out, filename, &http_opts) < 0) {
            av_log(s, AV_LOG_ERROR, "failed to delete %s\n", filename);
        }

        av_dict_free(&http_opts);
        ff_format_io_close(s, &out);
    } else {
        int res = avpriv_io_delete(filename);
        if (res < 0) {
            char errbuf[AV_ERROR_MAX_STRING_SIZE];
            av_strerror(res, errbuf, sizeof(errbuf));
            av_log(s, (res == AVERROR(ENOENT) ? AV_LOG_WARNING : AV_LOG_ERROR), "failed to delete %s: %s\n", filename, errbuf);
        }
    }
}

static int dashenc_delete_segment_file(AVFormatContext *s, const char* file)
{
    DASHContext *c = s->priv_data;
    size_t dirname_len, file_len;
    char filename[1024];

    dirname_len = strlen(c->dirname);
    if (dirname_len >= sizeof(filename)) {
        av_log(s, AV_LOG_WARNING, "Cannot delete segments as the directory path is too long: %"PRIu64" characters: %s\n",
               (uint64_t)dirname_len, c->dirname);
        return AVERROR(ENAMETOOLONG);
    }

    memcpy(filename, c->dirname, dirname_len);

    file_len = strlen(file);
    if ((dirname_len + file_len) >= sizeof(filename)) {
        av_log(s, AV_LOG_WARNING, "Cannot delete segments as the path is too long: %"PRIu64" characters: %s%s\n",
               (uint64_t)(dirname_len + file_len), c->dirname, file);
        return AVERROR(ENAMETOOLONG);
    }

    memcpy(filename + dirname_len, file, file_len + 1); // include the terminating zero

    dashenc_delete_file(s, filename);

    return 0;
}

static inline void dashenc_delete_media_segments(AVFormatContext *s, OutputStream *os, int remove_count)
{
    for (int i = 0; i < remove_count; ++i) {
        dashenc_delete_segment_file(s, os->segments[i]->file);

        // Delete the segment regardless of whether the file was successfully deleted
        av_free(os->segments[i]);
    }

    os->nb_segments -= remove_count;
    memmove(os->segments, os->segments + remove_count, os->nb_segments * sizeof(*os->segments));
}
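
/* Close the current segment of every stream that needs flushing (the single
 * video stream that hit a keyframe plus all audio streams, or everything on
 * the final flush), register the new segments, prune segments outside the
 * window and rewrite the manifests. */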
  1351. static int dash_flush(AVFormatContext *s, int final, int stream)
  1352. {
  1353. DASHContext *c = s->priv_data;
  1354. int i, ret = 0;
  1355. const char *proto = avio_find_protocol_name(s->url);
  1356. int use_rename = proto && !strcmp(proto, "file");
  1357. int cur_flush_segment_index = 0, next_exp_index = -1;
  1358. if (stream >= 0) {
  1359. cur_flush_segment_index = c->streams[stream].segment_index;
  1360. //finding the next segment's expected index, based on the current pts value
  1361. if (c->use_template && !c->use_timeline && c->index_correction &&
  1362. c->streams[stream].last_pts != AV_NOPTS_VALUE &&
  1363. c->streams[stream].first_pts != AV_NOPTS_VALUE) {
  1364. int64_t pts_diff = av_rescale_q(c->streams[stream].last_pts -
  1365. c->streams[stream].first_pts,
  1366. s->streams[stream]->time_base,
  1367. AV_TIME_BASE_Q);
  1368. next_exp_index = (pts_diff / c->seg_duration) + 1;
  1369. }
  1370. }
  1371. for (i = 0; i < s->nb_streams; i++) {
  1372. OutputStream *os = &c->streams[i];
  1373. AVStream *st = s->streams[i];
  1374. int range_length, index_length = 0;
  1375. if (!os->packets_written)
  1376. continue;
  1377. // Flush the single stream that got a keyframe right now.
  1378. // Flush all audio streams as well, in sync with video keyframes,
  1379. // but not the other video streams.
  1380. if (stream >= 0 && i != stream) {
  1381. if (s->streams[i]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
  1382. continue;
  1383. // Make sure we don't flush audio streams multiple times, when
  1384. // all video streams are flushed one at a time.
  1385. if (c->has_video && os->segment_index > cur_flush_segment_index)
  1386. continue;
  1387. }
  1388. if (!c->single_file) {
  1389. if (os->segment_type == SEGMENT_TYPE_MP4 && !os->written_len)
  1390. write_styp(os->ctx->pb);
  1391. } else {
  1392. snprintf(os->full_path, sizeof(os->full_path), "%s%s", c->dirname, os->initfile);
  1393. }
  1394. ret = flush_dynbuf(c, os, &range_length);
  1395. if (ret < 0)
  1396. break;
  1397. os->packets_written = 0;
  1398. if (c->single_file) {
  1399. find_index_range(s, os->full_path, os->pos, &index_length);
  1400. } else {
  1401. dashenc_io_close(s, &os->out, os->temp_path);
  1402. if (use_rename) {
  1403. ret = avpriv_io_move(os->temp_path, os->full_path);
  1404. if (ret < 0)
  1405. break;
  1406. }
  1407. }
        if (!os->muxer_overhead)
            os->muxer_overhead = ((int64_t) (range_length - os->total_pkt_size) *
                                  8 * AV_TIME_BASE) /
                                 av_rescale_q(os->max_pts - os->start_pts,
                                              st->time_base, AV_TIME_BASE_Q);
        os->total_pkt_size = 0;

        if (!os->bit_rate) {
            // calculate average bitrate of first segment
            int64_t bitrate = (int64_t) range_length * 8 * AV_TIME_BASE /
                              av_rescale_q(os->max_pts - os->start_pts,
                                           st->time_base, AV_TIME_BASE_Q);
            if (bitrate >= 0)
                os->bit_rate = bitrate;
        }
        add_segment(os, os->filename, os->start_pts, os->max_pts - os->start_pts,
                    os->pos, range_length, index_length, next_exp_index);
        av_log(s, AV_LOG_VERBOSE, "Representation %d media segment %d written to: %s\n",
               i, os->segment_index, os->full_path);

        os->pos += range_length;
    }
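
    // With a bounded manifest window, drop segments that have also aged out of
    // the extra on-disk grace window.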
    if (c->window_size) {
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            int remove_count = os->nb_segments - c->window_size - c->extra_window_size;
            if (remove_count > 0)
                dashenc_delete_media_segments(s, os, remove_count);
        }
    }
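
    // On the final flush, write each chained muxer's trailer. With global_sidx
    // the trailer inserts a SIDX ahead of the media data, so grow the recorded
    // init range and shift the stored segment byte offsets accordingly.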
    if (final) {
        for (i = 0; i < s->nb_streams; i++) {
            OutputStream *os = &c->streams[i];
            if (os->ctx && os->ctx_inited) {
                int64_t file_size = avio_tell(os->ctx->pb);
                av_write_trailer(os->ctx);
                if (c->global_sidx) {
                    int j, start_index, start_number;
                    int64_t sidx_size = avio_tell(os->ctx->pb) - file_size;
                    get_start_index_number(os, c, &start_index, &start_number);
                    if (start_index >= os->nb_segments ||
                        os->segment_type != SEGMENT_TYPE_MP4)
                        continue;
                    os->init_range_length += sidx_size;
                    for (j = start_index; j < os->nb_segments; j++) {
                        Segment *seg = os->segments[j];
                        seg->start_pos += sidx_size;
                    }
                }
            }
        }
    }

    if (ret >= 0) {
        if (c->has_video && !final) {
            c->nr_of_streams_flushed++;
            if (c->nr_of_streams_flushed != c->nr_of_streams_to_flush)
                return ret;

            c->nr_of_streams_flushed = 0;
        }
        ret = write_manifest(s, final);
    }
    return ret;
}
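
/*
 * Per-packet entry point: patch up missing durations and start timestamps,
 * cut a new segment when the boundary conditions are met, hand the packet to
 * the chained mp4/webm muxer, and in streaming mode push the freshly muxed
 * bytes out right away.
 */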
static int dash_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    DASHContext *c = s->priv_data;
    AVStream *st = s->streams[pkt->stream_index];
    OutputStream *os = &c->streams[pkt->stream_index];
    int64_t seg_end_duration, elapsed_duration;
    int ret;

    ret = update_stream_extradata(s, os, st->codecpar, &st->avg_frame_rate);
    if (ret < 0)
        return ret;

    // Fill in a heuristic guess of the packet duration, if none is available.
    // The mp4 muxer will do something similar (for the last packet in a fragment)
    // if nothing is set (setting it for the other packets doesn't hurt).
    // By setting a nonzero duration here, we can be sure that the mp4 muxer won't
    // invoke its heuristic (this doesn't have to be identical to that algorithm),
    // so that we know the exact timestamps of fragments.
    if (!pkt->duration && os->last_dts != AV_NOPTS_VALUE)
        pkt->duration = pkt->dts - os->last_dts;
    os->last_dts = pkt->dts;

    // If forcing the stream to start at 0, the mp4 muxer will set the start
    // timestamps to 0. Do the same here, to avoid mismatches in duration/timestamps.
    if (os->first_pts == AV_NOPTS_VALUE &&
        s->avoid_negative_ts == AVFMT_AVOID_NEG_TS_MAKE_ZERO) {
        pkt->pts -= pkt->dts;
        pkt->dts  = 0;
    }

    if (os->first_pts == AV_NOPTS_VALUE)
        os->first_pts = pkt->pts;
    os->last_pts = pkt->pts;

    if (!c->availability_start_time[0]) {
        int64_t start_time_us = av_gettime();
        c->start_time_s = start_time_us / 1000000;
        format_date_now(c->availability_start_time,
                        sizeof(c->availability_start_time));
    }

    if (!os->availability_time_offset && pkt->duration) {
        int64_t frame_duration = av_rescale_q(pkt->duration, st->time_base,
                                              AV_TIME_BASE_Q);
        os->availability_time_offset = ((double) c->seg_duration -
                                        frame_duration) / AV_TIME_BASE;
    }

    if (c->use_template && !c->use_timeline) {
        elapsed_duration = pkt->pts - os->first_pts;
        seg_end_duration = (int64_t) os->segment_index * c->seg_duration;
    } else {
        elapsed_duration = pkt->pts - os->start_pts;
        seg_end_duration = c->seg_duration;
    }
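
    // Start a new segment only at a keyframe of a video stream (or of any
    // stream when there is no video), once the elapsed duration has reached
    // the target segment duration.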
    if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
        pkt->flags & AV_PKT_FLAG_KEY && os->packets_written &&
        av_compare_ts(elapsed_duration, st->time_base,
                      seg_end_duration, AV_TIME_BASE_Q) >= 0) {
        int64_t prev_duration = c->last_duration;

        c->last_duration = av_rescale_q(pkt->pts - os->start_pts,
                                        st->time_base,
                                        AV_TIME_BASE_Q);
        c->total_duration = av_rescale_q(pkt->pts - os->first_pts,
                                         st->time_base,
                                         AV_TIME_BASE_Q);

        if ((!c->use_timeline || !c->use_template) && prev_duration) {
            if (c->last_duration < prev_duration * 9 / 10 ||
                c->last_duration > prev_duration * 11 / 10) {
                av_log(s, AV_LOG_WARNING,
                       "Segment durations differ too much, enable use_timeline "
                       "and use_template, or keep a stricter keyframe interval\n");
            }
        }

        if ((ret = dash_flush(s, 0, pkt->stream_index)) < 0)
            return ret;
    }

    if (!os->packets_written) {
        // If we wrote a previous segment, adjust the start time of the segment
        // to the end of the previous one (which is the same as the mp4 muxer
        // does). This avoids gaps in the timeline.
        if (os->max_pts != AV_NOPTS_VALUE)
            os->start_pts = os->max_pts;
        else
            os->start_pts = pkt->pts;
    }

    if (os->max_pts == AV_NOPTS_VALUE)
        os->max_pts = pkt->pts + pkt->duration;
    else
        os->max_pts = FFMAX(os->max_pts, pkt->pts + pkt->duration);

    os->packets_written++;
    os->total_pkt_size += pkt->size;

    if ((ret = ff_write_chained(os->ctx, 0, pkt, s, 0)) < 0)
        return ret;
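
    // The init segment can only be finalized after the first packet has gone
    // through the chained muxer; flush it while its length is still unset.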
    if (!os->init_range_length)
        flush_init_segment(s, os);

    // Open the output context when the first frame of a segment is ready.
    if (!c->single_file && os->packets_written == 1) {
        AVDictionary *opts = NULL;
        const char *proto = avio_find_protocol_name(s->url);
        int use_rename = proto && !strcmp(proto, "file");

        os->filename[0] = os->full_path[0] = os->temp_path[0] = '\0';
        ff_dash_fill_tmpl_params(os->filename, sizeof(os->filename),
                                 os->media_seg_name, pkt->stream_index,
                                 os->segment_index, os->bit_rate, os->start_pts);
        snprintf(os->full_path, sizeof(os->full_path), "%s%s", c->dirname,
                 os->filename);
        snprintf(os->temp_path, sizeof(os->temp_path),
                 use_rename ? "%s.tmp" : "%s", os->full_path);
        set_http_options(&opts, c);
        ret = dashenc_io_open(s, &os->out, os->temp_path, &opts);
        av_dict_free(&opts);
        if (ret < 0) {
            return handle_io_open_error(s, ret, os->temp_path);
        }

        if (c->lhls) {
            char *prefetch_url = use_rename ? NULL : os->filename;
            write_hls_media_playlist(os, s, pkt->stream_index, 0, prefetch_url);
        }
    }

    // Write out the data immediately in streaming mode.
    if (c->streaming && os->segment_type == SEGMENT_TYPE_MP4) {
        int len = 0;
        uint8_t *buf = NULL;
        if (!os->written_len)
            write_styp(os->ctx->pb);
        avio_flush(os->ctx->pb);
        len = avio_get_dyn_buf(os->ctx->pb, &buf);
        if (os->out) {
            avio_write(os->out, buf + os->written_len, len - os->written_len);
            avio_flush(os->out);
        }
        os->written_len = len;
    }

    return ret;
}
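
/*
 * Finalize muxing: fall back to a crude duration estimate if no segment was
 * ever cut, flush everything as final, and honor remove_at_exit by deleting
 * the generated segments, playlists and the manifest.
 */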
static int dash_write_trailer(AVFormatContext *s)
{
    DASHContext *c = s->priv_data;
    int i;

    if (s->nb_streams > 0) {
        OutputStream *os = &c->streams[0];
        // If no segments have been written so far, try to do a crude
        // guess of the segment duration
        if (!c->last_duration)
            c->last_duration = av_rescale_q(os->max_pts - os->start_pts,
                                            s->streams[0]->time_base,
                                            AV_TIME_BASE_Q);
        c->total_duration = av_rescale_q(os->max_pts - os->first_pts,
                                         s->streams[0]->time_base,
                                         AV_TIME_BASE_Q);
    }
    dash_flush(s, 1, -1);

    if (c->remove_at_exit) {
        for (i = 0; i < s->nb_streams; ++i) {
            OutputStream *os = &c->streams[i];
            dashenc_delete_media_segments(s, os, os->nb_segments);
            dashenc_delete_segment_file(s, os->initfile);
            if (c->hls_playlist && os->segment_type == SEGMENT_TYPE_MP4) {
                char filename[1024];
                get_hls_playlist_name(filename, sizeof(filename), c->dirname, i);
                dashenc_delete_file(s, filename);
            }
        }
        dashenc_delete_file(s, s->url);

        if (c->hls_playlist && c->master_playlist_created) {
            char filename[1024];
            snprintf(filename, sizeof(filename), "%smaster.m3u8", c->dirname);
            dashenc_delete_file(s, filename);
        }
    }

    return 0;
}
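
/*
 * Delegate check_bitstream to the chained muxer. If the inner muxer sets up
 * bitstream filters for its (single) stream, the filter list is moved over to
 * the corresponding outer stream so the packets get filtered before being
 * chained through.
 */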
static int dash_check_bitstream(struct AVFormatContext *s, const AVPacket *avpkt)
{
    DASHContext *c = s->priv_data;
    OutputStream *os = &c->streams[avpkt->stream_index];
    AVFormatContext *oc = os->ctx;

    if (oc->oformat->check_bitstream) {
        int ret;
        AVPacket pkt = *avpkt;
        pkt.stream_index = 0;
        ret = oc->oformat->check_bitstream(oc, &pkt);
        if (ret == 1) {
            AVStream *st  = s->streams[avpkt->stream_index];
            AVStream *ost = oc->streams[0];
            st->internal->bsfcs     = ost->internal->bsfcs;
            st->internal->nb_bsfcs  = ost->internal->nb_bsfcs;
            ost->internal->bsfcs    = NULL;
            ost->internal->nb_bsfcs = 0;
        }
        return ret;
    }
    return 1;
}
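
/*
 * Illustrative invocation exercising the options below (paths and encoder
 * choices are placeholders, not defaults):
 *
 *   ffmpeg -re -i input.mp4 -map 0 -c:v libx264 -c:a aac -f dash \
 *          -seg_duration 4 -window_size 5 -extra_window_size 2 \
 *          -use_template 1 -use_timeline 1 out.mpd
 */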
#define OFFSET(x) offsetof(DASHContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_size", "number of segments kept in the manifest", OFFSET(window_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, E },
    { "extra_window_size", "number of segments kept outside of the manifest before removing from disk", OFFSET(extra_window_size), AV_OPT_TYPE_INT, { .i64 = 5 }, 0, INT_MAX, E },
#if FF_API_DASH_MIN_SEG_DURATION
    { "min_seg_duration", "minimum segment duration (in microseconds) (will be deprecated)", OFFSET(min_seg_duration), AV_OPT_TYPE_INT, { .i64 = 5000000 }, 0, INT_MAX, E },
#endif
    { "seg_duration", "segment duration (in seconds, fractional value can be set)", OFFSET(seg_duration), AV_OPT_TYPE_DURATION, { .i64 = 5000000 }, 0, INT_MAX, E },
    { "remove_at_exit", "remove all segments when finished", OFFSET(remove_at_exit), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "use_template", "Use SegmentTemplate instead of SegmentList", OFFSET(use_template), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
    { "use_timeline", "Use SegmentTimeline in SegmentTemplate", OFFSET(use_timeline), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
    { "single_file", "Store all segments in one file, accessed using byte ranges", OFFSET(single_file), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "single_file_name", "DASH-templated name to be used for baseURL. Implies storing all segments in one file, accessed using byte ranges", OFFSET(single_file_name), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "init_seg_name", "DASH-templated name to be used for the initialization segment", OFFSET(init_seg_name), AV_OPT_TYPE_STRING, { .str = "init-stream$RepresentationID$.$ext$" }, 0, 0, E },
    { "media_seg_name", "DASH-templated name to be used for the media segments", OFFSET(media_seg_name), AV_OPT_TYPE_STRING, { .str = "chunk-stream$RepresentationID$-$Number%05d$.$ext$" }, 0, 0, E },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, E },
    { "method", "set the HTTP method", OFFSET(method), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "http_user_agent", "override User-Agent field in HTTP header", OFFSET(user_agent), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "http_persistent", "Use persistent HTTP connections", OFFSET(http_persistent), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "hls_playlist", "Generate HLS playlist files (master.m3u8, media_%d.m3u8)", OFFSET(hls_playlist), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "streaming", "Enable/Disable streaming mode of output. Each frame will be a moof fragment", OFFSET(streaming), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "timeout", "set timeout for socket I/O operations", OFFSET(timeout), AV_OPT_TYPE_DURATION, { .i64 = -1 }, -1, INT_MAX, .flags = E },
    { "index_correction", "Enable/Disable segment index correction logic", OFFSET(index_correction), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "format_options", "set list of options for the container format (mp4/webm) used for dash", OFFSET(format_options_str), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, E },
    { "global_sidx", "Write global SIDX atom. Applicable only for single file, mp4 output, non-streaming mode", OFFSET(global_sidx), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "dash_segment_type", "set dash segment files type", OFFSET(segment_type_option), AV_OPT_TYPE_INT, { .i64 = SEGMENT_TYPE_AUTO }, 0, SEGMENT_TYPE_NB - 1, E, "segment_type" },
    { "auto", "select segment file format based on codec", 0, AV_OPT_TYPE_CONST, { .i64 = SEGMENT_TYPE_AUTO }, 0, UINT_MAX, E, "segment_type" },
    { "mp4", "make segment file in ISOBMFF format", 0, AV_OPT_TYPE_CONST, { .i64 = SEGMENT_TYPE_MP4 }, 0, UINT_MAX, E, "segment_type" },
    { "webm", "make segment file in WebM format", 0, AV_OPT_TYPE_CONST, { .i64 = SEGMENT_TYPE_WEBM }, 0, UINT_MAX, E, "segment_type" },
    { "ignore_io_errors", "Ignore IO errors during open and write. Useful for long-duration runs with network output", OFFSET(ignore_io_errors), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "lhls", "Enable Low-latency HLS (Experimental). Adds #EXT-X-PREFETCH tag with current segment's URI", OFFSET(lhls), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
    { "master_m3u8_publish_rate", "Publish master playlist once every given number of segment intervals", OFFSET(master_publish_rate), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, UINT_MAX, E },
    { NULL },
};
static const AVClass dash_class = {
    .class_name = "dash muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_dash_muxer = {
    .name            = "dash",
    .long_name       = NULL_IF_CONFIG_SMALL("DASH Muxer"),
    .extensions      = "mpd",
    .priv_data_size  = sizeof(DASHContext),
    .audio_codec     = AV_CODEC_ID_AAC,
    .video_codec     = AV_CODEC_ID_H264,
    .flags           = AVFMT_GLOBALHEADER | AVFMT_NOFILE | AVFMT_TS_NEGATIVE,
    .init            = dash_init,
    .write_header    = dash_write_header,
    .write_packet    = dash_write_packet,
    .write_trailer   = dash_write_trailer,
    .deinit          = dash_free,
    .check_bitstream = dash_check_bitstream,
    .priv_class      = &dash_class,
};