/*
 * WebM DASH Manifest XML muxer
 * Copyright (c) 2014 Vignesh Venkatasubramanian
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * WebM DASH Specification:
 * https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification
 * ISO DASH Specification:
 * http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
 */

#include <float.h>
#include <stdint.h>
#include <string.h>

#include "avformat.h"
#include "avio_internal.h"
#include "matroska.h"

#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time_internal.h"

typedef struct AdaptationSet {
    char id[10];
    int *streams;
    int nb_streams;
} AdaptationSet;

typedef struct WebMDashMuxContext {
    const AVClass *class;
    char *adaptation_sets;
    AdaptationSet *as;
    int nb_as;
    int representation_id;
    int is_live;
    int chunk_start_index;
    int chunk_duration;
    char *utc_timing_url;
    double time_shift_buffer_depth;
    int minimum_update_period;
} WebMDashMuxContext;

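/*
 * Returns the AVCodecDescriptor name for the given codec id; the string is
 * used verbatim as the "codecs" attribute value in the manifest.
 */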
static const char *get_codec_name(int codec_id)
{
    return avcodec_descriptor_get(codec_id)->name;
}

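/*
 * Returns the largest per-stream DURATION metadata value divided by 1000
 * (i.e. milliseconds to seconds), used as the presentation/period duration.
 */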
static double get_duration(AVFormatContext *s)
{
    int i = 0;
    double max = 0.0;
    for (i = 0; i < s->nb_streams; i++) {
        AVDictionaryEntry *duration = av_dict_get(s->streams[i]->metadata,
                                                  DURATION, NULL, 0);
        if (!duration || atof(duration->value) < 0) continue;
        if (atof(duration->value) > max) max = atof(duration->value);
    }
    return max / 1000;
}

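/*
 * Writes the XML declaration and the opening <MPD> element with its
 * attributes; for live manifests this also emits availabilityStartTime,
 * timeShiftBufferDepth, minimumUpdatePeriod and an optional <UTCTiming>
 * element. Returns 0 on success and < 0 on failure.
 */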
static int write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    double min_buffer_time = 1.0;
    avio_printf(s->pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    avio_printf(s->pb, "<MPD\n");
    avio_printf(s->pb, " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
    avio_printf(s->pb, " xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " type=\"%s\"\n", w->is_live ? "dynamic" : "static");
    if (!w->is_live) {
        avio_printf(s->pb, " mediaPresentationDuration=\"PT%gS\"\n",
                    get_duration(s));
    }
    avio_printf(s->pb, " minBufferTime=\"PT%gS\"\n", min_buffer_time);
    avio_printf(s->pb, " profiles=\"%s\"%s",
                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012",
                w->is_live ? "\n" : ">\n");
    if (w->is_live) {
        time_t local_time = time(NULL);
        struct tm gmt_buffer;
        struct tm *gmt = gmtime_r(&local_time, &gmt_buffer);
        char gmt_iso[21];
        if (!strftime(gmt_iso, 21, "%Y-%m-%dT%H:%M:%SZ", gmt)) {
            return AVERROR_UNKNOWN;
        }
        if (s->flags & AVFMT_FLAG_BITEXACT) {
            av_strlcpy(gmt_iso, "", 1);
        }
        avio_printf(s->pb, " availabilityStartTime=\"%s\"\n", gmt_iso);
        avio_printf(s->pb, " timeShiftBufferDepth=\"PT%gS\"\n", w->time_shift_buffer_depth);
        avio_printf(s->pb, " minimumUpdatePeriod=\"PT%dS\"", w->minimum_update_period);
        avio_printf(s->pb, ">\n");
        if (w->utc_timing_url) {
            avio_printf(s->pb, "<UTCTiming\n");
            avio_printf(s->pb, " schemeIdUri=\"urn:mpeg:dash:utc:http-iso:2014\"\n");
            avio_printf(s->pb, " value=\"%s\"/>\n", w->utc_timing_url);
        }
    }
    return 0;
}

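/* Closes the <MPD> element opened in write_header(). */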
static void write_footer(AVFormatContext *s)
{
    avio_printf(s->pb, "</MPD>\n");
}

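/*
 * Returns 1 if every stream in the Adaptation Set carries identical
 * CUE_TIMESTAMPS metadata (i.e. the Cue points are aligned), 0 otherwise.
 */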
static int subsegment_alignment(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold = av_dict_get(s->streams[as->streams[0]]->metadata,
                                          CUE_TIMESTAMPS, NULL, 0);
    if (!gold) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *ts = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CUE_TIMESTAMPS, NULL, 0);
        if (!ts || strncmp(gold->value, ts->value, strlen(gold->value))) return 0;
    }
    return 1;
}

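/*
 * Returns 1 if all streams in the Adaptation Set share the same track number,
 * codec id and codec extradata (so a player can switch Representations
 * without reinitializing the decoder), 0 otherwise.
 */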
static int bitstream_switching(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold_track_num = av_dict_get(s->streams[as->streams[0]]->metadata,
                                                    TRACK_NUMBER, NULL, 0);
    AVCodecParameters *gold_par = s->streams[as->streams[0]]->codecpar;
    if (!gold_track_num) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *track_num = av_dict_get(s->streams[as->streams[i]]->metadata,
                                                   TRACK_NUMBER, NULL, 0);
        AVCodecParameters *par = s->streams[as->streams[i]]->codecpar;
        if (!track_num ||
            strncmp(gold_track_num->value, track_num->value, strlen(gold_track_num->value)) ||
            gold_par->codec_id != par->codec_id ||
            gold_par->extradata_size != par->extradata_size ||
            memcmp(gold_par->extradata, par->extradata, par->extradata_size)) {
            return 0;
        }
    }
    return 1;
}

/*
 * Writes a Representation within an Adaptation Set. Returns 0 on success and
 * < 0 on failure.
 */
static int write_representation(AVFormatContext *s, AVStream *stream, char *id,
                                int output_width, int output_height,
                                int output_sample_rate) {
    WebMDashMuxContext *w = s->priv_data;
    AVDictionaryEntry *irange = av_dict_get(stream->metadata, INITIALIZATION_RANGE, NULL, 0);
    AVDictionaryEntry *cues_start = av_dict_get(stream->metadata, CUES_START, NULL, 0);
    AVDictionaryEntry *cues_end = av_dict_get(stream->metadata, CUES_END, NULL, 0);
    AVDictionaryEntry *filename = av_dict_get(stream->metadata, FILENAME, NULL, 0);
    AVDictionaryEntry *bandwidth = av_dict_get(stream->metadata, BANDWIDTH, NULL, 0);
    const char *bandwidth_str;
    if ((w->is_live && (!filename)) ||
        (!w->is_live && (!irange || !cues_start || !cues_end || !filename || !bandwidth))) {
        return AVERROR_INVALIDDATA;
    }
    avio_printf(s->pb, "<Representation id=\"%s\"", id);
    // if bandwidth for live was not provided, use a default
    if (w->is_live && !bandwidth) {
        bandwidth_str = (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ? "128000" : "1000000";
    } else {
        bandwidth_str = bandwidth->value;
    }
    avio_printf(s->pb, " bandwidth=\"%s\"", bandwidth_str);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_width)
        avio_printf(s->pb, " width=\"%d\"", stream->codecpar->width);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_height)
        avio_printf(s->pb, " height=\"%d\"", stream->codecpar->height);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && output_sample_rate)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", stream->codecpar->sample_rate);
    if (w->is_live) {
        // For live streams, Codec and Mime Type always go in the Representation tag.
        avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(stream->codecpar->codec_id));
        avio_printf(s->pb, " mimeType=\"%s/webm\"",
                    stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        // For live streams, subsegments always start with key frames. So this
        // is always 1.
        avio_printf(s->pb, " startsWithSAP=\"1\"");
        avio_printf(s->pb, ">");
    } else {
        avio_printf(s->pb, ">\n");
        avio_printf(s->pb, "<BaseURL>%s</BaseURL>\n", filename->value);
        avio_printf(s->pb, "<SegmentBase\n");
        avio_printf(s->pb, " indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
        avio_printf(s->pb, "<Initialization\n");
        avio_printf(s->pb, " range=\"0-%s\" />\n", irange->value);
        avio_printf(s->pb, "</SegmentBase>\n");
    }
    avio_printf(s->pb, "</Representation>\n");
    return 0;
}

/*
 * Checks if the width of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_width(AVFormatContext *s, AdaptationSet *as) {
    int first_width, i;
    if (as->nb_streams < 2) return 1;
    first_width = s->streams[as->streams[0]]->codecpar->width;
    for (i = 1; i < as->nb_streams; i++)
        if (first_width != s->streams[as->streams[i]]->codecpar->width)
            return 0;
    return 1;
}

/*
 * Checks if the height of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_height(AVFormatContext *s, AdaptationSet *as) {
    int first_height, i;
    if (as->nb_streams < 2) return 1;
    first_height = s->streams[as->streams[0]]->codecpar->height;
    for (i = 1; i < as->nb_streams; i++)
        if (first_height != s->streams[as->streams[i]]->codecpar->height)
            return 0;
    return 1;
}

/*
 * Checks if the sample rate of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as) {
    int first_sample_rate, i;
    if (as->nb_streams < 2) return 1;
    first_sample_rate = s->streams[as->streams[0]]->codecpar->sample_rate;
    for (i = 1; i < as->nb_streams; i++)
        if (first_sample_rate != s->streams[as->streams[i]]->codecpar->sample_rate)
            return 0;
    return 1;
}

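/* Frees the per-set stream index arrays and the Adaptation Set array itself. */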
static void free_adaptation_sets(AVFormatContext *s) {
    WebMDashMuxContext *w = s->priv_data;
    int i;
    for (i = 0; i < w->nb_as; i++) {
        av_freep(&w->as[i].streams);
    }
    av_freep(&w->as);
    w->nb_as = 0;
}

/*
 * Parses a live header filename and computes the representation id,
 * initialization pattern and the media pattern. Pass NULL if you don't want to
 * compute any of those 3. Returns 0 on success and non-zero on failure.
 *
 * Name of the header file should conform to the following pattern:
 * <file_description>_<representation_id>.hdr where <file_description> can be
 * anything. The chunks should be named according to the following pattern:
 * <file_description>_<representation_id>_<chunk_number>.chk
 */
static int parse_filename(char *filename, char **representation_id,
                          char **initialization_pattern, char **media_pattern) {
    char *underscore_pos = NULL;
    char *period_pos = NULL;
    char *temp_pos = NULL;
    char *filename_str = av_strdup(filename);
    int ret = 0;
    if (!filename_str) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    temp_pos = av_stristr(filename_str, "_");
    while (temp_pos) {
        underscore_pos = temp_pos + 1;
        temp_pos = av_stristr(temp_pos + 1, "_");
    }
    if (!underscore_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    period_pos = av_stristr(underscore_pos, ".");
    if (!period_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    *(underscore_pos - 1) = 0;
    if (representation_id) {
        *representation_id = av_malloc(period_pos - underscore_pos + 1);
        if (!(*representation_id)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        av_strlcpy(*representation_id, underscore_pos, period_pos - underscore_pos + 1);
    }
    if (initialization_pattern) {
        *initialization_pattern = av_asprintf("%s_$RepresentationID$.hdr",
                                              filename_str);
        if (!(*initialization_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
    if (media_pattern) {
        *media_pattern = av_asprintf("%s_$RepresentationID$_$Number$.chk",
                                     filename_str);
        if (!(*media_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
end:
    av_freep(&filename_str);
    return ret;
}

/*
 * Writes an Adaptation Set. Returns 0 on success and < 0 on failure.
 */
static int write_adaptation_set(AVFormatContext *s, int as_index)
{
    WebMDashMuxContext *w = s->priv_data;
    AdaptationSet *as = &w->as[as_index];
    AVCodecParameters *par = s->streams[as->streams[0]]->codecpar;
    AVDictionaryEntry *lang;
    int i;
    static const char boolean[2][6] = { "false", "true" };
    int subsegmentStartsWithSAP = 1;

    // Width, Height and Sample Rate will go in the AdaptationSet tag if they
    // are the same for all contained Representations. Otherwise, they will go
    // on their respective Representation tag. For live streams, they always go
    // in the Representation tag.
    int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
    if (par->codec_type == AVMEDIA_TYPE_VIDEO) {
        width_in_as = !w->is_live && check_matching_width(s, as);
        height_in_as = !w->is_live && check_matching_height(s, as);
    } else {
        sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as);
    }
    avio_printf(s->pb, "<AdaptationSet id=\"%s\"", as->id);
    avio_printf(s->pb, " mimeType=\"%s/webm\"",
                par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(par->codec_id));
    lang = av_dict_get(s->streams[as->streams[0]]->metadata, "language", NULL, 0);
    if (lang) avio_printf(s->pb, " lang=\"%s\"", lang->value);
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && width_in_as)
        avio_printf(s->pb, " width=\"%d\"", par->width);
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && height_in_as)
        avio_printf(s->pb, " height=\"%d\"", par->height);
    if (par->codec_type == AVMEDIA_TYPE_AUDIO && sample_rate_in_as)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", par->sample_rate);
    avio_printf(s->pb, " bitstreamSwitching=\"%s\"",
                boolean[bitstream_switching(s, as)]);
    avio_printf(s->pb, " subsegmentAlignment=\"%s\"",
                boolean[w->is_live || subsegment_alignment(s, as)]);
    for (i = 0; i < as->nb_streams; i++) {
        AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CLUSTER_KEYFRAME, NULL, 0);
        if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1))) subsegmentStartsWithSAP = 0;
    }
    avio_printf(s->pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
    avio_printf(s->pb, ">\n");
    if (w->is_live) {
        AVDictionaryEntry *filename =
            av_dict_get(s->streams[as->streams[0]]->metadata, FILENAME, NULL, 0);
        char *initialization_pattern = NULL;
        char *media_pattern = NULL;
        int ret = parse_filename(filename->value, NULL, &initialization_pattern,
                                 &media_pattern);
        if (ret) return ret;
        avio_printf(s->pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
                    par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        avio_printf(s->pb, "<SegmentTemplate");
        avio_printf(s->pb, " timescale=\"1000\"");
        avio_printf(s->pb, " duration=\"%d\"", w->chunk_duration);
        avio_printf(s->pb, " media=\"%s\"", media_pattern);
        avio_printf(s->pb, " startNumber=\"%d\"", w->chunk_start_index);
        avio_printf(s->pb, " initialization=\"%s\"", initialization_pattern);
        avio_printf(s->pb, "/>\n");
        av_free(initialization_pattern);
        av_free(media_pattern);
    }
    for (i = 0; i < as->nb_streams; i++) {
        char *representation_id = NULL;
        int ret;
        if (w->is_live) {
            AVDictionaryEntry *filename =
                av_dict_get(s->streams[as->streams[i]]->metadata, FILENAME, NULL, 0);
            if (!filename)
                return AVERROR(EINVAL);
            if (ret = parse_filename(filename->value, &representation_id, NULL, NULL))
                return ret;
        } else {
            representation_id = av_asprintf("%d", w->representation_id++);
            if (!representation_id) return AVERROR(ENOMEM);
        }
        ret = write_representation(s, s->streams[as->streams[i]],
                                   representation_id, !width_in_as,
                                   !height_in_as, !sample_rate_in_as);
        av_free(representation_id);
        if (ret) return ret;
    }
    avio_printf(s->pb, "</AdaptationSet>\n");
    return 0;
}

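/*
 * Parses the first len - 1 characters of p as a decimal integer. Returns the
 * parsed value, or AVERROR(ENOMEM) if the temporary copy cannot be allocated.
 */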
static int to_integer(char *p, int len)
{
    int ret;
    char *q = av_malloc(len);
    if (!q)
        return AVERROR(ENOMEM);
    av_strlcpy(q, p, len);
    ret = atoi(q);
    av_free(q);
    return ret;
}

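/*
 * Parses the "adaptation_sets" option string (syntax:
 * "id=0,streams=0,1,2 id=1,streams=3,4 ...") into the AdaptationSet array.
 * Returns 0 on success and a negative value on malformed input or
 * allocation failure.
 */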
static int parse_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    char *p = w->adaptation_sets;
    char *q;
    enum { new_set, parsed_id, parsing_streams } state;
    if (!w->adaptation_sets) {
        av_log(s, AV_LOG_ERROR, "The 'adaptation_sets' option must be set.\n");
        return AVERROR(EINVAL);
    }
    // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
    state = new_set;
    while (p < w->adaptation_sets + strlen(w->adaptation_sets)) {
        if (*p == ' ') {
            p++; // skip stray spaces so the loop always advances
            continue;
        } else if (state == new_set && !strncmp(p, "id=", 3)) {
            void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1));
            const char *comma;
            if (mem == NULL)
                return AVERROR(ENOMEM);
            w->as = mem;
            ++w->nb_as;
            w->as[w->nb_as - 1].nb_streams = 0;
            w->as[w->nb_as - 1].streams = NULL;
            p += 3; // consume "id="
            q = w->as[w->nb_as - 1].id;
            comma = strchr(p, ',');
            if (!comma || comma - p >= sizeof(w->as[w->nb_as - 1].id)) {
                av_log(s, AV_LOG_ERROR, "'id' in 'adaptation_sets' is malformed.\n");
                return AVERROR(EINVAL);
            }
            while (*p != ',') *q++ = *p++;
            *q = 0;
            p++;
            state = parsed_id;
        } else if (state == parsed_id && !strncmp(p, "streams=", 8)) {
            p += 8; // consume "streams="
            state = parsing_streams;
        } else if (state == parsing_streams) {
            struct AdaptationSet *as = &w->as[w->nb_as - 1];
            int ret = av_reallocp_array(&as->streams, ++as->nb_streams,
                                        sizeof(*as->streams));
            if (ret < 0)
                return ret;
            q = p;
            while (*q != '\0' && *q != ',' && *q != ' ') q++;
            as->streams[as->nb_streams - 1] = to_integer(p, q - p + 1);
            if (as->streams[as->nb_streams - 1] < 0 ||
                as->streams[as->nb_streams - 1] >= s->nb_streams) {
                av_log(s, AV_LOG_ERROR, "Invalid value for 'streams' in adaptation_sets.\n");
                return AVERROR(EINVAL);
            }
            if (*q == '\0') break;
            if (*q == ' ') state = new_set;
            p = ++q;
        } else {
            return -1;
        }
    }
    return 0;
}

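/*
 * Muxer write_header callback: rejects any stream that is not VP8, VP9,
 * Vorbis or Opus, then parses the adaptation sets and writes the entire
 * manifest (MPD header, a single Period, the Adaptation Sets and the footer).
 */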
static int webm_dash_manifest_write_header(AVFormatContext *s)
{
    int i;
    double start = 0.0;
    int ret;
    WebMDashMuxContext *w = s->priv_data;

    for (unsigned i = 0; i < s->nb_streams; i++) {
        enum AVCodecID codec_id = s->streams[i]->codecpar->codec_id;
        if (codec_id != AV_CODEC_ID_VP8 && codec_id != AV_CODEC_ID_VP9 &&
            codec_id != AV_CODEC_ID_VORBIS && codec_id != AV_CODEC_ID_OPUS)
            return AVERROR(EINVAL);
    }

    ret = parse_adaptation_sets(s);
    if (ret < 0) {
        goto fail;
    }
    ret = write_header(s);
    if (ret < 0) {
        goto fail;
    }
    avio_printf(s->pb, "<Period id=\"0\"");
    avio_printf(s->pb, " start=\"PT%gS\"", start);
    if (!w->is_live) {
        avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
    }
    avio_printf(s->pb, " >\n");
    for (i = 0; i < w->nb_as; i++) {
        ret = write_adaptation_set(s, i);
        if (ret < 0) {
            goto fail;
        }
    }
    avio_printf(s->pb, "</Period>\n");
    write_footer(s);
fail:
    free_adaptation_sets(s);
    return ret < 0 ? ret : 0;
}

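/*
 * The whole manifest is produced in the write_header callback, so no packets
 * are ever consumed here.
 */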
static int webm_dash_manifest_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF;
}

#define OFFSET(x) offsetof(WebMDashMuxContext, x)
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "live", "create a live stream manifest", OFFSET(is_live), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_start_index", "start index of the chunk", OFFSET(chunk_start_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_duration_ms", "duration of each chunk (in milliseconds)", OFFSET(chunk_duration), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.", OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "minimum_update_period", "Minimum Update Period (in seconds) of the manifest.", OFFSET(minimum_update_period), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

#if CONFIG_WEBM_DASH_MANIFEST_MUXER
static const AVClass webm_dash_class = {
    .class_name = "WebM DASH Manifest muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_webm_dash_manifest_muxer = {
    .name              = "webm_dash_manifest",
    .long_name         = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
    .mime_type         = "application/xml",
    .extensions        = "xml",
    .priv_data_size    = sizeof(WebMDashMuxContext),
    .write_header      = webm_dash_manifest_write_header,
    .write_packet      = webm_dash_manifest_write_packet,
    .priv_class        = &webm_dash_class,
};
#endif