/*
 * WebM DASH Manifest XML muxer
 * Copyright (c) 2014 Vignesh Venkatasubramanian
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * WebM DASH Specification:
 * https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification
 * ISO DASH Specification:
 * http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
 */

#include <float.h>
#include <stdint.h>
#include <string.h>

#include "avformat.h"
#include "matroska.h"

#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time_internal.h"

typedef struct AdaptationSet {
    char id[10];
    int *streams;
    int nb_streams;
} AdaptationSet;

typedef struct WebMDashMuxContext {
    const AVClass *class;
    char *adaptation_sets;
    AdaptationSet *as;
    int nb_as;
    int representation_id;
    int is_live;
    int chunk_start_index;
    int chunk_duration;
    char *utc_timing_url;
    double time_shift_buffer_depth;
    int minimum_update_period;
} WebMDashMuxContext;

static const char *get_codec_name(int codec_id)
{
    return avcodec_descriptor_get(codec_id)->name;
}

static double get_duration(AVFormatContext *s)
{
    int i = 0;
    double max = 0.0;
    for (i = 0; i < s->nb_streams; i++) {
        AVDictionaryEntry *duration = av_dict_get(s->streams[i]->metadata,
                                                  DURATION, NULL, 0);
        if (!duration || atof(duration->value) < 0) continue;
        if (atof(duration->value) > max) max = atof(duration->value);
    }
    return max / 1000;
}
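/*
 * Note: DURATION is one of the string metadata tags from matroska.h that this
 * muxer expects on each stream (in the intended workflow they are provided by
 * the input, e.g. when reading with the webm_dash_manifest demuxer). The value
 * is assumed to be in milliseconds, hence the division by 1000 above to obtain
 * the presentation duration in seconds.
 */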
static int write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    AVIOContext *pb = s->pb;
    double min_buffer_time = 1.0;
    avio_printf(pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    avio_printf(pb, "<MPD\n");
    avio_printf(pb, " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
    avio_printf(pb, " xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(pb, " xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(pb, " type=\"%s\"\n", w->is_live ? "dynamic" : "static");
    if (!w->is_live) {
        avio_printf(pb, " mediaPresentationDuration=\"PT%gS\"\n",
                    get_duration(s));
    }
    avio_printf(pb, " minBufferTime=\"PT%gS\"\n", min_buffer_time);
    avio_printf(pb, " profiles=\"%s\"%s",
                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012",
                w->is_live ? "\n" : ">\n");
    if (w->is_live) {
        time_t local_time = time(NULL);
        struct tm gmt_buffer;
        struct tm *gmt = gmtime_r(&local_time, &gmt_buffer);
        char gmt_iso[21];
        if (!strftime(gmt_iso, 21, "%Y-%m-%dT%H:%M:%SZ", gmt)) {
            return AVERROR_UNKNOWN;
        }
        if (s->flags & AVFMT_FLAG_BITEXACT) {
            av_strlcpy(gmt_iso, "", 1);
        }
        avio_printf(pb, " availabilityStartTime=\"%s\"\n", gmt_iso);
        avio_printf(pb, " timeShiftBufferDepth=\"PT%gS\"\n", w->time_shift_buffer_depth);
        avio_printf(pb, " minimumUpdatePeriod=\"PT%dS\"", w->minimum_update_period);
        avio_printf(pb, ">\n");
        if (w->utc_timing_url) {
            avio_printf(pb, "<UTCTiming\n");
            avio_printf(pb, " schemeIdUri=\"urn:mpeg:dash:utc:http-iso:2014\"\n");
            avio_printf(pb, " value=\"%s\"/>\n", w->utc_timing_url);
        }
    }
    return 0;
}
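/*
 * For orientation, a sketch of the MPD element the function above emits for a
 * static (on-demand) manifest; the attribute values are illustrative only and
 * depend on the input streams and options:
 *
 *   <?xml version="1.0" encoding="UTF-8"?>
 *   <MPD
 *    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
 *    xmlns="urn:mpeg:DASH:schema:MPD:2011"
 *    xsi:schemaLocation="urn:mpeg:DASH:schema:MPD:2011"
 *    type="static"
 *    mediaPresentationDuration="PT9.5S"
 *    minBufferTime="PT1S"
 *    profiles="urn:webm:dash:profile:webm-on-demand:2012">
 */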
static void write_footer(AVFormatContext *s)
{
    avio_printf(s->pb, "</MPD>\n");
}
/*
 * Returns 1 if the Cue timestamps of all streams in the Adaptation Set are
 * aligned with those of the first stream (i.e. each stream's CUE_TIMESTAMPS
 * metadata starts with the first stream's list), 0 otherwise.
 */
static int subsegment_alignment(AVFormatContext *s, const AdaptationSet *as)
{
    int i;
    AVDictionaryEntry *gold = av_dict_get(s->streams[as->streams[0]]->metadata,
                                          CUE_TIMESTAMPS, NULL, 0);
    if (!gold) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *ts = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CUE_TIMESTAMPS, NULL, 0);
        if (!ts || !av_strstart(ts->value, gold->value, NULL)) return 0;
    }
    return 1;
}
/*
 * Returns 1 if bitstream switching is possible within the Adaptation Set,
 * i.e. all streams share the same track number, codec and codec private
 * data (extradata), 0 otherwise.
 */
static int bitstream_switching(AVFormatContext *s, const AdaptationSet *as)
{
    int i;
    const AVStream *gold_st = s->streams[as->streams[0]];
    AVDictionaryEntry *gold_track_num = av_dict_get(gold_st->metadata,
                                                    TRACK_NUMBER, NULL, 0);
    AVCodecParameters *gold_par = gold_st->codecpar;
    if (!gold_track_num) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        const AVStream *st = s->streams[as->streams[i]];
        AVDictionaryEntry *track_num = av_dict_get(st->metadata,
                                                   TRACK_NUMBER, NULL, 0);
        AVCodecParameters *par = st->codecpar;
        if (!track_num ||
            !av_strstart(track_num->value, gold_track_num->value, NULL) ||
            gold_par->codec_id != par->codec_id ||
            gold_par->extradata_size != par->extradata_size ||
            (par->extradata_size > 0 &&
             memcmp(gold_par->extradata, par->extradata, par->extradata_size))) {
            return 0;
        }
    }
    return 1;
}
/*
 * Writes a Representation within an Adaptation Set. Returns 0 on success and
 * < 0 on failure.
 */
static int write_representation(AVFormatContext *s, AVStream *st, char *id,
                                int output_width, int output_height,
                                int output_sample_rate)
{
    WebMDashMuxContext *w = s->priv_data;
    AVIOContext *pb = s->pb;
    const AVCodecParameters *par = st->codecpar;
    AVDictionaryEntry *bandwidth = av_dict_get(st->metadata, BANDWIDTH, NULL, 0);
    const char *bandwidth_str;
    avio_printf(pb, "<Representation id=\"%s\"", id);
    if (bandwidth) {
        bandwidth_str = bandwidth->value;
    } else if (w->is_live) {
        // if bandwidth for live was not provided, use a default
        bandwidth_str = (par->codec_type == AVMEDIA_TYPE_AUDIO) ? "128000" : "1000000";
    } else {
        return AVERROR(EINVAL);
    }
    avio_printf(pb, " bandwidth=\"%s\"", bandwidth_str);
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && output_width)
        avio_printf(pb, " width=\"%d\"", par->width);
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && output_height)
        avio_printf(pb, " height=\"%d\"", par->height);
    if (par->codec_type == AVMEDIA_TYPE_AUDIO && output_sample_rate)
        avio_printf(pb, " audioSamplingRate=\"%d\"", par->sample_rate);
    if (w->is_live) {
        // For live streams, Codec and Mime Type always go in the Representation tag.
        avio_printf(pb, " codecs=\"%s\"", get_codec_name(par->codec_id));
        avio_printf(pb, " mimeType=\"%s/webm\"",
                    par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        // For live streams, subsegments always start with key frames. So this
        // is always 1.
        avio_printf(pb, " startsWithSAP=\"1\"");
        avio_printf(pb, ">");
    } else {
        AVDictionaryEntry *irange = av_dict_get(st->metadata, INITIALIZATION_RANGE, NULL, 0);
        AVDictionaryEntry *cues_start = av_dict_get(st->metadata, CUES_START, NULL, 0);
        AVDictionaryEntry *cues_end = av_dict_get(st->metadata, CUES_END, NULL, 0);
        AVDictionaryEntry *filename = av_dict_get(st->metadata, FILENAME, NULL, 0);
        if (!irange || !cues_start || !cues_end || !filename)
            return AVERROR(EINVAL);
        avio_printf(pb, ">\n");
        avio_printf(pb, "<BaseURL>%s</BaseURL>\n", filename->value);
        avio_printf(pb, "<SegmentBase\n");
        avio_printf(pb, " indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
        avio_printf(pb, "<Initialization\n");
        avio_printf(pb, " range=\"0-%s\" />\n", irange->value);
        avio_printf(pb, "</SegmentBase>\n");
    }
    avio_printf(pb, "</Representation>\n");
    return 0;
}
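/*
 * Sketch of the Representation element written by the on-demand (non-live)
 * branch above; the file name, bandwidth, dimensions and byte ranges are
 * illustrative values taken from the stream's BANDWIDTH, CUES_START, CUES_END,
 * INITIALIZATION_RANGE and FILENAME metadata, and width/height only appear
 * here when they are not hoisted into the AdaptationSet tag:
 *
 *   <Representation id="0" bandwidth="1500000" width="1280" height="720">
 *   <BaseURL>video_720.webm</BaseURL>
 *   <SegmentBase
 *    indexRange="9543201-9543456">
 *   <Initialization
 *    range="0-317" />
 *   </SegmentBase>
 *   </Representation>
 */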
/*
 * Checks whether the width of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_width(AVFormatContext *s, const AdaptationSet *as)
{
    int first_width, i;
    if (as->nb_streams < 2) return 1;
    first_width = s->streams[as->streams[0]]->codecpar->width;
    for (i = 1; i < as->nb_streams; i++)
        if (first_width != s->streams[as->streams[i]]->codecpar->width)
            return 0;
    return 1;
}

/*
 * Checks whether the height of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_height(AVFormatContext *s, const AdaptationSet *as)
{
    int first_height, i;
    if (as->nb_streams < 2) return 1;
    first_height = s->streams[as->streams[0]]->codecpar->height;
    for (i = 1; i < as->nb_streams; i++)
        if (first_height != s->streams[as->streams[i]]->codecpar->height)
            return 0;
    return 1;
}

/*
 * Checks whether the sample rate of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_sample_rate(AVFormatContext *s, const AdaptationSet *as)
{
    int first_sample_rate, i;
    if (as->nb_streams < 2) return 1;
    first_sample_rate = s->streams[as->streams[0]]->codecpar->sample_rate;
    for (i = 1; i < as->nb_streams; i++)
        if (first_sample_rate != s->streams[as->streams[i]]->codecpar->sample_rate)
            return 0;
    return 1;
}
static void free_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    int i;
    for (i = 0; i < w->nb_as; i++) {
        av_freep(&w->as[i].streams);
    }
    av_freep(&w->as);
    w->nb_as = 0;
}
/*
 * Parses a live header filename and returns the position of the '_' and '.'
 * delimiting <file_description> and <representation_id>.
 *
 * The name of the header file should conform to the following pattern:
 * <file_description>_<representation_id>.hdr where <file_description> can be
 * anything. The chunks should be named according to the following pattern:
 * <file_description>_<representation_id>_<chunk_number>.chk
 */
static int split_filename(char *filename, char **underscore_pos,
                          char **period_pos)
{
    *underscore_pos = strrchr(filename, '_');
    if (!*underscore_pos)
        return AVERROR(EINVAL);
    *period_pos = strchr(*underscore_pos, '.');
    if (!*period_pos)
        return AVERROR(EINVAL);
    return 0;
}
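/*
 * Illustrative example (the filename is hypothetical): for the header file
 * "dash_live_video_1.hdr", strrchr() finds the last '_', so *underscore_pos
 * points at "_1.hdr" and *period_pos points at ".hdr". The caller reads the
 * representation id "1" from between the two delimiters, and the matching
 * chunks are named "dash_live_video_1_1.chk", "dash_live_video_1_2.chk", ...
 */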
/*
 * Writes an Adaptation Set. Returns 0 on success and < 0 on failure.
 */
static int write_adaptation_set(AVFormatContext *s, int as_index)
{
    WebMDashMuxContext *w = s->priv_data;
    AdaptationSet *as = &w->as[as_index];
    const AVStream *st = s->streams[as->streams[0]];
    AVCodecParameters *par = st->codecpar;
    AVDictionaryEntry *lang;
    AVIOContext *pb = s->pb;
    int i;
    static const char boolean[2][6] = { "false", "true" };
    int subsegmentStartsWithSAP = 1;

    // Width, Height and Sample Rate will go in the AdaptationSet tag if they
    // are the same for all contained Representations. Otherwise, they will go
    // on their respective Representation tag. For live streams, they always go
    // in the Representation tag.
    int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
    if (par->codec_type == AVMEDIA_TYPE_VIDEO) {
        width_in_as  = !w->is_live && check_matching_width (s, as);
        height_in_as = !w->is_live && check_matching_height(s, as);
    } else {
        sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as);
    }

    avio_printf(pb, "<AdaptationSet id=\"%s\"", as->id);
    avio_printf(pb, " mimeType=\"%s/webm\"",
                par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    avio_printf(pb, " codecs=\"%s\"", get_codec_name(par->codec_id));

    lang = av_dict_get(st->metadata, "language", NULL, 0);
    if (lang)
        avio_printf(pb, " lang=\"%s\"", lang->value);

    if (par->codec_type == AVMEDIA_TYPE_VIDEO && width_in_as)
        avio_printf(pb, " width=\"%d\"", par->width);
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && height_in_as)
        avio_printf(pb, " height=\"%d\"", par->height);
    if (par->codec_type == AVMEDIA_TYPE_AUDIO && sample_rate_in_as)
        avio_printf(pb, " audioSamplingRate=\"%d\"", par->sample_rate);

    avio_printf(pb, " bitstreamSwitching=\"%s\"",
                boolean[bitstream_switching(s, as)]);
    avio_printf(pb, " subsegmentAlignment=\"%s\"",
                boolean[w->is_live || subsegment_alignment(s, as)]);

    for (i = 0; i < as->nb_streams; i++) {
        AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CLUSTER_KEYFRAME, NULL, 0);
        if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1))) subsegmentStartsWithSAP = 0;
    }
    avio_printf(pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
    avio_printf(pb, ">\n");

    if (w->is_live) {
        AVDictionaryEntry *filename =
            av_dict_get(st->metadata, FILENAME, NULL, 0);
        char *underscore_pos, *period_pos;
        int ret;
        if (!filename)
            return AVERROR(EINVAL);
        ret = split_filename(filename->value, &underscore_pos, &period_pos);
        if (ret) return ret;
        *underscore_pos = '\0';
        avio_printf(pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
                    par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        avio_printf(pb, "<SegmentTemplate");
        avio_printf(pb, " timescale=\"1000\"");
        avio_printf(pb, " duration=\"%d\"", w->chunk_duration);
        avio_printf(pb, " media=\"%s_$RepresentationID$_$Number$.chk\"",
                    filename->value);
        avio_printf(pb, " startNumber=\"%d\"", w->chunk_start_index);
        avio_printf(pb, " initialization=\"%s_$RepresentationID$.hdr\"",
                    filename->value);
        avio_printf(pb, "/>\n");
        *underscore_pos = '_';
    }

    for (i = 0; i < as->nb_streams; i++) {
        char buf[25], *representation_id = buf, *underscore_pos, *period_pos;
        AVStream *st = s->streams[as->streams[i]];
        int ret;
        if (w->is_live) {
            AVDictionaryEntry *filename =
                av_dict_get(st->metadata, FILENAME, NULL, 0);
            if (!filename)
                return AVERROR(EINVAL);
            ret = split_filename(filename->value, &underscore_pos, &period_pos);
            if (ret < 0)
                return ret;
            representation_id = underscore_pos + 1;
            *period_pos = '\0';
        } else {
            snprintf(buf, sizeof(buf), "%d", w->representation_id++);
        }
        ret = write_representation(s, st, representation_id, !width_in_as,
                                   !height_in_as, !sample_rate_in_as);
        if (ret) return ret;
        if (w->is_live)
            *period_pos = '.';
    }
    avio_printf(s->pb, "</AdaptationSet>\n");
    return 0;
}
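/*
 * Sketch of what the live branch above emits, assuming a hypothetical header
 * file "dash_live_video_1.hdr", chunk_start_index=1 and chunk_duration_ms=5000
 * (values and line wrapping are illustrative):
 *
 *   <AdaptationSet id="0" mimeType="video/webm" codecs="vp9"
 *    bitstreamSwitching="true" subsegmentAlignment="true"
 *    subsegmentStartsWithSAP="1">
 *   <ContentComponent id="1" type="video"/>
 *   <SegmentTemplate timescale="1000" duration="5000"
 *    media="dash_live_video_$RepresentationID$_$Number$.chk" startNumber="1"
 *    initialization="dash_live_video_$RepresentationID$.hdr"/>
 *   ... one <Representation> per stream ...
 *   </AdaptationSet>
 */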
static int parse_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    char *p = w->adaptation_sets;
    char *q;
    enum { new_set, parsed_id, parsing_streams } state;
    if (!w->adaptation_sets) {
        av_log(s, AV_LOG_ERROR, "The 'adaptation_sets' option must be set.\n");
        return AVERROR(EINVAL);
    }
    // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
    state = new_set;
    while (1) {
        if (*p == '\0') {
            if (state == new_set)
                break;
            else
                return AVERROR(EINVAL);
        } else if (state == new_set && *p == ' ') {
            p++;
            continue;
        } else if (state == new_set && !strncmp(p, "id=", 3)) {
            void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1));
            const char *comma;
            if (mem == NULL)
                return AVERROR(ENOMEM);
            w->as = mem;
            ++w->nb_as;
            w->as[w->nb_as - 1].nb_streams = 0;
            w->as[w->nb_as - 1].streams = NULL;
            p += 3; // consume "id="
            q = w->as[w->nb_as - 1].id;
            comma = strchr(p, ',');
            if (!comma || comma - p >= sizeof(w->as[w->nb_as - 1].id)) {
                av_log(s, AV_LOG_ERROR, "'id' in 'adaptation_sets' is malformed.\n");
                return AVERROR(EINVAL);
            }
            while (*p != ',') *q++ = *p++;
            *q = 0;
            p++;
            state = parsed_id;
        } else if (state == parsed_id && !strncmp(p, "streams=", 8)) {
            p += 8; // consume "streams="
            state = parsing_streams;
        } else if (state == parsing_streams) {
            struct AdaptationSet *as = &w->as[w->nb_as - 1];
            int64_t num;
            int ret = av_reallocp_array(&as->streams, ++as->nb_streams,
                                        sizeof(*as->streams));
            if (ret < 0)
                return ret;
            num = strtoll(p, &q, 10);
            if (!av_isdigit(*p) || (*q != ' ' && *q != '\0' && *q != ',') ||
                num < 0 || num >= s->nb_streams) {
                av_log(s, AV_LOG_ERROR, "Invalid value for 'streams' in adaptation_sets.\n");
                return AVERROR(EINVAL);
            }
            as->streams[as->nb_streams - 1] = num;
            if (*q == '\0') break;
            if (*q == ' ') state = new_set;
            p = ++q;
        } else {
            return -1;
        }
    }
    return 0;
}
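/*
 * Illustrative example: with -adaptation_sets "id=0,streams=0,1,2 id=1,streams=3,4"
 * the parser above produces two AdaptationSet entries:
 *   as[0].id = "0", as[0].streams = {0, 1, 2}, as[0].nb_streams = 3
 *   as[1].id = "1", as[1].streams = {3, 4},    as[1].nb_streams = 2
 * The stream values are indices into the muxing context's streams and must be
 * smaller than nb_streams.
 */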
static int webm_dash_manifest_write_header(AVFormatContext *s)
{
    int i;
    double start = 0.0;
    int ret;
    WebMDashMuxContext *w = s->priv_data;

    for (unsigned i = 0; i < s->nb_streams; i++) {
        enum AVCodecID codec_id = s->streams[i]->codecpar->codec_id;
        if (codec_id != AV_CODEC_ID_VP8 && codec_id != AV_CODEC_ID_VP9 &&
            codec_id != AV_CODEC_ID_VORBIS && codec_id != AV_CODEC_ID_OPUS)
            return AVERROR(EINVAL);
    }

    ret = parse_adaptation_sets(s);
    if (ret < 0) {
        goto fail;
    }
    ret = write_header(s);
    if (ret < 0) {
        goto fail;
    }
    avio_printf(s->pb, "<Period id=\"0\"");
    avio_printf(s->pb, " start=\"PT%gS\"", start);
    if (!w->is_live) {
        avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
    }
    avio_printf(s->pb, " >\n");
    for (i = 0; i < w->nb_as; i++) {
        ret = write_adaptation_set(s, i);
        if (ret < 0) {
            goto fail;
        }
    }
    avio_printf(s->pb, "</Period>\n");
    write_footer(s);
fail:
    free_adaptation_sets(s);
    return ret < 0 ? ret : 0;
}
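/*
 * Overall shape of the generated document (illustrative): write_header() opens
 * the <MPD> element, a single <Period id="0"> wraps one <AdaptationSet> per
 * entry parsed from the 'adaptation_sets' option, and write_footer() closes
 * the manifest. Only VP8/VP9/Vorbis/Opus streams are accepted.
 *
 *   <MPD ...>
 *   <Period id="0" start="PT0S" duration="PT9.5S" >
 *   <AdaptationSet ...> ... </AdaptationSet>
 *   <AdaptationSet ...> ... </AdaptationSet>
 *   </Period>
 *   </MPD>
 */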
static int webm_dash_manifest_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF;
}

#define OFFSET(x) offsetof(WebMDashMuxContext, x)
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "live", "create a live stream manifest", OFFSET(is_live), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_start_index", "start index of the chunk", OFFSET(chunk_start_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_duration_ms", "duration of each chunk (in milliseconds)", OFFSET(chunk_duration), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.", OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "minimum_update_period", "Minimum Update Period (in seconds) of the manifest.", OFFSET(minimum_update_period), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

static const AVClass webm_dash_class = {
    .class_name = "WebM DASH Manifest muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_webm_dash_manifest_muxer = {
    .name              = "webm_dash_manifest",
    .long_name         = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
    .mime_type         = "application/xml",
    .extensions        = "xml",
    .priv_data_size    = sizeof(WebMDashMuxContext),
    .write_header      = webm_dash_manifest_write_header,
    .write_packet      = webm_dash_manifest_write_packet,
    .priv_class        = &webm_dash_class,
};
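/*
 * Example invocation (a sketch; the file names and stream mapping are
 * hypothetical). The inputs are expected to be WebM files read with the
 * webm_dash_manifest demuxer so that the per-stream metadata this muxer relies
 * on (BANDWIDTH, CUES_START, CUES_END, INITIALIZATION_RANGE, ...) is available:
 *
 *   ffmpeg -f webm_dash_manifest -i video.webm \
 *          -f webm_dash_manifest -i audio.webm \
 *          -c copy -map 0 -map 1 \
 *          -f webm_dash_manifest \
 *          -adaptation_sets "id=0,streams=0 id=1,streams=1" \
 *          manifest.xml
 */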