/*
 * WebM DASH Manifest XML muxer
 * Copyright (c) 2014 Vignesh Venkatasubramanian
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * WebM DASH Specification:
 * https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification
 * ISO DASH Specification:
 * http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
 */

#include <float.h>
#include <stdint.h>
#include <string.h>

#include "avformat.h"
#include "avio_internal.h"
#include "matroska.h"

#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time_internal.h"

typedef struct AdaptationSet {
    char id[10];
    int *streams;
    int nb_streams;
} AdaptationSet;

typedef struct WebMDashMuxContext {
    const AVClass *class;
    char *adaptation_sets;
    AdaptationSet *as;
    int nb_as;
    int representation_id;
    int is_live;
    int chunk_start_index;
    int chunk_duration;
    char *utc_timing_url;
    double time_shift_buffer_depth;
    int minimum_update_period;
    int debug_mode;
} WebMDashMuxContext;

static const char *get_codec_name(int codec_id)
{
    switch (codec_id) {
    case AV_CODEC_ID_VP8:
        return "vp8";
    case AV_CODEC_ID_VP9:
        return "vp9";
    case AV_CODEC_ID_VORBIS:
        return "vorbis";
    case AV_CODEC_ID_OPUS:
        return "opus";
    }
    return NULL;
}

static double get_duration(AVFormatContext *s)
{
    int i = 0;
    double max = 0.0;
    for (i = 0; i < s->nb_streams; i++) {
        AVDictionaryEntry *duration = av_dict_get(s->streams[i]->metadata,
                                                  DURATION, NULL, 0);
        if (!duration || atof(duration->value) < 0) continue;
        if (atof(duration->value) > max) max = atof(duration->value);
    }
    return max / 1000;
}

static int write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    double min_buffer_time = 1.0;
    avio_printf(s->pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    avio_printf(s->pb, "<MPD\n");
    avio_printf(s->pb, " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
    avio_printf(s->pb, " xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " type=\"%s\"\n", w->is_live ? "dynamic" : "static");
    if (!w->is_live) {
        avio_printf(s->pb, " mediaPresentationDuration=\"PT%gS\"\n",
                    get_duration(s));
    }
    avio_printf(s->pb, " minBufferTime=\"PT%gS\"\n", min_buffer_time);
    avio_printf(s->pb, " profiles=\"%s\"%s",
                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012",
                w->is_live ? "\n" : ">\n");
    if (w->is_live) {
        time_t local_time = time(NULL);
        struct tm gmt_buffer;
        struct tm *gmt = gmtime_r(&local_time, &gmt_buffer);
        char gmt_iso[21];
        if (!strftime(gmt_iso, 21, "%Y-%m-%dT%H:%M:%SZ", gmt)) {
            return AVERROR_UNKNOWN;
        }
        if (w->debug_mode) {
            av_strlcpy(gmt_iso, "", 1);
        }
        avio_printf(s->pb, " availabilityStartTime=\"%s\"\n", gmt_iso);
        avio_printf(s->pb, " timeShiftBufferDepth=\"PT%gS\"\n", w->time_shift_buffer_depth);
        avio_printf(s->pb, " minimumUpdatePeriod=\"PT%dS\"", w->minimum_update_period);
        avio_printf(s->pb, ">\n");
        if (w->utc_timing_url) {
            avio_printf(s->pb, "<UTCTiming\n");
            avio_printf(s->pb, " schemeIdUri=\"urn:mpeg:dash:utc:http-iso:2014\"\n");
            avio_printf(s->pb, " value=\"%s\"/>\n", w->utc_timing_url);
        }
    }
    return 0;
}

static void write_footer(AVFormatContext *s)
{
    avio_printf(s->pb, "</MPD>\n");
}

static int subsegment_alignment(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold = av_dict_get(s->streams[as->streams[0]]->metadata,
                                          CUE_TIMESTAMPS, NULL, 0);
    if (!gold) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *ts = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CUE_TIMESTAMPS, NULL, 0);
        if (!ts || strncmp(gold->value, ts->value, strlen(gold->value))) return 0;
    }
    return 1;
}

static int bitstream_switching(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold_track_num = av_dict_get(s->streams[as->streams[0]]->metadata,
                                                    TRACK_NUMBER, NULL, 0);
    AVCodecParameters *gold_par = s->streams[as->streams[0]]->codecpar;
    if (!gold_track_num) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *track_num = av_dict_get(s->streams[as->streams[i]]->metadata,
                                                   TRACK_NUMBER, NULL, 0);
        AVCodecParameters *par = s->streams[as->streams[i]]->codecpar;
        if (!track_num ||
            strncmp(gold_track_num->value, track_num->value, strlen(gold_track_num->value)) ||
            gold_par->codec_id != par->codec_id ||
            gold_par->extradata_size != par->extradata_size ||
            memcmp(gold_par->extradata, par->extradata, par->extradata_size)) {
            return 0;
        }
    }
    return 1;
}

/*
 * Writes a Representation within an Adaptation Set. Returns 0 on success and
 * < 0 on failure.
 */
static int write_representation(AVFormatContext *s, AVStream *stream, char *id,
                                int output_width, int output_height,
                                int output_sample_rate) {
    WebMDashMuxContext *w = s->priv_data;
    AVDictionaryEntry *irange = av_dict_get(stream->metadata, INITIALIZATION_RANGE, NULL, 0);
    AVDictionaryEntry *cues_start = av_dict_get(stream->metadata, CUES_START, NULL, 0);
    AVDictionaryEntry *cues_end = av_dict_get(stream->metadata, CUES_END, NULL, 0);
    AVDictionaryEntry *filename = av_dict_get(stream->metadata, FILENAME, NULL, 0);
    AVDictionaryEntry *bandwidth = av_dict_get(stream->metadata, BANDWIDTH, NULL, 0);
    const char *bandwidth_str;
    if ((w->is_live && (!filename)) ||
        (!w->is_live && (!irange || !cues_start || !cues_end || !filename || !bandwidth))) {
        return AVERROR_INVALIDDATA;
    }
    avio_printf(s->pb, "<Representation id=\"%s\"", id);
    // if bandwidth for live was not provided, use a default
    if (w->is_live && !bandwidth) {
        bandwidth_str = (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ? "128000" : "1000000";
    } else {
        bandwidth_str = bandwidth->value;
    }
    avio_printf(s->pb, " bandwidth=\"%s\"", bandwidth_str);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_width)
        avio_printf(s->pb, " width=\"%d\"", stream->codecpar->width);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_height)
        avio_printf(s->pb, " height=\"%d\"", stream->codecpar->height);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && output_sample_rate)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", stream->codecpar->sample_rate);
    if (w->is_live) {
        // For live streams, Codec and Mime Type always go in the Representation tag.
        avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(stream->codecpar->codec_id));
        avio_printf(s->pb, " mimeType=\"%s/webm\"",
                    stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        // For live streams, subsegments always start with key frames. So this
        // is always 1.
        avio_printf(s->pb, " startsWithSAP=\"1\"");
        avio_printf(s->pb, ">");
    } else {
        avio_printf(s->pb, ">\n");
        avio_printf(s->pb, "<BaseURL>%s</BaseURL>\n", filename->value);
        avio_printf(s->pb, "<SegmentBase\n");
        avio_printf(s->pb, " indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
        avio_printf(s->pb, "<Initialization\n");
        avio_printf(s->pb, " range=\"0-%s\" />\n", irange->value);
        avio_printf(s->pb, "</SegmentBase>\n");
    }
    avio_printf(s->pb, "</Representation>\n");
    return 0;
}

/*
 * Checks if width of all streams are the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_width(AVFormatContext *s, AdaptationSet *as) {
    int first_width, i;
    if (as->nb_streams < 2) return 1;
    first_width = s->streams[as->streams[0]]->codecpar->width;
    for (i = 1; i < as->nb_streams; i++)
        if (first_width != s->streams[as->streams[i]]->codecpar->width)
            return 0;
    return 1;
}

/*
 * Checks if height of all streams are the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_height(AVFormatContext *s, AdaptationSet *as) {
    int first_height, i;
    if (as->nb_streams < 2) return 1;
    first_height = s->streams[as->streams[0]]->codecpar->height;
    for (i = 1; i < as->nb_streams; i++)
        if (first_height != s->streams[as->streams[i]]->codecpar->height)
            return 0;
    return 1;
}

/*
 * Checks if sample rate of all streams are the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as) {
    int first_sample_rate, i;
    if (as->nb_streams < 2) return 1;
    first_sample_rate = s->streams[as->streams[0]]->codecpar->sample_rate;
    for (i = 1; i < as->nb_streams; i++)
        if (first_sample_rate != s->streams[as->streams[i]]->codecpar->sample_rate)
            return 0;
    return 1;
}

static void free_adaptation_sets(AVFormatContext *s) {
    WebMDashMuxContext *w = s->priv_data;
    int i;
    for (i = 0; i < w->nb_as; i++) {
        av_freep(&w->as[i].streams);
    }
    av_freep(&w->as);
    w->nb_as = 0;
}

/*
 * Parses a live header filename and computes the representation id,
 * initialization pattern and the media pattern. Pass NULL if you don't want to
 * compute any of those 3. Returns 0 on success and non-zero on failure.
 *
 * Name of the header file should conform to the following pattern:
 * <file_description>_<representation_id>.hdr where <file_description> can be
 * anything. The chunks should be named according to the following pattern:
 * <file_description>_<representation_id>_<chunk_number>.chk
 */
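/*
 * Illustrative example only (the file names are hypothetical): a header file
 * called "glass_360.hdr" would yield the representation id "360", the
 * initialization pattern "glass_$RepresentationID$.hdr" and the media pattern
 * "glass_$RepresentationID$_$Number$.chk".
 */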
static int parse_filename(char *filename, char **representation_id,
                          char **initialization_pattern, char **media_pattern) {
    char *underscore_pos = NULL;
    char *period_pos = NULL;
    char *temp_pos = NULL;
    char *filename_str = av_strdup(filename);
    int ret = 0;
    if (!filename_str)
        return AVERROR(ENOMEM);
    // Find the last underscore; the representation id follows it.
    temp_pos = av_stristr(filename_str, "_");
    while (temp_pos) {
        underscore_pos = temp_pos + 1;
        temp_pos = av_stristr(temp_pos + 1, "_");
    }
    if (!underscore_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    period_pos = av_stristr(underscore_pos, ".");
    if (!period_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    *(underscore_pos - 1) = 0;
    if (representation_id) {
        *representation_id = av_malloc(period_pos - underscore_pos + 1);
        if (!(*representation_id)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        av_strlcpy(*representation_id, underscore_pos, period_pos - underscore_pos + 1);
    }
    if (initialization_pattern) {
        *initialization_pattern = av_asprintf("%s_$RepresentationID$.hdr",
                                              filename_str);
        if (!(*initialization_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
    if (media_pattern) {
        *media_pattern = av_asprintf("%s_$RepresentationID$_$Number$.chk",
                                     filename_str);
        if (!(*media_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
end:
    // Free the duplicated filename on all paths to avoid leaking it on error.
    av_freep(&filename_str);
    return ret;
}

/*
 * Writes an Adaptation Set. Returns 0 on success and < 0 on failure.
 */
static int write_adaptation_set(AVFormatContext *s, int as_index)
{
    WebMDashMuxContext *w = s->priv_data;
    AdaptationSet *as = &w->as[as_index];
    AVCodecParameters *par = s->streams[as->streams[0]]->codecpar;
    AVDictionaryEntry *lang;
    int i;
    static const char boolean[2][6] = { "false", "true" };
    int subsegmentStartsWithSAP = 1;

    // Width, Height and Sample Rate will go in the AdaptationSet tag if they
    // are the same for all contained Representations. Otherwise, they will go
    // on their respective Representation tag. For live streams, they always go
    // in the Representation tag.
    int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
    if (par->codec_type == AVMEDIA_TYPE_VIDEO) {
        width_in_as = !w->is_live && check_matching_width(s, as);
        height_in_as = !w->is_live && check_matching_height(s, as);
    } else {
        sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as);
    }

    avio_printf(s->pb, "<AdaptationSet id=\"%s\"", as->id);
    avio_printf(s->pb, " mimeType=\"%s/webm\"",
                par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(par->codec_id));

    lang = av_dict_get(s->streams[as->streams[0]]->metadata, "language", NULL, 0);
    if (lang) avio_printf(s->pb, " lang=\"%s\"", lang->value);

    if (par->codec_type == AVMEDIA_TYPE_VIDEO && width_in_as)
        avio_printf(s->pb, " width=\"%d\"", par->width);
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && height_in_as)
        avio_printf(s->pb, " height=\"%d\"", par->height);
    if (par->codec_type == AVMEDIA_TYPE_AUDIO && sample_rate_in_as)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", par->sample_rate);

    avio_printf(s->pb, " bitstreamSwitching=\"%s\"",
                boolean[bitstream_switching(s, as)]);
    avio_printf(s->pb, " subsegmentAlignment=\"%s\"",
                boolean[w->is_live || subsegment_alignment(s, as)]);

    for (i = 0; i < as->nb_streams; i++) {
        AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CLUSTER_KEYFRAME, NULL, 0);
        if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1))) subsegmentStartsWithSAP = 0;
    }
    avio_printf(s->pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
    avio_printf(s->pb, ">\n");
    if (w->is_live) {
        AVDictionaryEntry *filename =
            av_dict_get(s->streams[as->streams[0]]->metadata, FILENAME, NULL, 0);
        char *initialization_pattern = NULL;
        char *media_pattern = NULL;
        int ret;
        // The filename entry may be absent; check it before dereferencing.
        if (!filename)
            return AVERROR(EINVAL);
        ret = parse_filename(filename->value, NULL, &initialization_pattern,
                             &media_pattern);
        if (ret) return ret;
        avio_printf(s->pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
                    par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        avio_printf(s->pb, "<SegmentTemplate");
        avio_printf(s->pb, " timescale=\"1000\"");
        avio_printf(s->pb, " duration=\"%d\"", w->chunk_duration);
        avio_printf(s->pb, " media=\"%s\"", media_pattern);
        avio_printf(s->pb, " startNumber=\"%d\"", w->chunk_start_index);
        avio_printf(s->pb, " initialization=\"%s\"", initialization_pattern);
        avio_printf(s->pb, "/>\n");
        av_free(initialization_pattern);
        av_free(media_pattern);
    }
    for (i = 0; i < as->nb_streams; i++) {
        char *representation_id = NULL;
        int ret;
        if (w->is_live) {
            AVDictionaryEntry *filename =
                av_dict_get(s->streams[as->streams[i]]->metadata, FILENAME, NULL, 0);
            if (!filename)
                return AVERROR(EINVAL);
            if (ret = parse_filename(filename->value, &representation_id, NULL, NULL))
                return ret;
        } else {
            representation_id = av_asprintf("%d", w->representation_id++);
            if (!representation_id) return AVERROR(ENOMEM);
        }
        ret = write_representation(s, s->streams[as->streams[i]],
                                   representation_id, !width_in_as,
                                   !height_in_as, !sample_rate_in_as);
        av_free(representation_id);
        if (ret) return ret;
    }
    avio_printf(s->pb, "</AdaptationSet>\n");
    return 0;
}

static int to_integer(char *p, int len)
{
    int ret;
    char *q = av_malloc(sizeof(char) * len);
    if (!q)
        return AVERROR(ENOMEM);
    av_strlcpy(q, p, len);
    ret = atoi(q);
    av_free(q);
    return ret;
}

static int parse_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    char *p = w->adaptation_sets;
    char *q;
    enum { new_set, parsed_id, parsing_streams } state;
    // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
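    // For example, "id=0,streams=0,1,2 id=1,streams=3,4" produces two
    // adaptation sets: the first containing streams 0, 1 and 2, and the
    // second containing streams 3 and 4.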
    state = new_set;
    while (p < w->adaptation_sets + strlen(w->adaptation_sets)) {
        if (*p == ' ') {
            // Skip separator spaces; advance the pointer so a stray space
            // cannot make this loop spin forever.
            p++;
            continue;
        } else if (state == new_set && !strncmp(p, "id=", 3)) {
            void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1));
            if (mem == NULL)
                return AVERROR(ENOMEM);
            w->as = mem;
            ++w->nb_as;
            w->as[w->nb_as - 1].nb_streams = 0;
            w->as[w->nb_as - 1].streams = NULL;
            p += 3; // consume "id="
            q = w->as[w->nb_as - 1].id;
            while (*p != ',') *q++ = *p++;
            *q = 0;
            p++;
            state = parsed_id;
        } else if (state == parsed_id && !strncmp(p, "streams=", 8)) {
            p += 8; // consume "streams="
            state = parsing_streams;
        } else if (state == parsing_streams) {
            struct AdaptationSet *as = &w->as[w->nb_as - 1];
            q = p;
            while (*q != '\0' && *q != ',' && *q != ' ') q++;
            as->streams = av_realloc(as->streams, sizeof(*as->streams) * ++as->nb_streams);
            if (as->streams == NULL)
                return AVERROR(ENOMEM);
            as->streams[as->nb_streams - 1] = to_integer(p, q - p + 1);
            if (as->streams[as->nb_streams - 1] < 0) return -1;
            if (*q == '\0') break;
            if (*q == ' ') state = new_set;
            p = ++q;
        } else {
            return -1;
        }
    }
    return 0;
}

static int webm_dash_manifest_write_header(AVFormatContext *s)
{
    int i;
    double start = 0.0;
    int ret;
    WebMDashMuxContext *w = s->priv_data;
    ret = parse_adaptation_sets(s);
    if (ret < 0) {
        goto fail;
    }
    ret = write_header(s);
    if (ret < 0) {
        goto fail;
    }
    avio_printf(s->pb, "<Period id=\"0\"");
    avio_printf(s->pb, " start=\"PT%gS\"", start);
    if (!w->is_live) {
        avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
    }
    avio_printf(s->pb, " >\n");
    for (i = 0; i < w->nb_as; i++) {
        ret = write_adaptation_set(s, i);
        if (ret < 0) {
            goto fail;
        }
    }
    avio_printf(s->pb, "</Period>\n");
    write_footer(s);
fail:
    free_adaptation_sets(s);
    return ret < 0 ? ret : 0;
}

static int webm_dash_manifest_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF;
}

static int webm_dash_manifest_write_trailer(AVFormatContext *s)
{
    free_adaptation_sets(s);
    return 0;
}

#define OFFSET(x) offsetof(WebMDashMuxContext, x)
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "debug_mode", "[private option - users should never set this]. Create deterministic output", OFFSET(debug_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "live", "create a live stream manifest", OFFSET(is_live), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_start_index", "start index of the chunk", OFFSET(chunk_start_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_duration_ms", "duration of each chunk (in milliseconds)", OFFSET(chunk_duration), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.", OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "minimum_update_period", "Minimum Update Period (in seconds) of the manifest.", OFFSET(minimum_update_period), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};
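
/*
 * Rough usage sketch (assumed input file names; the WebM inputs are expected
 * to have been produced for DASH so that they carry the metadata this muxer
 * reads, e.g. cues and bandwidth):
 *
 *   ffmpeg -f webm_dash_manifest -i video.webm \
 *          -f webm_dash_manifest -i audio.webm \
 *          -c copy -map 0 -map 1 \
 *          -f webm_dash_manifest \
 *          -adaptation_sets "id=0,streams=0 id=1,streams=1" \
 *          manifest.xml
 */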

#if CONFIG_WEBM_DASH_MANIFEST_MUXER
static const AVClass webm_dash_class = {
    .class_name = "WebM DASH Manifest muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_webm_dash_manifest_muxer = {
    .name              = "webm_dash_manifest",
    .long_name         = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
    .mime_type         = "application/xml",
    .extensions        = "xml",
    .priv_data_size    = sizeof(WebMDashMuxContext),
    .write_header      = webm_dash_manifest_write_header,
    .write_packet      = webm_dash_manifest_write_packet,
    .write_trailer     = webm_dash_manifest_write_trailer,
    .priv_class        = &webm_dash_class,
};
#endif