You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

592 lines
23KB

  1. /*
  2. * WebM DASH Manifest XML muxer
  3. * Copyright (c) 2014 Vignesh Venkatasubramanian
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /*
  22. * WebM DASH Specification:
  23. * https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification
  24. * ISO DASH Specification:
  25. * http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
  26. */
#include <float.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include "avformat.h"
#include "avio_internal.h"
#include "matroska.h"

#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time_internal.h"
/* One DASH AdaptationSet: a group of streams that are alternative encodings
 * of the same content (e.g. multiple bitrates of one video). */
typedef struct AdaptationSet {
    char id[10];     // value of the "id=" token from the adaptation_sets option
    int *streams;    // indices into AVFormatContext.streams belonging to this set
    int nb_streams;  // number of entries in streams
} AdaptationSet;
/* Private context of the WebM DASH manifest muxer. */
typedef struct WebMDashMuxContext {
    const AVClass *class;
    char *adaptation_sets;          // raw option string: "id=0,streams=0,1 id=1,streams=2"
    AdaptationSet *as;              // parsed adaptation sets
    int nb_as;                      // number of parsed adaptation sets
    int representation_id;          // next auto-assigned Representation id (VOD path only)
    int is_live;                    // 1 -> dynamic (live) manifest, 0 -> static (on-demand)
    int chunk_start_index;          // startNumber for SegmentTemplate (live)
    int chunk_duration;             // chunk duration in ms (live)
    char *utc_timing_url;           // optional UTCTiming value attribute (live)
    double time_shift_buffer_depth; // timeShiftBufferDepth in seconds (live)
    int minimum_update_period;      // minimumUpdatePeriod in seconds (live)
    int debug_mode;                 // suppress wall-clock timestamp for deterministic output
} WebMDashMuxContext;
  56. static const char *get_codec_name(int codec_id)
  57. {
  58. switch (codec_id) {
  59. case AV_CODEC_ID_VP8:
  60. return "vp8";
  61. case AV_CODEC_ID_VP9:
  62. return "vp9";
  63. case AV_CODEC_ID_VORBIS:
  64. return "vorbis";
  65. case AV_CODEC_ID_OPUS:
  66. return "opus";
  67. }
  68. return NULL;
  69. }
  70. static double get_duration(AVFormatContext *s)
  71. {
  72. int i = 0;
  73. double max = 0.0;
  74. for (i = 0; i < s->nb_streams; i++) {
  75. AVDictionaryEntry *duration = av_dict_get(s->streams[i]->metadata,
  76. DURATION, NULL, 0);
  77. if (!duration || atof(duration->value) < 0) continue;
  78. if (atof(duration->value) > max) max = atof(duration->value);
  79. }
  80. return max / 1000;
  81. }
/*
 * Writes the manifest preamble: the XML declaration and the opening <MPD>
 * element with its attributes. For live manifests also writes availability
 * and update attributes plus an optional <UTCTiming> element.
 * Returns 0 on success and < 0 on failure.
 */
static int write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    double min_buffer_time = 1.0;
    avio_printf(s->pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    avio_printf(s->pb, "<MPD\n");
    avio_printf(s->pb, " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
    avio_printf(s->pb, " xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " type=\"%s\"\n", w->is_live ? "dynamic" : "static");
    if (!w->is_live) {
        // A static manifest states the full presentation duration up front.
        avio_printf(s->pb, " mediaPresentationDuration=\"PT%gS\"\n",
                    get_duration(s));
    }
    avio_printf(s->pb, " minBufferTime=\"PT%gS\"\n", min_buffer_time);
    // For VOD this also closes the MPD start tag (">\n"); for live the tag
    // stays open because more attributes follow below.
    avio_printf(s->pb, " profiles=\"%s\"%s",
                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012",
                w->is_live ? "\n" : ">\n");
    if (w->is_live) {
        // availabilityStartTime must be wall-clock UTC in ISO-8601 form.
        time_t local_time = time(NULL);
        struct tm gmt_buffer;
        struct tm *gmt = gmtime_r(&local_time, &gmt_buffer);
        char gmt_iso[21];
        if (!strftime(gmt_iso, 21, "%Y-%m-%dT%H:%M:%SZ", gmt)) {
            return AVERROR_UNKNOWN;
        }
        if (w->debug_mode) {
            // Blank the timestamp so test output is deterministic.
            av_strlcpy(gmt_iso, "", 1);
        }
        avio_printf(s->pb, " availabilityStartTime=\"%s\"\n", gmt_iso);
        avio_printf(s->pb, " timeShiftBufferDepth=\"PT%gS\"\n", w->time_shift_buffer_depth);
        avio_printf(s->pb, " minimumUpdatePeriod=\"PT%dS\"", w->minimum_update_period);
        avio_printf(s->pb, ">\n");
        if (w->utc_timing_url) {
            avio_printf(s->pb, "<UTCTiming\n");
            avio_printf(s->pb, " schemeIdUri=\"urn:mpeg:dash:utc:http-iso:2014\"\n");
            avio_printf(s->pb, " value=\"%s\"/>\n", w->utc_timing_url);
        }
    }
    return 0;
}
  123. static void write_footer(AVFormatContext *s)
  124. {
  125. avio_printf(s->pb, "</MPD>\n");
  126. }
  127. static int subsegment_alignment(AVFormatContext *s, AdaptationSet *as) {
  128. int i;
  129. AVDictionaryEntry *gold = av_dict_get(s->streams[as->streams[0]]->metadata,
  130. CUE_TIMESTAMPS, NULL, 0);
  131. if (!gold) return 0;
  132. for (i = 1; i < as->nb_streams; i++) {
  133. AVDictionaryEntry *ts = av_dict_get(s->streams[as->streams[i]]->metadata,
  134. CUE_TIMESTAMPS, NULL, 0);
  135. if (!ts || strncmp(gold->value, ts->value, strlen(gold->value))) return 0;
  136. }
  137. return 1;
  138. }
  139. static int bitstream_switching(AVFormatContext *s, AdaptationSet *as) {
  140. int i;
  141. AVDictionaryEntry *gold_track_num = av_dict_get(s->streams[as->streams[0]]->metadata,
  142. TRACK_NUMBER, NULL, 0);
  143. AVCodecParameters *gold_par = s->streams[as->streams[0]]->codecpar;
  144. if (!gold_track_num) return 0;
  145. for (i = 1; i < as->nb_streams; i++) {
  146. AVDictionaryEntry *track_num = av_dict_get(s->streams[as->streams[i]]->metadata,
  147. TRACK_NUMBER, NULL, 0);
  148. AVCodecParameters *par = s->streams[as->streams[i]]->codecpar;
  149. if (!track_num ||
  150. strncmp(gold_track_num->value, track_num->value, strlen(gold_track_num->value)) ||
  151. gold_par->codec_id != par->codec_id ||
  152. gold_par->extradata_size != par->extradata_size ||
  153. memcmp(gold_par->extradata, par->extradata, par->extradata_size)) {
  154. return 0;
  155. }
  156. }
  157. return 1;
  158. }
/*
 * Writes a Representation within an Adaptation Set. Returns 0 on success and
 * < 0 on failure.
 */
static int write_representation(AVFormatContext *s, AVStream *stream, char *id,
                                int output_width, int output_height,
                                int output_sample_rate) {
    WebMDashMuxContext *w = s->priv_data;
    // Per-track metadata produced by the WebM DASH demuxer/muxer pipeline.
    AVDictionaryEntry *irange = av_dict_get(stream->metadata, INITIALIZATION_RANGE, NULL, 0);
    AVDictionaryEntry *cues_start = av_dict_get(stream->metadata, CUES_START, NULL, 0);
    AVDictionaryEntry *cues_end = av_dict_get(stream->metadata, CUES_END, NULL, 0);
    AVDictionaryEntry *filename = av_dict_get(stream->metadata, FILENAME, NULL, 0);
    AVDictionaryEntry *bandwidth = av_dict_get(stream->metadata, BANDWIDTH, NULL, 0);
    const char *bandwidth_str;
    // VOD needs the byte-range metadata for SegmentBase; live only needs the
    // filename (bandwidth is defaulted below when absent).
    if ((w->is_live && (!filename)) ||
        (!w->is_live && (!irange || !cues_start || !cues_end || !filename || !bandwidth))) {
        return AVERROR_INVALIDDATA;
    }
    avio_printf(s->pb, "<Representation id=\"%s\"", id);
    // if bandwidth for live was not provided, use a default
    if (w->is_live && !bandwidth) {
        bandwidth_str = (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ? "128000" : "1000000";
    } else {
        bandwidth_str = bandwidth->value;
    }
    avio_printf(s->pb, " bandwidth=\"%s\"", bandwidth_str);
    // A zero output_* flag means the attribute was already emitted on the
    // AdaptationSet (identical for all streams), so it is skipped here.
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_width)
        avio_printf(s->pb, " width=\"%d\"", stream->codecpar->width);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_height)
        avio_printf(s->pb, " height=\"%d\"", stream->codecpar->height);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && output_sample_rate)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", stream->codecpar->sample_rate);
    if (w->is_live) {
        // For live streams, Codec and Mime Type always go in the Representation tag.
        // NOTE(review): get_codec_name() returns NULL for codecs other than
        // vp8/vp9/vorbis/opus; confirm callers never reach here with another
        // codec, otherwise this passes NULL to a %s conversion.
        avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(stream->codecpar->codec_id));
        avio_printf(s->pb, " mimeType=\"%s/webm\"",
                    stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        // For live streams, subsegments always start with key frames. So this
        // is always 1.
        avio_printf(s->pb, " startsWithSAP=\"1\"");
        avio_printf(s->pb, ">");
    } else {
        avio_printf(s->pb, ">\n");
        avio_printf(s->pb, "<BaseURL>%s</BaseURL>\n", filename->value);
        avio_printf(s->pb, "<SegmentBase\n");
        avio_printf(s->pb, " indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
        avio_printf(s->pb, "<Initialization\n");
        avio_printf(s->pb, " range=\"0-%s\" />\n", irange->value);
        avio_printf(s->pb, "</SegmentBase>\n");
    }
    avio_printf(s->pb, "</Representation>\n");
    return 0;
}
  212. /*
  213. * Checks if width of all streams are the same. Returns 1 if true, 0 otherwise.
  214. */
  215. static int check_matching_width(AVFormatContext *s, AdaptationSet *as) {
  216. int first_width, i;
  217. if (as->nb_streams < 2) return 1;
  218. first_width = s->streams[as->streams[0]]->codecpar->width;
  219. for (i = 1; i < as->nb_streams; i++)
  220. if (first_width != s->streams[as->streams[i]]->codecpar->width)
  221. return 0;
  222. return 1;
  223. }
  224. /*
  225. * Checks if height of all streams are the same. Returns 1 if true, 0 otherwise.
  226. */
  227. static int check_matching_height(AVFormatContext *s, AdaptationSet *as) {
  228. int first_height, i;
  229. if (as->nb_streams < 2) return 1;
  230. first_height = s->streams[as->streams[0]]->codecpar->height;
  231. for (i = 1; i < as->nb_streams; i++)
  232. if (first_height != s->streams[as->streams[i]]->codecpar->height)
  233. return 0;
  234. return 1;
  235. }
  236. /*
  237. * Checks if sample rate of all streams are the same. Returns 1 if true, 0 otherwise.
  238. */
  239. static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as) {
  240. int first_sample_rate, i;
  241. if (as->nb_streams < 2) return 1;
  242. first_sample_rate = s->streams[as->streams[0]]->codecpar->sample_rate;
  243. for (i = 1; i < as->nb_streams; i++)
  244. if (first_sample_rate != s->streams[as->streams[i]]->codecpar->sample_rate)
  245. return 0;
  246. return 1;
  247. }
  248. static void free_adaptation_sets(AVFormatContext *s) {
  249. WebMDashMuxContext *w = s->priv_data;
  250. int i;
  251. for (i = 0; i < w->nb_as; i++) {
  252. av_freep(&w->as[i].streams);
  253. }
  254. av_freep(&w->as);
  255. w->nb_as = 0;
  256. }
  257. /*
  258. * Parses a live header filename and computes the representation id,
  259. * initialization pattern and the media pattern. Pass NULL if you don't want to
  260. * compute any of those 3. Returns 0 on success and non-zero on failure.
  261. *
  262. * Name of the header file should conform to the following pattern:
  263. * <file_description>_<representation_id>.hdr where <file_description> can be
  264. * anything. The chunks should be named according to the following pattern:
  265. * <file_description>_<representation_id>_<chunk_number>.chk
  266. */
  267. static int parse_filename(char *filename, char **representation_id,
  268. char **initialization_pattern, char **media_pattern) {
  269. char *underscore_pos = NULL;
  270. char *period_pos = NULL;
  271. char *temp_pos = NULL;
  272. char *filename_str = av_strdup(filename);
  273. int ret = 0;
  274. if (!filename_str) {
  275. ret = AVERROR(ENOMEM);
  276. goto end;
  277. }
  278. temp_pos = av_stristr(filename_str, "_");
  279. while (temp_pos) {
  280. underscore_pos = temp_pos + 1;
  281. temp_pos = av_stristr(temp_pos + 1, "_");
  282. }
  283. if (!underscore_pos) {
  284. ret = AVERROR_INVALIDDATA;
  285. goto end;
  286. }
  287. period_pos = av_stristr(underscore_pos, ".");
  288. if (!period_pos) {
  289. ret = AVERROR_INVALIDDATA;
  290. goto end;
  291. }
  292. *(underscore_pos - 1) = 0;
  293. if (representation_id) {
  294. *representation_id = av_malloc(period_pos - underscore_pos + 1);
  295. if (!(*representation_id)) {
  296. ret = AVERROR(ENOMEM);
  297. goto end;
  298. }
  299. av_strlcpy(*representation_id, underscore_pos, period_pos - underscore_pos + 1);
  300. }
  301. if (initialization_pattern) {
  302. *initialization_pattern = av_asprintf("%s_$RepresentationID$.hdr",
  303. filename_str);
  304. if (!(*initialization_pattern)) {
  305. ret = AVERROR(ENOMEM);
  306. goto end;
  307. }
  308. }
  309. if (media_pattern) {
  310. *media_pattern = av_asprintf("%s_$RepresentationID$_$Number$.chk",
  311. filename_str);
  312. if (!(*media_pattern)) {
  313. ret = AVERROR(ENOMEM);
  314. goto end;
  315. }
  316. }
  317. end:
  318. av_freep(&filename_str);
  319. return ret;
  320. }
  321. /*
  322. * Writes an Adaptation Set. Returns 0 on success and < 0 on failure.
  323. */
  324. static int write_adaptation_set(AVFormatContext *s, int as_index)
  325. {
  326. WebMDashMuxContext *w = s->priv_data;
  327. AdaptationSet *as = &w->as[as_index];
  328. AVCodecParameters *par = s->streams[as->streams[0]]->codecpar;
  329. AVDictionaryEntry *lang;
  330. int i;
  331. static const char boolean[2][6] = { "false", "true" };
  332. int subsegmentStartsWithSAP = 1;
  333. // Width, Height and Sample Rate will go in the AdaptationSet tag if they
  334. // are the same for all contained Representations. otherwise, they will go
  335. // on their respective Representation tag. For live streams, they always go
  336. // in the Representation tag.
  337. int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
  338. if (par->codec_type == AVMEDIA_TYPE_VIDEO) {
  339. width_in_as = !w->is_live && check_matching_width(s, as);
  340. height_in_as = !w->is_live && check_matching_height(s, as);
  341. } else {
  342. sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as);
  343. }
  344. avio_printf(s->pb, "<AdaptationSet id=\"%s\"", as->id);
  345. avio_printf(s->pb, " mimeType=\"%s/webm\"",
  346. par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
  347. avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(par->codec_id));
  348. lang = av_dict_get(s->streams[as->streams[0]]->metadata, "language", NULL, 0);
  349. if (lang) avio_printf(s->pb, " lang=\"%s\"", lang->value);
  350. if (par->codec_type == AVMEDIA_TYPE_VIDEO && width_in_as)
  351. avio_printf(s->pb, " width=\"%d\"", par->width);
  352. if (par->codec_type == AVMEDIA_TYPE_VIDEO && height_in_as)
  353. avio_printf(s->pb, " height=\"%d\"", par->height);
  354. if (par->codec_type == AVMEDIA_TYPE_AUDIO && sample_rate_in_as)
  355. avio_printf(s->pb, " audioSamplingRate=\"%d\"", par->sample_rate);
  356. avio_printf(s->pb, " bitstreamSwitching=\"%s\"",
  357. boolean[bitstream_switching(s, as)]);
  358. avio_printf(s->pb, " subsegmentAlignment=\"%s\"",
  359. boolean[w->is_live || subsegment_alignment(s, as)]);
  360. for (i = 0; i < as->nb_streams; i++) {
  361. AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata,
  362. CLUSTER_KEYFRAME, NULL, 0);
  363. if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1))) subsegmentStartsWithSAP = 0;
  364. }
  365. avio_printf(s->pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
  366. avio_printf(s->pb, ">\n");
  367. if (w->is_live) {
  368. AVDictionaryEntry *filename =
  369. av_dict_get(s->streams[as->streams[0]]->metadata, FILENAME, NULL, 0);
  370. char *initialization_pattern = NULL;
  371. char *media_pattern = NULL;
  372. int ret = parse_filename(filename->value, NULL, &initialization_pattern,
  373. &media_pattern);
  374. if (ret) return ret;
  375. avio_printf(s->pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
  376. par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
  377. avio_printf(s->pb, "<SegmentTemplate");
  378. avio_printf(s->pb, " timescale=\"1000\"");
  379. avio_printf(s->pb, " duration=\"%d\"", w->chunk_duration);
  380. avio_printf(s->pb, " media=\"%s\"", media_pattern);
  381. avio_printf(s->pb, " startNumber=\"%d\"", w->chunk_start_index);
  382. avio_printf(s->pb, " initialization=\"%s\"", initialization_pattern);
  383. avio_printf(s->pb, "/>\n");
  384. av_free(initialization_pattern);
  385. av_free(media_pattern);
  386. }
  387. for (i = 0; i < as->nb_streams; i++) {
  388. char *representation_id = NULL;
  389. int ret;
  390. if (w->is_live) {
  391. AVDictionaryEntry *filename =
  392. av_dict_get(s->streams[as->streams[i]]->metadata, FILENAME, NULL, 0);
  393. if (!filename)
  394. return AVERROR(EINVAL);
  395. if (ret = parse_filename(filename->value, &representation_id, NULL, NULL))
  396. return ret;
  397. } else {
  398. representation_id = av_asprintf("%d", w->representation_id++);
  399. if (!representation_id) return AVERROR(ENOMEM);
  400. }
  401. ret = write_representation(s, s->streams[as->streams[i]],
  402. representation_id, !width_in_as,
  403. !height_in_as, !sample_rate_in_as);
  404. av_free(representation_id);
  405. if (ret) return ret;
  406. }
  407. avio_printf(s->pb, "</AdaptationSet>\n");
  408. return 0;
  409. }
  410. static int to_integer(char *p, int len)
  411. {
  412. int ret;
  413. char *q = av_malloc(sizeof(char) * len);
  414. if (!q)
  415. return AVERROR(ENOMEM);
  416. av_strlcpy(q, p, len);
  417. ret = atoi(q);
  418. av_free(q);
  419. return ret;
  420. }
  421. static int parse_adaptation_sets(AVFormatContext *s)
  422. {
  423. WebMDashMuxContext *w = s->priv_data;
  424. char *p = w->adaptation_sets;
  425. char *q;
  426. enum { new_set, parsed_id, parsing_streams } state;
  427. if (!w->adaptation_sets) {
  428. av_log(s, AV_LOG_ERROR, "The 'adaptation_sets' option must be set.\n");
  429. return AVERROR(EINVAL);
  430. }
  431. // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
  432. state = new_set;
  433. while (p < w->adaptation_sets + strlen(w->adaptation_sets)) {
  434. if (*p == ' ')
  435. continue;
  436. else if (state == new_set && !strncmp(p, "id=", 3)) {
  437. void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1));
  438. const char *comma;
  439. if (mem == NULL)
  440. return AVERROR(ENOMEM);
  441. w->as = mem;
  442. ++w->nb_as;
  443. w->as[w->nb_as - 1].nb_streams = 0;
  444. w->as[w->nb_as - 1].streams = NULL;
  445. p += 3; // consume "id="
  446. q = w->as[w->nb_as - 1].id;
  447. comma = strchr(p, ',');
  448. if (!comma || comma - p >= sizeof(w->as[w->nb_as - 1].id)) {
  449. av_log(s, AV_LOG_ERROR, "'id' in 'adaptation_sets' is malformed.\n");
  450. return AVERROR(EINVAL);
  451. }
  452. while (*p != ',') *q++ = *p++;
  453. *q = 0;
  454. p++;
  455. state = parsed_id;
  456. } else if (state == parsed_id && !strncmp(p, "streams=", 8)) {
  457. p += 8; // consume "streams="
  458. state = parsing_streams;
  459. } else if (state == parsing_streams) {
  460. struct AdaptationSet *as = &w->as[w->nb_as - 1];
  461. q = p;
  462. while (*q != '\0' && *q != ',' && *q != ' ') q++;
  463. as->streams = av_realloc(as->streams, sizeof(*as->streams) * ++as->nb_streams);
  464. if (as->streams == NULL)
  465. return AVERROR(ENOMEM);
  466. as->streams[as->nb_streams - 1] = to_integer(p, q - p + 1);
  467. if (as->streams[as->nb_streams - 1] < 0 ||
  468. as->streams[as->nb_streams - 1] >= s->nb_streams) {
  469. av_log(s, AV_LOG_ERROR, "Invalid value for 'streams' in adapation_sets.\n");
  470. return AVERROR(EINVAL);
  471. }
  472. if (*q == '\0') break;
  473. if (*q == ' ') state = new_set;
  474. p = ++q;
  475. } else {
  476. return -1;
  477. }
  478. }
  479. return 0;
  480. }
/*
 * Muxer write_header callback: parses the adaptation_sets option, then emits
 * the complete manifest (MPD header, a single Period containing every
 * AdaptationSet, and the footer). All output happens here; packets are not
 * consumed by this muxer.
 */
static int webm_dash_manifest_write_header(AVFormatContext *s)
{
    int i;
    double start = 0.0;
    int ret;
    WebMDashMuxContext *w = s->priv_data;
    ret = parse_adaptation_sets(s);
    if (ret < 0) {
        goto fail;
    }
    ret = write_header(s);
    if (ret < 0) {
        goto fail;
    }
    avio_printf(s->pb, "<Period id=\"0\"");
    avio_printf(s->pb, " start=\"PT%gS\"", start);
    if (!w->is_live) {
        avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
    }
    avio_printf(s->pb, " >\n");
    for (i = 0; i < w->nb_as; i++) {
        ret = write_adaptation_set(s, i);
        if (ret < 0) {
            goto fail;
        }
    }
    avio_printf(s->pb, "</Period>\n");
    write_footer(s);
fail:
    // Freed on success and failure alike; free_adaptation_sets() resets its
    // state, so the later write_trailer call repeating it is harmless.
    free_adaptation_sets(s);
    return ret < 0 ? ret : 0;
}
/* No packets are expected: the manifest is produced entirely in the header
 * callback, so any packet terminates the muxing process. */
static int webm_dash_manifest_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF;
}
/* Frees the parsed adaptation sets (safe even when write_header already
 * released them, since free_adaptation_sets() resets its state). */
static int webm_dash_manifest_write_trailer(AVFormatContext *s)
{
    free_adaptation_sets(s);
    return 0;
}
#define OFFSET(x) offsetof(WebMDashMuxContext, x)
/* Muxer private options; all are encoding parameters. The live-only options
 * (chunk_*, utc_timing_url, time_shift_buffer_depth, minimum_update_period)
 * only affect output when "live" is set. */
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "debug_mode", "[private option - users should never set this]. Create deterministic output", OFFSET(debug_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "live", "create a live stream manifest", OFFSET(is_live), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_start_index", "start index of the chunk", OFFSET(chunk_start_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_duration_ms", "duration of each chunk (in milliseconds)", OFFSET(chunk_duration), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.", OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "minimum_update_period", "Minimum Update Period (in seconds) of the manifest.", OFFSET(minimum_update_period), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};
#if CONFIG_WEBM_DASH_MANIFEST_MUXER
static const AVClass webm_dash_class = {
    .class_name = "WebM DASH Manifest muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Manifest-only "muxer": consumes no media packets, writes an XML MPD
 * document describing previously muxed WebM files. */
AVOutputFormat ff_webm_dash_manifest_muxer = {
    .name              = "webm_dash_manifest",
    .long_name         = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
    .mime_type         = "application/xml",
    .extensions        = "xml",
    .priv_data_size    = sizeof(WebMDashMuxContext),
    .write_header      = webm_dash_manifest_write_header,
    .write_packet      = webm_dash_manifest_write_packet,
    .write_trailer     = webm_dash_manifest_write_trailer,
    .priv_class        = &webm_dash_class,
};
#endif