/*
 * WebM DASH Manifest XML muxer
 * Copyright (c) 2014 Vignesh Venkatasubramanian
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/*
 * WebM DASH Specification:
 * https://sites.google.com/a/webmproject.org/wiki/adaptive-streaming/webm-dash-specification
 * ISO DASH Specification:
 * http://standards.iso.org/ittf/PubliclyAvailableStandards/c065274_ISO_IEC_23009-1_2014.zip
 */
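
/*
 * This muxer only emits the DASH manifest (MPD) XML; the WebM files
 * themselves must already exist and are read through the webm_dash_manifest
 * demuxer, which exposes the cue/bandwidth metadata consumed below.
 * An illustrative invocation (file names and stream indices are
 * placeholders):
 *
 *   ffmpeg -f webm_dash_manifest -i video_360.webm \
 *          -f webm_dash_manifest -i video_720.webm \
 *          -f webm_dash_manifest -i audio.webm \
 *          -c copy -map 0 -map 1 -map 2 \
 *          -f webm_dash_manifest \
 *          -adaptation_sets "id=0,streams=0,1 id=1,streams=2" \
 *          manifest.xml
 */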
#include <float.h>
#include <stdint.h>
#include <string.h>

#include "avformat.h"
#include "matroska.h"

#include "libavutil/avstring.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/time_internal.h"

typedef struct AdaptationSet {
    char id[10];
    int *streams;
    int nb_streams;
} AdaptationSet;

typedef struct WebMDashMuxContext {
    const AVClass *class;
    char *adaptation_sets;
    AdaptationSet *as;
    int nb_as;
    int representation_id;
    int is_live;
    int chunk_start_index;
    int chunk_duration;
    char *utc_timing_url;
    double time_shift_buffer_depth;
    int minimum_update_period;
} WebMDashMuxContext;
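
/* Returns the short libavcodec descriptor name (e.g. "vp9") for a codec id. */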
static const char *get_codec_name(int codec_id)
{
    return avcodec_descriptor_get(codec_id)->name;
}
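
/*
 * Returns the largest DURATION metadata value among all streams, converted
 * to seconds (the stored value is interpreted as milliseconds, hence the
 * division by 1000).
 */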
static double get_duration(AVFormatContext *s)
{
    int i = 0;
    double max = 0.0;
    for (i = 0; i < s->nb_streams; i++) {
        AVDictionaryEntry *duration = av_dict_get(s->streams[i]->metadata,
                                                  DURATION, NULL, 0);
        if (!duration || atof(duration->value) < 0) continue;
        if (atof(duration->value) > max) max = atof(duration->value);
    }
    return max / 1000;
}
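
/*
 * Writes the MPD root element and its attributes (profile, presentation
 * duration, buffering and, for live manifests, availability/update timing
 * plus an optional UTCTiming element). Returns 0 on success, < 0 on failure.
 */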
static int write_header(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    double min_buffer_time = 1.0;
    avio_printf(s->pb, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    avio_printf(s->pb, "<MPD\n");
    avio_printf(s->pb, " xmlns:xsi=\"http://www.w3.org/2001/XMLSchema-instance\"\n");
    avio_printf(s->pb, " xmlns=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " xsi:schemaLocation=\"urn:mpeg:DASH:schema:MPD:2011\"\n");
    avio_printf(s->pb, " type=\"%s\"\n", w->is_live ? "dynamic" : "static");
    if (!w->is_live) {
        avio_printf(s->pb, " mediaPresentationDuration=\"PT%gS\"\n",
                    get_duration(s));
    }
    avio_printf(s->pb, " minBufferTime=\"PT%gS\"\n", min_buffer_time);
    avio_printf(s->pb, " profiles=\"%s\"%s",
                w->is_live ? "urn:mpeg:dash:profile:isoff-live:2011" : "urn:webm:dash:profile:webm-on-demand:2012",
                w->is_live ? "\n" : ">\n");
    if (w->is_live) {
        time_t local_time = time(NULL);
        struct tm gmt_buffer;
        struct tm *gmt = gmtime_r(&local_time, &gmt_buffer);
        char gmt_iso[21];
        if (!strftime(gmt_iso, 21, "%Y-%m-%dT%H:%M:%SZ", gmt)) {
            return AVERROR_UNKNOWN;
        }
        if (s->flags & AVFMT_FLAG_BITEXACT) {
            av_strlcpy(gmt_iso, "", 1);
        }
        avio_printf(s->pb, " availabilityStartTime=\"%s\"\n", gmt_iso);
        avio_printf(s->pb, " timeShiftBufferDepth=\"PT%gS\"\n", w->time_shift_buffer_depth);
        avio_printf(s->pb, " minimumUpdatePeriod=\"PT%dS\"", w->minimum_update_period);
        avio_printf(s->pb, ">\n");
        if (w->utc_timing_url) {
            avio_printf(s->pb, "<UTCTiming\n");
            avio_printf(s->pb, " schemeIdUri=\"urn:mpeg:dash:utc:http-iso:2014\"\n");
            avio_printf(s->pb, " value=\"%s\"/>\n", w->utc_timing_url);
        }
    }
    return 0;
}
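
/* Closes the MPD element opened by write_header(). */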
static void write_footer(AVFormatContext *s)
{
    avio_printf(s->pb, "</MPD>\n");
}
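
/*
 * Returns 1 if every stream in the adaptation set has identical
 * CUE_TIMESTAMPS metadata (i.e. the cue points are aligned), 0 otherwise.
 */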
static int subsegment_alignment(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold = av_dict_get(s->streams[as->streams[0]]->metadata,
                                          CUE_TIMESTAMPS, NULL, 0);
    if (!gold) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *ts = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CUE_TIMESTAMPS, NULL, 0);
        if (!ts || strncmp(gold->value, ts->value, strlen(gold->value))) return 0;
    }
    return 1;
}
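
/*
 * Returns 1 if all streams in the adaptation set share the same track number,
 * codec id and codec extradata, in which case the AdaptationSet is marked
 * with bitstreamSwitching="true"; 0 otherwise.
 */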
static int bitstream_switching(AVFormatContext *s, AdaptationSet *as) {
    int i;
    AVDictionaryEntry *gold_track_num = av_dict_get(s->streams[as->streams[0]]->metadata,
                                                    TRACK_NUMBER, NULL, 0);
    AVCodecParameters *gold_par = s->streams[as->streams[0]]->codecpar;
    if (!gold_track_num) return 0;
    for (i = 1; i < as->nb_streams; i++) {
        AVDictionaryEntry *track_num = av_dict_get(s->streams[as->streams[i]]->metadata,
                                                   TRACK_NUMBER, NULL, 0);
        AVCodecParameters *par = s->streams[as->streams[i]]->codecpar;
        if (!track_num ||
            strncmp(gold_track_num->value, track_num->value, strlen(gold_track_num->value)) ||
            gold_par->codec_id != par->codec_id ||
            gold_par->extradata_size != par->extradata_size ||
            memcmp(gold_par->extradata, par->extradata, par->extradata_size)) {
            return 0;
        }
    }
    return 1;
}

/*
 * Writes a Representation within an Adaptation Set. Returns 0 on success and
 * < 0 on failure.
 */
static int write_representation(AVFormatContext *s, AVStream *stream, char *id,
                                int output_width, int output_height,
                                int output_sample_rate) {
    WebMDashMuxContext *w = s->priv_data;
    AVDictionaryEntry *irange = av_dict_get(stream->metadata, INITIALIZATION_RANGE, NULL, 0);
    AVDictionaryEntry *cues_start = av_dict_get(stream->metadata, CUES_START, NULL, 0);
    AVDictionaryEntry *cues_end = av_dict_get(stream->metadata, CUES_END, NULL, 0);
    AVDictionaryEntry *filename = av_dict_get(stream->metadata, FILENAME, NULL, 0);
    AVDictionaryEntry *bandwidth = av_dict_get(stream->metadata, BANDWIDTH, NULL, 0);
    const char *bandwidth_str;
    if ((w->is_live && (!filename)) ||
        (!w->is_live && (!irange || !cues_start || !cues_end || !filename || !bandwidth))) {
        return AVERROR_INVALIDDATA;
    }
    avio_printf(s->pb, "<Representation id=\"%s\"", id);
    // if bandwidth for live was not provided, use a default
    if (w->is_live && !bandwidth) {
        bandwidth_str = (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) ? "128000" : "1000000";
    } else {
        bandwidth_str = bandwidth->value;
    }
    avio_printf(s->pb, " bandwidth=\"%s\"", bandwidth_str);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_width)
        avio_printf(s->pb, " width=\"%d\"", stream->codecpar->width);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && output_height)
        avio_printf(s->pb, " height=\"%d\"", stream->codecpar->height);
    if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && output_sample_rate)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", stream->codecpar->sample_rate);
    if (w->is_live) {
        // For live streams, Codec and Mime Type always go in the Representation tag.
        avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(stream->codecpar->codec_id));
        avio_printf(s->pb, " mimeType=\"%s/webm\"",
                    stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        // For live streams, subsegments always start with key frames. So this
        // is always 1.
        avio_printf(s->pb, " startsWithSAP=\"1\"");
        avio_printf(s->pb, ">");
    } else {
        avio_printf(s->pb, ">\n");
        avio_printf(s->pb, "<BaseURL>%s</BaseURL>\n", filename->value);
        avio_printf(s->pb, "<SegmentBase\n");
        avio_printf(s->pb, " indexRange=\"%s-%s\">\n", cues_start->value, cues_end->value);
        avio_printf(s->pb, "<Initialization\n");
        avio_printf(s->pb, " range=\"0-%s\" />\n", irange->value);
        avio_printf(s->pb, "</SegmentBase>\n");
    }
    avio_printf(s->pb, "</Representation>\n");
    return 0;
}

/*
 * Checks whether the width of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_width(AVFormatContext *s, AdaptationSet *as) {
    int first_width, i;
    if (as->nb_streams < 2) return 1;
    first_width = s->streams[as->streams[0]]->codecpar->width;
    for (i = 1; i < as->nb_streams; i++)
        if (first_width != s->streams[as->streams[i]]->codecpar->width)
            return 0;
    return 1;
}

/*
 * Checks whether the height of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_height(AVFormatContext *s, AdaptationSet *as) {
    int first_height, i;
    if (as->nb_streams < 2) return 1;
    first_height = s->streams[as->streams[0]]->codecpar->height;
    for (i = 1; i < as->nb_streams; i++)
        if (first_height != s->streams[as->streams[i]]->codecpar->height)
            return 0;
    return 1;
}

/*
 * Checks whether the sample rate of all streams is the same. Returns 1 if true, 0 otherwise.
 */
static int check_matching_sample_rate(AVFormatContext *s, AdaptationSet *as) {
    int first_sample_rate, i;
    if (as->nb_streams < 2) return 1;
    first_sample_rate = s->streams[as->streams[0]]->codecpar->sample_rate;
    for (i = 1; i < as->nb_streams; i++)
        if (first_sample_rate != s->streams[as->streams[i]]->codecpar->sample_rate)
            return 0;
    return 1;
}
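
/* Frees the per-set stream index arrays and the adaptation set array itself. */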
static void free_adaptation_sets(AVFormatContext *s) {
    WebMDashMuxContext *w = s->priv_data;
    int i;
    for (i = 0; i < w->nb_as; i++) {
        av_freep(&w->as[i].streams);
    }
    av_freep(&w->as);
    w->nb_as = 0;
}

/*
 * Parses a live header filename and computes the representation id,
 * initialization pattern and the media pattern. Pass NULL if you don't want to
 * compute any of those 3. Returns 0 on success and non-zero on failure.
 *
 * Name of the header file should conform to the following pattern:
 * <file_description>_<representation_id>.hdr where <file_description> can be
 * anything. The chunks should be named according to the following pattern:
 * <file_description>_<representation_id>_<chunk_number>.chk
 */
static int parse_filename(char *filename, char **representation_id,
                          char **initialization_pattern, char **media_pattern) {
    char *underscore_pos = NULL;
    char *period_pos = NULL;
    char *temp_pos = NULL;
    char *filename_str = av_strdup(filename);
    int ret = 0;
    if (!filename_str) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    temp_pos = av_stristr(filename_str, "_");
    while (temp_pos) {
        underscore_pos = temp_pos + 1;
        temp_pos = av_stristr(temp_pos + 1, "_");
    }
    if (!underscore_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    period_pos = av_stristr(underscore_pos, ".");
    if (!period_pos) {
        ret = AVERROR_INVALIDDATA;
        goto end;
    }
    *(underscore_pos - 1) = 0;
    if (representation_id) {
        *representation_id = av_malloc(period_pos - underscore_pos + 1);
        if (!(*representation_id)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        av_strlcpy(*representation_id, underscore_pos, period_pos - underscore_pos + 1);
    }
    if (initialization_pattern) {
        *initialization_pattern = av_asprintf("%s_$RepresentationID$.hdr",
                                              filename_str);
        if (!(*initialization_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
    if (media_pattern) {
        *media_pattern = av_asprintf("%s_$RepresentationID$_$Number$.chk",
                                     filename_str);
        if (!(*media_pattern)) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
    }
end:
    av_freep(&filename_str);
    return ret;
}

/*
 * Writes an Adaptation Set. Returns 0 on success and < 0 on failure.
 */
static int write_adaptation_set(AVFormatContext *s, int as_index)
{
    WebMDashMuxContext *w = s->priv_data;
    AdaptationSet *as = &w->as[as_index];
    AVCodecParameters *par = s->streams[as->streams[0]]->codecpar;
    AVDictionaryEntry *lang;
    int i;
    static const char boolean[2][6] = { "false", "true" };
    int subsegmentStartsWithSAP = 1;

    // Width, Height and Sample Rate will go in the AdaptationSet tag if they
    // are the same for all contained Representations. Otherwise, they will go
    // on their respective Representation tag. For live streams, they always go
    // in the Representation tag.
    int width_in_as = 1, height_in_as = 1, sample_rate_in_as = 1;
    if (par->codec_type == AVMEDIA_TYPE_VIDEO) {
        width_in_as = !w->is_live && check_matching_width(s, as);
        height_in_as = !w->is_live && check_matching_height(s, as);
    } else {
        sample_rate_in_as = !w->is_live && check_matching_sample_rate(s, as);
    }

    avio_printf(s->pb, "<AdaptationSet id=\"%s\"", as->id);
    avio_printf(s->pb, " mimeType=\"%s/webm\"",
                par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
    avio_printf(s->pb, " codecs=\"%s\"", get_codec_name(par->codec_id));

    lang = av_dict_get(s->streams[as->streams[0]]->metadata, "language", NULL, 0);
    if (lang) avio_printf(s->pb, " lang=\"%s\"", lang->value);

    if (par->codec_type == AVMEDIA_TYPE_VIDEO && width_in_as)
        avio_printf(s->pb, " width=\"%d\"", par->width);
    if (par->codec_type == AVMEDIA_TYPE_VIDEO && height_in_as)
        avio_printf(s->pb, " height=\"%d\"", par->height);
    if (par->codec_type == AVMEDIA_TYPE_AUDIO && sample_rate_in_as)
        avio_printf(s->pb, " audioSamplingRate=\"%d\"", par->sample_rate);

    avio_printf(s->pb, " bitstreamSwitching=\"%s\"",
                boolean[bitstream_switching(s, as)]);
    avio_printf(s->pb, " subsegmentAlignment=\"%s\"",
                boolean[w->is_live || subsegment_alignment(s, as)]);

    for (i = 0; i < as->nb_streams; i++) {
        AVDictionaryEntry *kf = av_dict_get(s->streams[as->streams[i]]->metadata,
                                            CLUSTER_KEYFRAME, NULL, 0);
        if (!w->is_live && (!kf || !strncmp(kf->value, "0", 1))) subsegmentStartsWithSAP = 0;
    }
    avio_printf(s->pb, " subsegmentStartsWithSAP=\"%d\"", subsegmentStartsWithSAP);
    avio_printf(s->pb, ">\n");

    if (w->is_live) {
        AVDictionaryEntry *filename =
            av_dict_get(s->streams[as->streams[0]]->metadata, FILENAME, NULL, 0);
        char *initialization_pattern = NULL;
        char *media_pattern = NULL;
        int ret;
        if (!filename)
            return AVERROR(EINVAL);
        ret = parse_filename(filename->value, NULL, &initialization_pattern,
                             &media_pattern);
        if (ret) return ret;
        avio_printf(s->pb, "<ContentComponent id=\"1\" type=\"%s\"/>\n",
                    par->codec_type == AVMEDIA_TYPE_VIDEO ? "video" : "audio");
        avio_printf(s->pb, "<SegmentTemplate");
        avio_printf(s->pb, " timescale=\"1000\"");
        avio_printf(s->pb, " duration=\"%d\"", w->chunk_duration);
        avio_printf(s->pb, " media=\"%s\"", media_pattern);
        avio_printf(s->pb, " startNumber=\"%d\"", w->chunk_start_index);
        avio_printf(s->pb, " initialization=\"%s\"", initialization_pattern);
        avio_printf(s->pb, "/>\n");
        av_free(initialization_pattern);
        av_free(media_pattern);
    }

    for (i = 0; i < as->nb_streams; i++) {
        char *representation_id = NULL;
        int ret;
        if (w->is_live) {
            AVDictionaryEntry *filename =
                av_dict_get(s->streams[as->streams[i]]->metadata, FILENAME, NULL, 0);
            if (!filename)
                return AVERROR(EINVAL);
            if ((ret = parse_filename(filename->value, &representation_id, NULL, NULL)))
                return ret;
        } else {
            representation_id = av_asprintf("%d", w->representation_id++);
            if (!representation_id) return AVERROR(ENOMEM);
        }
        ret = write_representation(s, s->streams[as->streams[i]],
                                   representation_id, !width_in_as,
                                   !height_in_as, !sample_rate_in_as);
        av_free(representation_id);
        if (ret) return ret;
    }
    avio_printf(s->pb, "</AdaptationSet>\n");
    return 0;
}
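
/*
 * Parses the 'adaptation_sets' option into the muxer context. The expected
 * syntax is "id=0,streams=0,1,2 id=1,streams=3,4 ..." where the stream values
 * are indices into s->streams. Returns 0 on success, a negative error code
 * on malformed input.
 */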
static int parse_adaptation_sets(AVFormatContext *s)
{
    WebMDashMuxContext *w = s->priv_data;
    char *p = w->adaptation_sets;
    char *q;
    enum { new_set, parsed_id, parsing_streams } state;
    if (!w->adaptation_sets) {
        av_log(s, AV_LOG_ERROR, "The 'adaptation_sets' option must be set.\n");
        return AVERROR(EINVAL);
    }
    // syntax id=0,streams=0,1,2 id=1,streams=3,4 and so on
    state = new_set;
    while (1) {
        if (*p == '\0') {
            if (state == new_set)
                break;
            else
                return AVERROR(EINVAL);
        } else if (state == new_set && *p == ' ') {
            p++;
            continue;
        } else if (state == new_set && !strncmp(p, "id=", 3)) {
            void *mem = av_realloc(w->as, sizeof(*w->as) * (w->nb_as + 1));
            const char *comma;
            if (mem == NULL)
                return AVERROR(ENOMEM);
            w->as = mem;
            ++w->nb_as;
            w->as[w->nb_as - 1].nb_streams = 0;
            w->as[w->nb_as - 1].streams = NULL;
            p += 3; // consume "id="
            q = w->as[w->nb_as - 1].id;
            comma = strchr(p, ',');
            if (!comma || comma - p >= sizeof(w->as[w->nb_as - 1].id)) {
                av_log(s, AV_LOG_ERROR, "'id' in 'adaptation_sets' is malformed.\n");
                return AVERROR(EINVAL);
            }
            while (*p != ',') *q++ = *p++;
            *q = 0;
            p++;
            state = parsed_id;
        } else if (state == parsed_id && !strncmp(p, "streams=", 8)) {
            p += 8; // consume "streams="
            state = parsing_streams;
        } else if (state == parsing_streams) {
            struct AdaptationSet *as = &w->as[w->nb_as - 1];
            int64_t num;
            int ret = av_reallocp_array(&as->streams, ++as->nb_streams,
                                        sizeof(*as->streams));
            if (ret < 0)
                return ret;
            num = strtoll(p, &q, 10);
            if (!av_isdigit(*p) || (*q != ' ' && *q != '\0' && *q != ',') ||
                num < 0 || num >= s->nb_streams) {
                av_log(s, AV_LOG_ERROR, "Invalid value for 'streams' in adaptation_sets.\n");
                return AVERROR(EINVAL);
            }
            as->streams[as->nb_streams - 1] = num;
            if (*q == '\0') break;
            if (*q == ' ') state = new_set;
            p = ++q;
        } else {
            return -1;
        }
    }
    return 0;
}
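
/*
 * Muxer entry point: validates that every stream is VP8/VP9/Vorbis/Opus,
 * parses the adaptation set description and writes the complete manifest
 * (MPD header, one Period, all AdaptationSets, footer).
 */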
static int webm_dash_manifest_write_header(AVFormatContext *s)
{
    int i;
    double start = 0.0;
    int ret;
    WebMDashMuxContext *w = s->priv_data;

    for (unsigned i = 0; i < s->nb_streams; i++) {
        enum AVCodecID codec_id = s->streams[i]->codecpar->codec_id;
        if (codec_id != AV_CODEC_ID_VP8 && codec_id != AV_CODEC_ID_VP9 &&
            codec_id != AV_CODEC_ID_VORBIS && codec_id != AV_CODEC_ID_OPUS)
            return AVERROR(EINVAL);
    }

    ret = parse_adaptation_sets(s);
    if (ret < 0) {
        goto fail;
    }
    ret = write_header(s);
    if (ret < 0) {
        goto fail;
    }
    avio_printf(s->pb, "<Period id=\"0\"");
    avio_printf(s->pb, " start=\"PT%gS\"", start);
    if (!w->is_live) {
        avio_printf(s->pb, " duration=\"PT%gS\"", get_duration(s));
    }
    avio_printf(s->pb, " >\n");

    for (i = 0; i < w->nb_as; i++) {
        ret = write_adaptation_set(s, i);
        if (ret < 0) {
            goto fail;
        }
    }

    avio_printf(s->pb, "</Period>\n");
    write_footer(s);

fail:
    free_adaptation_sets(s);
    return ret < 0 ? ret : 0;
}
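
/* The manifest is written entirely in write_header; no packets are accepted. */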
static int webm_dash_manifest_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    return AVERROR_EOF;
}

#define OFFSET(x) offsetof(WebMDashMuxContext, x)
static const AVOption options[] = {
    { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "live", "create a live stream manifest", OFFSET(is_live), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_start_index", "start index of the chunk", OFFSET(chunk_start_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "chunk_duration_ms", "duration of each chunk (in milliseconds)", OFFSET(chunk_duration), AV_OPT_TYPE_INT, {.i64 = 1000}, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "utc_timing_url", "URL of the page that will return the UTC timestamp in ISO format", OFFSET(utc_timing_url), AV_OPT_TYPE_STRING, { 0 }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "time_shift_buffer_depth", "Smallest time (in seconds) shifting buffer for which any Representation is guaranteed to be available.", OFFSET(time_shift_buffer_depth), AV_OPT_TYPE_DOUBLE, { .dbl = 60.0 }, 1.0, DBL_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "minimum_update_period", "Minimum Update Period (in seconds) of the manifest.", OFFSET(minimum_update_period), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

static const AVClass webm_dash_class = {
    .class_name = "WebM DASH Manifest muxer",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_webm_dash_manifest_muxer = {
    .name              = "webm_dash_manifest",
    .long_name         = NULL_IF_CONFIG_SMALL("WebM DASH Manifest"),
    .mime_type         = "application/xml",
    .extensions        = "xml",
    .priv_data_size    = sizeof(WebMDashMuxContext),
    .write_header      = webm_dash_manifest_write_header,
    .write_packet      = webm_dash_manifest_write_packet,
    .priv_class        = &webm_dash_class,
};