/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "libavutil/dict.h"
#include "libavutil/avassert.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"

#include "libavcodec/bytestream.h"
#include "libavcodec/mpegaudiodata.h"

#include "nut.h"
#include "internal.h"
#include "avio_internal.h"
#include "riff.h"

static int find_expected_header(AVCodecContext *c, int size, int key_frame,
                                uint8_t out[64])
{
    int sample_rate = c->sample_rate;

    if (size > 4096)
        return 0;

    AV_WB24(out, 1);

    if (c->codec_id == AV_CODEC_ID_MPEG4) {
        if (key_frame) {
            return 3;
        } else {
            out[3] = 0xB6;
            return 4;
        }
    } else if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
               c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_H264) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_MP3 ||
               c->codec_id == AV_CODEC_ID_MP2) {
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer           = c->codec_id == AV_CODEC_ID_MP3 ? 3 : 2;
        unsigned int header = 0xFFF00000;

        lsf           = sample_rate < (24000 + 32000) / 2;
        mpeg25        = sample_rate < (12000 + 16000) / 2;
        sample_rate <<= lsf + mpeg25;
        if      (sample_rate < (32000 + 44100) / 2) sample_rate_index = 2;
        else if (sample_rate < (44100 + 48000) / 2) sample_rate_index = 0;
        else                                        sample_rate_index = 1;

        sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        for (bitrate_index = 2; bitrate_index < 30; bitrate_index++) {
            frame_size =
                avpriv_mpa_bitrate_tab[lsf][layer - 1][bitrate_index >> 1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) +
                         (bitrate_index & 1);

            if (frame_size == size)
                break;
        }

        header |= (!lsf) << 19;
        header |= (4 - layer) << 17;
        header |= 1 << 16; //no crc
        AV_WB32(out, header);
        if (size <= 0)
            return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
        if (bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index >> 1) << 12;
        header |= sample_rate_index << 10;
        header |= (bitrate_index & 1) << 9;
        return 2; //FIXME actually put the needed ones in build_elision_headers()
        //return 3; //we guess that the private bit is not set
        //FIXME the above assumptions should be checked, if these turn out false too often something should be done
    }
    return 0;
}

static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type)
{
    NUTContext *nut = s->priv_data;
    uint8_t out[64];
    int i;
    int len = find_expected_header(c, size, frame_type, out);

    for (i = 1; i < nut->header_count; i++) {
        if (len == nut->header_len[i] && !memcmp(out, nut->header[i], len)) {
            return i;
        }
    }

    return 0;
}

static void build_elision_headers(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int i;
    //FIXME this is lame
    //FIXME write a 2pass mode to find the maximal headers
    static const uint8_t headers[][5] = {
        { 3, 0x00, 0x00, 0x01 },
        { 4, 0x00, 0x00, 0x01, 0xB6},
        { 2, 0xFF, 0xFA }, //mp3+crc
        { 2, 0xFF, 0xFB }, //mp3
        { 2, 0xFF, 0xFC }, //mp2+crc
        { 2, 0xFF, 0xFD }, //mp2
    };

    nut->header_count = 7;
    for (i = 1; i < nut->header_count; i++) {
        nut->header_len[i] = headers[i - 1][0];
        nut->header[i]     = &headers[i - 1][1];
    }
}

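/* Fill the 256-entry frame code table: a coded-flags escape, an optional
 * keyframe=0 escape, and per-stream codes covering common size/pts-delta
 * combinations so that typical frames can be described compactly. */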
static void build_frame_code(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start = 1;
    int end   = 254;
    int keyframe_0_esc = s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft            = &nut->frame_code[start];
    ft->flags     = FLAG_CODED;
    ft->size_mul  = 1;
    ft->pts_delta = 1;
    start++;

    if (keyframe_0_esc) {
        /* keyframe = 0 escape */
        FrameCode *ft = &nut->frame_code[start];
        ft->flags     = FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul  = 1;
        start++;
    }

    for (stream_id = 0; stream_id < s->nb_streams; stream_id++) {
        int start2 = start + (end - start) * stream_id / s->nb_streams;
        int end2   = start + (end - start) * (stream_id + 1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio          = codec->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only        = /*codec->intra_only || */ is_audio;
        int pred_count;
        int frame_size = 0;

        if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            frame_size = av_get_audio_frame_duration(codec, 0);
            if (codec->codec_id == AV_CODEC_ID_VORBIS && !frame_size)
                frame_size = 64;
        } else {
            AVRational f = av_div_q(codec->time_base, *nut->stream[stream_id].time_base);
            if (f.den == 1 && f.num > 0)
                frame_size = f.num;
        }
        if (!frame_size)
            frame_size = 1;

        for (key_frame = 0; key_frame < 2; key_frame++) {
            if (!intra_only || !keyframe_0_esc || key_frame != 0) {
                FrameCode *ft  = &nut->frame_code[start2];
                ft->flags      = FLAG_KEY * key_frame;
                ft->flags     |= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id  = stream_id;
                ft->size_mul   = 1;
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
                start2++;
            }
        }
        key_frame = intra_only;
#if 1
        if (is_audio) {
            int frame_bytes;
            int pts;
            if (codec->block_align > 0) {
                frame_bytes = codec->block_align;
            } else {
                int frame_size = av_get_audio_frame_duration(codec, 0);
                frame_bytes = frame_size * (int64_t)codec->bit_rate / (8 * codec->sample_rate);
            }

            for (pts = 0; pts < 2; pts++) {
                for (pred = 0; pred < 2; pred++) {
                    FrameCode *ft  = &nut->frame_code[start2];
                    ft->flags      = FLAG_KEY * key_frame;
                    ft->stream_id  = stream_id;
                    ft->size_mul   = frame_bytes + 2;
                    ft->size_lsb   = frame_bytes + pred;
                    ft->pts_delta  = pts * frame_size;
                    ft->header_idx = find_header_idx(s, codec, frame_bytes + pred, key_frame);
                    start2++;
                }
            }
        } else {
            FrameCode *ft = &nut->frame_code[start2];
            ft->flags     = FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id = stream_id;
            ft->size_mul  = 1;
            ft->pts_delta = frame_size;
            start2++;
        }
#endif

        if (codec->has_b_frames) {
            pred_count    = 5;
            pred_table[0] = -2;
            pred_table[1] = -1;
            pred_table[2] = 1;
            pred_table[3] = 3;
            pred_table[4] = 4;
        } else if (codec->codec_id == AV_CODEC_ID_VORBIS) {
            pred_count    = 3;
            pred_table[0] = 2;
            pred_table[1] = 9;
            pred_table[2] = 16;
        } else {
            pred_count    = 1;
            pred_table[0] = 1;
        }

        for (pred = 0; pred < pred_count; pred++) {
            int start3 = start2 + (end2 - start2) * pred / pred_count;
            int end3   = start2 + (end2 - start2) * (pred + 1) / pred_count;

            pred_table[pred] *= frame_size;

            for (index = start3; index < end3; index++) {
                FrameCode *ft = &nut->frame_code[index];
                ft->flags     = FLAG_KEY * key_frame;
                ft->flags    |= FLAG_SIZE_MSB;
                ft->stream_id = stream_id;
                //FIXME use single byte size and pred from last
                ft->size_mul  = end3 - start3;
                ft->size_lsb  = index - start3;
                ft->pts_delta = pred_table[pred];
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
            }
        }
    }
    memmove(&nut->frame_code['N' + 1], &nut->frame_code['N'], sizeof(FrameCode) * (255 - 'N'));
    nut->frame_code[0].flags   =
    nut->frame_code[255].flags =
    nut->frame_code['N'].flags = FLAG_INVALID;
}

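/* Write a universal NUT timestamp: the value is multiplied by the number of
 * time bases and offset by the index of the given time base. */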
static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc, uint64_t val)
{
    val *= nut->time_base_count;
    val += time_base - nut->time_base;
    ff_put_v(bc, val);
}

/**
 * Store a string as vb.
 */
static void put_str(AVIOContext *bc, const char *string)
{
    size_t len = strlen(string);

    ff_put_v(bc, len);
    avio_write(bc, string, len);
}

static void put_s(AVIOContext *bc, int64_t val)
{
    ff_put_v(bc, 2 * FFABS(val) - (val > 0));
}

#ifdef TRACE
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
                                  const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    ff_put_v(bc, v);
}

static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file, const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define ff_put_v(bc, v)  ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)     put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif

//FIXME remove calculate_checksum
static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc,
                       int calculate_checksum, uint64_t startcode)
{
    uint8_t *dyn_buf = NULL;
    int dyn_size     = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr     = dyn_size + 4 * calculate_checksum;

    if (forw_ptr > 4096)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_wb64(bc, startcode);
    ff_put_v(bc, forw_ptr);
    if (forw_ptr > 4096)
        avio_wl32(bc, ffio_get_checksum(bc));

    if (calculate_checksum)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_write(bc, dyn_buf, dyn_size);
    if (calculate_checksum)
        avio_wl32(bc, ffio_get_checksum(bc));

    av_free(dyn_buf);
}

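/* Serialize the main header: version, stream count, max_distance, the time
 * base table, the run-length-coded frame code table, and the elision headers. */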
static void write_mainheader(NUTContext *nut, AVIOContext *bc)
{
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields,
        tmp_head_idx;
    int64_t tmp_match;

    ff_put_v(bc, nut->version);
    if (nut->version > 3)
        ff_put_v(bc, nut->minor_version = 1);
    ff_put_v(bc, nut->avf->nb_streams);
    ff_put_v(bc, nut->max_distance);
    ff_put_v(bc, nut->time_base_count);

    for (i = 0; i < nut->time_base_count; i++) {
        ff_put_v(bc, nut->time_base[i].num);
        ff_put_v(bc, nut->time_base[i].den);
    }

    tmp_pts      = 0;
    tmp_mul      = 1;
    tmp_stream   = 0;
    tmp_match    = 1 - (1LL << 62);
    tmp_head_idx = 0;
    for (i = 0; i < 256; ) {
        tmp_fields = 0;
        tmp_size   = 0;
//        tmp_res = 0;
        if (tmp_pts      != nut->frame_code[i].pts_delta ) tmp_fields = 1;
        if (tmp_mul      != nut->frame_code[i].size_mul  ) tmp_fields = 2;
        if (tmp_stream   != nut->frame_code[i].stream_id ) tmp_fields = 3;
        if (tmp_size     != nut->frame_code[i].size_lsb  ) tmp_fields = 4;
//        if (tmp_res    != nut->frame_code[i].res       ) tmp_fields = 5;
        if (tmp_head_idx != nut->frame_code[i].header_idx) tmp_fields = 8;

        tmp_pts      = nut->frame_code[i].pts_delta;
        tmp_flags    = nut->frame_code[i].flags;
        tmp_stream   = nut->frame_code[i].stream_id;
        tmp_mul      = nut->frame_code[i].size_mul;
        tmp_size     = nut->frame_code[i].size_lsb;
//        tmp_res    = nut->frame_code[i].res;
        tmp_head_idx = nut->frame_code[i].header_idx;

        for (j = 0; i < 256; j++, i++) {
            if (i == 'N') {
                j--;
                continue;
            }
            if (nut->frame_code[i].pts_delta  != tmp_pts      ||
                nut->frame_code[i].flags      != tmp_flags    ||
                nut->frame_code[i].stream_id  != tmp_stream   ||
                nut->frame_code[i].size_mul   != tmp_mul      ||
                nut->frame_code[i].size_lsb   != tmp_size + j ||
//              nut->frame_code[i].res        != tmp_res      ||
                nut->frame_code[i].header_idx != tmp_head_idx)
                break;
        }
        if (j != tmp_mul - tmp_size)
            tmp_fields = 6;

        ff_put_v(bc, tmp_flags);
        ff_put_v(bc, tmp_fields);
        if (tmp_fields > 0) put_s(bc, tmp_pts);
        if (tmp_fields > 1) ff_put_v(bc, tmp_mul);
        if (tmp_fields > 2) ff_put_v(bc, tmp_stream);
        if (tmp_fields > 3) ff_put_v(bc, tmp_size);
        if (tmp_fields > 4) ff_put_v(bc, 0 /*tmp_res*/);
        if (tmp_fields > 5) ff_put_v(bc, j);
        if (tmp_fields > 6) ff_put_v(bc, tmp_match);
        if (tmp_fields > 7) ff_put_v(bc, tmp_head_idx);
    }
    ff_put_v(bc, nut->header_count - 1);
    for (i = 1; i < nut->header_count; i++) {
        ff_put_v(bc, nut->header_len[i]);
        avio_write(bc, nut->header[i], nut->header_len[i]);
    }
    // flags had been effectively introduced in version 4
    if (nut->version > 3)
        ff_put_v(bc, nut->flags);
}

static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc,
                              AVStream *st, int i)
{
    NUTContext *nut       = avctx->priv_data;
    AVCodecContext *codec = st->codec;

    ff_put_v(bc, i);
    switch (codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:    ff_put_v(bc, 0); break;
    case AVMEDIA_TYPE_AUDIO:    ff_put_v(bc, 1); break;
    case AVMEDIA_TYPE_SUBTITLE: ff_put_v(bc, 2); break;
    default:                    ff_put_v(bc, 3); break;
    }
    ff_put_v(bc, 4);

    if (codec->codec_tag) {
        avio_wl32(bc, codec->codec_tag);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);
    }

    ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
    ff_put_v(bc, nut->stream[i].msb_pts_shift);
    ff_put_v(bc, nut->stream[i].max_pts_distance);
    ff_put_v(bc, codec->has_b_frames);
    avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    ff_put_v(bc, codec->extradata_size);
    avio_write(bc, codec->extradata, codec->extradata_size);

    switch (codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, codec->sample_rate);
        ff_put_v(bc, 1);
        ff_put_v(bc, codec->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, codec->width);
        ff_put_v(bc, codec->height);

        if (st->sample_aspect_ratio.num <= 0 ||
            st->sample_aspect_ratio.den <= 0) {
            ff_put_v(bc, 0);
            ff_put_v(bc, 0);
        } else {
            ff_put_v(bc, st->sample_aspect_ratio.num);
            ff_put_v(bc, st->sample_aspect_ratio.den);
        }
        ff_put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}

static int add_info(AVIOContext *bc, const char *type, const char *value)
{
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}

static int write_globalinfo(NUTContext *nut, AVIOContext *bc)
{
    AVFormatContext *s   = nut->avf;
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count        = 0, dyn_size;
    int ret          = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_standardize_creation_time(s);
    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, 0); //stream_if_plus1
    ff_put_v(bc, 0); //chapter_id
    ff_put_v(bc, 0); //timestamp_start
    ff_put_v(bc, 0); //length

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}

static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id) {
    AVFormatContext *s   = nut->avf;
    AVStream *st         = s->streams[stream_id];
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count        = 0, dyn_size, i;
    int ret          = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    while ((t = av_dict_get(st->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);
    for (i = 0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        uint8_t buf[256];
        if (st->r_frame_rate.num > 0 && st->r_frame_rate.den > 0)
            snprintf(buf, sizeof(buf), "%d/%d", st->r_frame_rate.num, st->r_frame_rate.den);
        else
            snprintf(buf, sizeof(buf), "%d/%d", st->codec->time_base.den, st->codec->time_base.num);
        count += add_info(dyn_bc, "r_frame_rate", buf);
    }
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        ff_put_v(bc, stream_id + 1); //stream_id_plus1
        ff_put_v(bc, 0);             //chapter_id
        ff_put_v(bc, 0);             //timestamp_start
        ff_put_v(bc, 0);             //length

        ff_put_v(bc, count);

        avio_write(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}

static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
{
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf     = NULL;
    AVDictionaryEntry *t = NULL;
    AVChapter *ch        = nut->avf->chapters[id];
    int ret, dyn_size, count = 0;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_put_v(bc, 0);                                        // stream_id_plus1
    put_s(bc, id + 1);                                      // chapter_id
    put_tt(nut, nut->chapter[id].time_base, bc, ch->start); // chapter_start
    ff_put_v(bc, ch->end - ch->start);                      // chapter_len

    while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);
    return 0;
}

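/* Write the index payload: the highest pts seen, the syncpoint positions as
 * deltas, per-stream runs of keyframe pts, and finally the 8-byte value used
 * to locate the start of the index from the end of the file. */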
static int write_index(NUTContext *nut, AVIOContext *bc) {
    int i;
    Syncpoint dummy = { .pos = 0 };
    Syncpoint *next_node[2] = { NULL };
    int64_t startpos = avio_tell(bc);
    int64_t payload_size;

    put_tt(nut, nut->max_pts_tb, bc, nut->max_pts);

    ff_put_v(bc, nut->sp_count);

    for (i = 0; i < nut->sp_count; i++) {
        av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pos_cmp, (void**)next_node);
        ff_put_v(bc, (next_node[1]->pos >> 4) - (dummy.pos >> 4));
        dummy.pos = next_node[1]->pos;
    }

    for (i = 0; i < nut->avf->nb_streams; i++) {
        StreamContext *nus = &nut->stream[i];
        int64_t last_pts = -1;
        int j, k;
        for (j = 0; j < nut->sp_count; j++) {
            int flag;
            int n = 0;

            if (j && nus->keyframe_pts[j] == nus->keyframe_pts[j-1]) {
                av_log(nut->avf, AV_LOG_WARNING, "Multiple keyframes with same PTS\n");
                nus->keyframe_pts[j] = AV_NOPTS_VALUE;
            }

            flag = (nus->keyframe_pts[j] != AV_NOPTS_VALUE) ^ (j+1 == nut->sp_count);
            for (; j < nut->sp_count && (nus->keyframe_pts[j] != AV_NOPTS_VALUE) == flag; j++)
                n++;

            ff_put_v(bc, 1 + 2*flag + 4*n);
            for (k = j - n; k <= j && k < nut->sp_count; k++) {
                if (nus->keyframe_pts[k] == AV_NOPTS_VALUE)
                    continue;
                av_assert0(nus->keyframe_pts[k] > last_pts);
                ff_put_v(bc, nus->keyframe_pts[k] - last_pts);
                last_pts = nus->keyframe_pts[k];
            }
        }
    }

    payload_size = avio_tell(bc) - startpos + 8 + 4;

    avio_wb64(bc, 8 + payload_size + av_log2(payload_size) / 7 + 1 + 4 * (payload_size > 4096));

    return 0;
}

static int write_headers(AVFormatContext *avctx, AVIOContext *bc)
{
    NUTContext *nut = avctx->priv_data;
    AVIOContext *dyn_bc;
    int i, ret;

    ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i);
        if (ret < 0)
            return ret;
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret < 0)
            return ret;
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else
            ffio_free_dyn_buf(&dyn_bc);
    }

    for (i = 0; i < nut->avf->nb_chapters; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_chapter(nut, dyn_bc, i);
        if (ret < 0) {
            ffio_free_dyn_buf(&dyn_bc);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
    }

    nut->last_syncpoint_pos = INT_MIN;
    nut->header_count++;
    return 0;
}

static int nut_write_header(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, j, ret;

    nut->avf = s;

    nut->version = FFMAX(NUT_STABLE_VERSION, 3 + !!nut->flags);
    if (nut->version > 3 && s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(s, AV_LOG_ERROR,
               "The additional syncpoint modes require version %d, "
               "that is currently not finalized, "
               "please set -f_strict experimental in order to enable it.\n",
               nut->version);
        return AVERROR_EXPERIMENTAL;
    }

    nut->stream   = av_calloc(s->nb_streams,  sizeof(*nut->stream ));
    nut->chapter  = av_calloc(s->nb_chapters, sizeof(*nut->chapter));
    nut->time_base= av_calloc(s->nb_streams +
                              s->nb_chapters, sizeof(*nut->time_base));
    if (!nut->stream || !nut->chapter || !nut->time_base) {
        av_freep(&nut->stream);
        av_freep(&nut->chapter);
        av_freep(&nut->time_base);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st, &time_base.den, &ssize, &time_base.num);

        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->sample_rate) {
            time_base = (AVRational) {1, st->codec->sample_rate};
        } else {
            time_base = ff_choose_timebase(s, st, 48000);
        }

        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))) {
                break;
            }
        nut->time_base[j]        = time_base;
        nut->stream[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;

        if (INT64_C(1000) * time_base.num >= time_base.den)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance =
            FFMAX(time_base.den, time_base.num) / time_base.num;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
                break;

        nut->time_base[j]         = ch->time_base;
        nut->chapter[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;
    }

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);
    build_frame_code(s);
    av_assert0(nut->frame_code['N'].flags == FLAG_INVALID);

    avio_write(bc, ID_STRING, strlen(ID_STRING));
    avio_w8(bc, 0);

    if ((ret = write_headers(s, bc)) < 0)
        return ret;

    if (s->avoid_negative_ts < 0)
        s->avoid_negative_ts = 1;

    avio_flush(bc);

    return 0;
}

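/* Compute the set of flags that this frame code must provide (or code
 * explicitly) in order to represent the given packet. */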
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc,
                            AVPacket *pkt)
{
    int flags = 0;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        flags |= FLAG_KEY;
    if (pkt->stream_index != fc->stream_id)
        flags |= FLAG_STREAM_ID;
    if (pkt->size / fc->size_mul)
        flags |= FLAG_SIZE_MSB;
    if (pkt->pts - nus->last_pts != fc->pts_delta)
        flags |= FLAG_CODED_PTS;
    if (pkt->side_data_elems && nut->version > 3)
        flags |= FLAG_SM_DATA;
    if (pkt->size > 2 * nut->max_distance)
        flags |= FLAG_CHECKSUM;
    if (FFABS(pkt->pts - nus->last_pts) > nus->max_pts_distance)
        flags |= FLAG_CHECKSUM;
    if (pkt->size < nut->header_len[fc->header_idx] ||
        (pkt->size > 4096 && fc->header_idx)        ||
        memcmp(pkt->data, nut->header[fc->header_idx],
               nut->header_len[fc->header_idx]))
        flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
}

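/* Return the index of the elision header matching the longest prefix of the
 * packet data, or 0 if none matches. */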
static int find_best_header_idx(NUTContext *nut, AVPacket *pkt)
{
    int i;
    int best_i   = 0;
    int best_len = 0;

    if (pkt->size > 4096)
        return 0;

    for (i = 1; i < nut->header_count; i++)
        if (pkt->size >= nut->header_len[i]
            && nut->header_len[i] > best_len
            && !memcmp(pkt->data, nut->header[i], nut->header_len[i])) {
            best_i   = i;
            best_len = nut->header_len[i];
        }
    return best_i;
}

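/* Serialize packet side data (or, when is_meta is set, metadata-style side
 * data) as NUT side/meta data fields into bc. */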
static int write_sm_data(AVFormatContext *s, AVIOContext *bc, AVPacket *pkt, int is_meta)
{
    int ret, i, dyn_size;
    unsigned flags;
    AVIOContext *dyn_bc;
    int sm_data_count = 0;
    uint8_t tmp[256];
    uint8_t *dyn_buf;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    for (i = 0; i < pkt->side_data_elems; i++) {
        const uint8_t *data = pkt->side_data[i].data;
        int size = pkt->side_data[i].size;
        const uint8_t *data_end = data + size;

        if (is_meta) {
            if (   pkt->side_data[i].type == AV_PKT_DATA_METADATA_UPDATE
                || pkt->side_data[i].type == AV_PKT_DATA_STRINGS_METADATA) {
                if (!size || data[size-1]) {
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
                while (data < data_end) {
                    const uint8_t *key = data;
                    const uint8_t *val = data + strlen(key) + 1;

                    if (val >= data_end) {
                        ret = AVERROR(EINVAL);
                        goto fail;
                    }
                    put_str(dyn_bc, key);
                    put_s(dyn_bc, -1);
                    put_str(dyn_bc, val);
                    data = val + strlen(val) + 1;
                    sm_data_count++;
                }
            }
        } else {
            switch (pkt->side_data[i].type) {
            case AV_PKT_DATA_PALETTE:
            case AV_PKT_DATA_NEW_EXTRADATA:
            case AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL:
            default:
                if (pkt->side_data[i].type == AV_PKT_DATA_PALETTE) {
                    put_str(dyn_bc, "Palette");
                } else if (pkt->side_data[i].type == AV_PKT_DATA_NEW_EXTRADATA) {
                    put_str(dyn_bc, "Extradata");
                } else if (pkt->side_data[i].type == AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL) {
                    snprintf(tmp, sizeof(tmp), "CodecSpecificSide%"PRId64"", AV_RB64(data));
                    put_str(dyn_bc, tmp);
                } else {
                    snprintf(tmp, sizeof(tmp), "UserData%s-SD-%d",
                             (s->flags & AVFMT_FLAG_BITEXACT) ? "Lavf" : LIBAVFORMAT_IDENT,
                             pkt->side_data[i].type);
                    put_str(dyn_bc, tmp);
                }
                put_s(dyn_bc, -2);
                put_str(dyn_bc, "bin");
                ff_put_v(dyn_bc, pkt->side_data[i].size);
                avio_write(dyn_bc, data, pkt->side_data[i].size);
                sm_data_count++;
                break;
            case AV_PKT_DATA_PARAM_CHANGE:
                flags = bytestream_get_le32(&data);
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
                    put_str(dyn_bc, "Channels");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    sm_data_count++;
                }
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
                    put_str(dyn_bc, "ChannelLayout");
                    put_s(dyn_bc, -2);
                    put_str(dyn_bc, "u64");
                    ff_put_v(bc, 8);
                    avio_write(dyn_bc, data, 8); data += 8;
                    sm_data_count++;
                }
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
                    put_str(dyn_bc, "SampleRate");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    sm_data_count++;
                }
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
                    put_str(dyn_bc, "Width");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    put_str(dyn_bc, "Height");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    sm_data_count += 2;
                }
                break;
            case AV_PKT_DATA_SKIP_SAMPLES:
                if (AV_RL32(data)) {
                    put_str(dyn_bc, "SkipStart");
                    put_s(dyn_bc, (unsigned)AV_RL32(data));
                    sm_data_count++;
                }
                if (AV_RL32(data+4)) {
                    put_str(dyn_bc, "SkipEnd");
                    put_s(dyn_bc, (unsigned)AV_RL32(data+4));
                    sm_data_count++;
                }
                break;
            case AV_PKT_DATA_METADATA_UPDATE:
            case AV_PKT_DATA_STRINGS_METADATA:
            case AV_PKT_DATA_QUALITY_STATS:
                // belongs into meta, not side data
                break;
            }
        }
    }

fail:
    ff_put_v(bc, sm_data_count);
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);

    return ret;
}

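/* Write one packet: re-emit headers and syncpoints when due, pick the frame
 * code with the smallest coded size for this packet, then write the coded
 * frame header followed by side data and payload. */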
static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUTContext *nut    = s->priv_data;
    StreamContext *nus = &nut->stream[pkt->stream_index];
    AVIOContext *bc    = s->pb, *dyn_bc, *sm_bc = NULL;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx;
    int best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int store_sp  = 0;
    int ret       = 0;
    int sm_size   = 0;
    int data_size = pkt->size;
    uint8_t *sm_buf = NULL;

    if (pkt->pts < 0) {
        av_log(s, AV_LOG_ERROR,
               "Negative pts not supported stream %d, pts %"PRId64"\n",
               pkt->stream_index, pkt->pts);
        if (pkt->pts == AV_NOPTS_VALUE)
            av_log(s, AV_LOG_ERROR, "Try to enable the genpts flag\n");
        return AVERROR(EINVAL);
    }

    if (pkt->side_data_elems && nut->version > 3) {
        ret = avio_open_dyn_buf(&sm_bc);
        if (ret < 0)
            return ret;
        ret = write_sm_data(s, sm_bc, pkt, 0);
        if (ret >= 0)
            ret = write_sm_data(s, sm_bc, pkt, 1);
        sm_size = avio_close_dyn_buf(sm_bc, &sm_buf);
        if (ret < 0)
            goto fail;
        data_size += sm_size;
    }

    if (1LL << (20 + 3 * nut->header_count) <= avio_tell(bc))
        write_headers(s, bc);

    if (key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp = 1;

    if (data_size + 30 /*FIXME check*/ + avio_tell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp = 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if (store_sp &&
        (!(nut->flags & NUT_PIPE) || nut->last_syncpoint_pos == INT_MIN)) {
        int64_t sp_pos = INT64_MAX;

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index = av_index_search_timestamp(st, dts_tb,
                                                  AVSEEK_FLAG_BACKWARD);
            if (index >= 0) {
                sp_pos = FFMIN(sp_pos, st->index_entries[index].pos);
                if (!nut->write_index && 2*index > st->nb_index_entries) {
                    memmove(st->index_entries,
                            st->index_entries + index,
                            sizeof(*st->index_entries) * (st->nb_index_entries - index));
                    st->nb_index_entries -= index;
                }
            }
        }

        nut->last_syncpoint_pos = avio_tell(bc);
        ret                     = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            goto fail;
        put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
        ff_put_v(dyn_bc, sp_pos != INT64_MAX ? (nut->last_syncpoint_pos - sp_pos) >> 4 : 0);

        if (nut->flags & NUT_BROADCAST) {
            put_tt(nut, nus->time_base, dyn_bc,
                   av_rescale_q(av_gettime(), AV_TIME_BASE_Q, *nus->time_base));
        }
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        if (nut->write_index) {
            if ((ret = ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0 /*unused*/, pkt->dts)) < 0)
                goto fail;

            if ((1ll<<60) % nut->sp_count == 0)
                for (i = 0; i < s->nb_streams; i++) {
                    int j;
                    StreamContext *nus = &nut->stream[i];
                    av_reallocp_array(&nus->keyframe_pts, 2*nut->sp_count, sizeof(*nus->keyframe_pts));
                    if (!nus->keyframe_pts) {
                        ret = AVERROR(ENOMEM);
                        goto fail;
                    }
                    for (j = nut->sp_count == 1 ? 0 : nut->sp_count; j < 2*nut->sp_count; j++)
                        nus->keyframe_pts[j] = AV_NOPTS_VALUE;
                }
        }
    }
    av_assert0(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1 << nus->msb_pts_shift) - 1);
    if (ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts = pkt->pts + (1 << nus->msb_pts_shift);

    best_header_idx = find_best_header_idx(nut, pkt);

    best_length = INT_MAX;
    frame_code  = -1;
    for (i = 0; i < 256; i++) {
        int length    = 0;
        FrameCode *fc = &nut->frame_code[i];
        int flags     = fc->flags;

        if (flags & FLAG_INVALID)
            continue;

        needed_flags = get_needed_flags(nut, nus, fc, pkt);

        if (flags & FLAG_CODED) {
            length++;
            flags = needed_flags;
        }

        if ((flags & needed_flags) != needed_flags)
            continue;

        if ((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if (flags & FLAG_STREAM_ID)
            length += ff_get_v_length(pkt->stream_index);

        if (data_size % fc->size_mul != fc->size_lsb)
            continue;
        if (flags & FLAG_SIZE_MSB)
            length += ff_get_v_length(data_size / fc->size_mul);

        if (flags & FLAG_CHECKSUM)
            length += 4;

        if (flags & FLAG_CODED_PTS)
            length += ff_get_v_length(coded_pts);

        if (   (flags & FLAG_CODED)
            && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx] + 1) {
            flags |= FLAG_HEADER_IDX;
        }

        if (flags & FLAG_HEADER_IDX) {
            length += 1 - nut->header_len[best_header_idx];
        } else {
            length -= nut->header_len[fc->header_idx];
        }

        length *= 4;
        length += !(flags & FLAG_CODED_PTS);
        length += !(flags & FLAG_CHECKSUM);

        if (length < best_length) {
            best_length = length;
            frame_code  = i;
        }
    }
    av_assert0(frame_code != -1);

    fc           = &nut->frame_code[frame_code];
    flags        = fc->flags;
    needed_flags = get_needed_flags(nut, nus, fc, pkt);
    header_idx   = fc->header_idx;

    ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_w8(bc, frame_code);
    if (flags & FLAG_CODED) {
        ff_put_v(bc, (flags ^ needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if (flags & FLAG_STREAM_ID)  ff_put_v(bc, pkt->stream_index);
    if (flags & FLAG_CODED_PTS)  ff_put_v(bc, coded_pts);
    if (flags & FLAG_SIZE_MSB )  ff_put_v(bc, data_size / fc->size_mul);
    if (flags & FLAG_HEADER_IDX) ff_put_v(bc, header_idx = best_header_idx);

    if (flags & FLAG_CHECKSUM)   avio_wl32(bc, ffio_get_checksum(bc));
    else                         ffio_get_checksum(bc);

    if (flags & FLAG_SM_DATA) {
        avio_write(bc, sm_buf, sm_size);
    }
    avio_write(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);

    nus->last_flags = flags;
    nus->last_pts   = pkt->pts;

    //FIXME just store one per syncpoint
    if (flags & FLAG_KEY && !(nut->flags & NUT_PIPE)) {
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);
        if (nus->keyframe_pts && nus->keyframe_pts[nut->sp_count] == AV_NOPTS_VALUE)
            nus->keyframe_pts[nut->sp_count] = pkt->pts;
    }

    if (!nut->max_pts_tb || av_compare_ts(nut->max_pts, *nut->max_pts_tb, pkt->pts, *nus->time_base) < 0) {
        nut->max_pts    = pkt->pts;
        nut->max_pts_tb = nus->time_base;
    }

fail:
    av_freep(&sm_buf);

    return ret;
}

static int nut_write_trailer(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb, *dyn_bc;
    int ret;

    while (nut->header_count < 3)
        write_headers(s, bc);

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret >= 0 && nut->sp_count) {
        av_assert1(nut->write_index);
        write_index(nut, dyn_bc);
        put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE);
    }

    return 0;
}

static void nut_write_deinit(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int i;

    ff_nut_free_sp(nut);
    if (nut->stream)
        for (i = 0; i < s->nb_streams; i++)
            av_freep(&nut->stream[i].keyframe_pts);

    av_freep(&nut->stream);
    av_freep(&nut->chapter);
    av_freep(&nut->time_base);
}

#define OFFSET(x) offsetof(NUTContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "syncpoints", "NUT syncpoint behaviour", OFFSET(flags), AV_OPT_TYPE_FLAGS, {.i64 = 0}, INT_MIN, INT_MAX, E, "syncpoints" },
    { "default", "", 0, AV_OPT_TYPE_CONST, {.i64 = 0}, INT_MIN, INT_MAX, E, "syncpoints" },
    { "none", "Disable syncpoints, low overhead and unseekable", 0, AV_OPT_TYPE_CONST, {.i64 = NUT_PIPE}, INT_MIN, INT_MAX, E, "syncpoints" },
    { "timestamped", "Extend syncpoints with a wallclock timestamp", 0, AV_OPT_TYPE_CONST, {.i64 = NUT_BROADCAST}, INT_MIN, INT_MAX, E, "syncpoints" },
    { "write_index", "Write index", OFFSET(write_index), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, E, },
    { NULL },
};

static const AVClass class = {
    .class_name = "nutenc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_nut_muxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT"),
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = CONFIG_LIBVORBIS ? AV_CODEC_ID_VORBIS :
                      CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .deinit         = nut_write_deinit,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag      = ff_nut_codec_tags,
    .priv_class     = &class,
};