/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "libavutil/dict.h"
#include "libavutil/avassert.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/mpegaudiodata.h"
#include "nut.h"
#include "internal.h"
#include "avio_internal.h"
#include "riff.h"

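/*
 * Guess the leading bytes a packet of this codec and size is expected to
 * start with (MPEG start codes or an MP2/MP3 frame header) so they can be
 * elided from stored frames. Returns the number of bytes written to out[],
 * 0 if no prediction is made, or a negative value if the MP2/MP3 frame size
 * looks inconsistent.
 */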
static int find_expected_header(AVCodecContext *c, int size, int key_frame,
                                uint8_t out[64])
{
    int sample_rate = c->sample_rate;

    if (size > 4096)
        return 0;

    AV_WB24(out, 1);

    if (c->codec_id == AV_CODEC_ID_MPEG4) {
        if (key_frame) {
            return 3;
        } else {
            out[3] = 0xB6;
            return 4;
        }
    } else if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
               c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_H264) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_MP3 ||
               c->codec_id == AV_CODEC_ID_MP2) {
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer           = c->codec_id == AV_CODEC_ID_MP3 ? 3 : 2;
        unsigned int header = 0xFFF00000;

        lsf           = sample_rate < (24000 + 32000) / 2;
        mpeg25        = sample_rate < (12000 + 16000) / 2;
        sample_rate <<= lsf + mpeg25;
        if      (sample_rate < (32000 + 44100) / 2) sample_rate_index = 2;
        else if (sample_rate < (44100 + 48000) / 2) sample_rate_index = 0;
        else                                        sample_rate_index = 1;

        sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        for (bitrate_index = 2; bitrate_index < 30; bitrate_index++) {
            frame_size =
                avpriv_mpa_bitrate_tab[lsf][layer - 1][bitrate_index >> 1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) +
                         (bitrate_index & 1);

            if (frame_size == size)
                break;
        }

        header |= (!lsf) << 19;
        header |= (4 - layer) << 17;
        header |= 1 << 16; //no crc
        AV_WB32(out, header);
        if (size <= 0)
            return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
        if (bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index >> 1) << 12;
        header |= sample_rate_index << 10;
        header |= (bitrate_index & 1) << 9;

        return 2; //FIXME actually put the needed ones in build_elision_headers()
        //return 3; //we guess that the private bit is not set
        //FIXME the above assumptions should be checked, if these turn out false too often something should be done
    }
    return 0;
}

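/*
 * Look up the expected header of a frame in the elision header table and
 * return its index, or 0 if no matching entry exists.
 */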
static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type)
{
    NUTContext *nut = s->priv_data;
    uint8_t out[64];
    int i;
    int len = find_expected_header(c, size, frame_type, out);

    for (i = 1; i < nut->header_count; i++) {
        if (len == nut->header_len[i] && !memcmp(out, nut->header[i], len)) {
            return i;
        }
    }

    return 0;
}

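/*
 * Fill the static table of elision headers (MPEG start codes and common
 * MP2/MP3 frame-header prefixes) that can be stripped from stored frames
 * and restored by the demuxer.
 */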
static void build_elision_headers(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int i;
    //FIXME this is lame
    //FIXME write a 2pass mode to find the maximal headers
    static const uint8_t headers[][5] = {
        { 3, 0x00, 0x00, 0x01 },
        { 4, 0x00, 0x00, 0x01, 0xB6},
        { 2, 0xFF, 0xFA }, //mp3+crc
        { 2, 0xFF, 0xFB }, //mp3
        { 2, 0xFF, 0xFC }, //mp2+crc
        { 2, 0xFF, 0xFD }, //mp2
    };

    nut->header_count = 7;
    for (i = 1; i < nut->header_count; i++) {
        nut->header_len[i] = headers[i - 1][0];
        nut->header[i]     = &headers[i - 1][1];
    }
}

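/*
 * Build the 256-entry frame_code table that maps the first byte of a NUT
 * frame to coded flags, stream id, size multiplier and pts delta. Codes are
 * distributed across streams, keyframe states and common pts predictions so
 * that typical frames need as few explicitly coded fields as possible.
 * Byte 'N' is reserved for startcodes; 0x00 and 0xFF stay invalid.
 */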
static void build_frame_code(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start = 1;
    int end   = 254;
    int keyframe_0_esc = s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft            = &nut->frame_code[start];
    ft->flags     = FLAG_CODED;
    ft->size_mul  = 1;
    ft->pts_delta = 1;
    start++;

    if (keyframe_0_esc) {
        /* keyframe = 0 escape */
        FrameCode *ft = &nut->frame_code[start];
        ft->flags     = FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul  = 1;
        start++;
    }

    for (stream_id = 0; stream_id < s->nb_streams; stream_id++) {
        int start2 = start + (end - start) * stream_id / s->nb_streams;
        int end2   = start + (end - start) * (stream_id + 1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio          = codec->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only        = /*codec->intra_only || */ is_audio;
        int pred_count;
        int frame_size = 0;

        if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            frame_size = av_get_audio_frame_duration(codec, 0);
            if (codec->codec_id == AV_CODEC_ID_VORBIS && !frame_size)
                frame_size = 64;
        } else {
            AVRational f = av_div_q(codec->time_base, *nut->stream[stream_id].time_base);
            if (f.den == 1 && f.num > 0)
                frame_size = f.num;
        }
        if (!frame_size)
            frame_size = 1;

        for (key_frame = 0; key_frame < 2; key_frame++) {
            if (!intra_only || !keyframe_0_esc || key_frame != 0) {
                FrameCode *ft  = &nut->frame_code[start2];
                ft->flags      = FLAG_KEY * key_frame;
                ft->flags     |= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id  = stream_id;
                ft->size_mul   = 1;
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
                start2++;
            }
        }
        key_frame = intra_only;
#if 1
        if (is_audio) {
            int frame_bytes = codec->frame_size * (int64_t)codec->bit_rate /
                              (8 * codec->sample_rate);
            int pts;
            for (pts = 0; pts < 2; pts++) {
                for (pred = 0; pred < 2; pred++) {
                    FrameCode *ft  = &nut->frame_code[start2];
                    ft->flags      = FLAG_KEY * key_frame;
                    ft->stream_id  = stream_id;
                    ft->size_mul   = frame_bytes + 2;
                    ft->size_lsb   = frame_bytes + pred;
                    ft->pts_delta  = pts * frame_size;
                    ft->header_idx = find_header_idx(s, codec, frame_bytes + pred, key_frame);
                    start2++;
                }
            }
        } else {
            FrameCode *ft = &nut->frame_code[start2];
            ft->flags     = FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id = stream_id;
            ft->size_mul  = 1;
            ft->pts_delta = frame_size;
            start2++;
        }
#endif

        if (codec->has_b_frames) {
            pred_count    = 5;
            pred_table[0] = -2;
            pred_table[1] = -1;
            pred_table[2] = 1;
            pred_table[3] = 3;
            pred_table[4] = 4;
        } else if (codec->codec_id == AV_CODEC_ID_VORBIS) {
            pred_count    = 3;
            pred_table[0] = 2;
            pred_table[1] = 9;
            pred_table[2] = 16;
        } else {
            pred_count    = 1;
            pred_table[0] = 1;
        }

        for (pred = 0; pred < pred_count; pred++) {
            int start3 = start2 + (end2 - start2) * pred / pred_count;
            int end3   = start2 + (end2 - start2) * (pred + 1) / pred_count;

            pred_table[pred] *= frame_size;

            for (index = start3; index < end3; index++) {
                FrameCode *ft = &nut->frame_code[index];

                ft->flags      = FLAG_KEY * key_frame;
                ft->flags     |= FLAG_SIZE_MSB;
                ft->stream_id  = stream_id;
                //FIXME use single byte size and pred from last
                ft->size_mul   = end3 - start3;
                ft->size_lsb   = index - start3;
                ft->pts_delta  = pred_table[pred];
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
            }
        }
    }
    memmove(&nut->frame_code['N' + 1], &nut->frame_code['N'], sizeof(FrameCode) * (255 - 'N'));
    nut->frame_code[0].flags   =
    nut->frame_code[255].flags =
    nut->frame_code['N'].flags = FLAG_INVALID;
}

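/*
 * Write a universal timestamp: the value is multiplied by the number of
 * time bases and the index of the given time base is added, so one coded
 * integer carries both the timestamp and its time base.
 */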
static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc, uint64_t val)
{
    val *= nut->time_base_count;
    val += time_base - nut->time_base;
    ff_put_v(bc, val);
}

/**
 * Store a string as vb.
 */
static void put_str(AVIOContext *bc, const char *string)
{
    int len = strlen(string);

    ff_put_v(bc, len);
    avio_write(bc, string, len);
}

static void put_s(AVIOContext *bc, int64_t val)
{
    ff_put_v(bc, 2 * FFABS(val) - (val > 0));
}

#ifdef TRACE
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
                                  const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    ff_put_v(bc, v);
}

static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file, const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define ff_put_v(bc, v) ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)    put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif

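/*
 * Flush a dynamic buffer as one NUT packet: startcode, forward pointer,
 * a header checksum when the forward pointer exceeds 4096, the payload,
 * and an optional payload checksum.
 */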
//FIXME remove calculate_checksum
static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc,
                       int calculate_checksum, uint64_t startcode)
{
    uint8_t *dyn_buf = NULL;
    int dyn_size     = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr     = dyn_size + 4 * calculate_checksum;

    if (forw_ptr > 4096)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_wb64(bc, startcode);
    ff_put_v(bc, forw_ptr);
    if (forw_ptr > 4096)
        avio_wl32(bc, ffio_get_checksum(bc));

    if (calculate_checksum)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_write(bc, dyn_buf, dyn_size);
    if (calculate_checksum)
        avio_wl32(bc, ffio_get_checksum(bc));

    av_free(dyn_buf);
}

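/*
 * Write the main header: version, stream count, max_distance, the time base
 * table, the run-length coded frame_code table, and the elision headers.
 */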
static void write_mainheader(NUTContext *nut, AVIOContext *bc)
{
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields,
        tmp_head_idx;
    int64_t tmp_match;

    ff_put_v(bc, nut->version = NUT_VERSION);
    if (nut->version > 3)
        ff_put_v(bc, nut->minor_version);
    ff_put_v(bc, nut->avf->nb_streams);
    ff_put_v(bc, nut->max_distance);
    ff_put_v(bc, nut->time_base_count);

    for (i = 0; i < nut->time_base_count; i++) {
        ff_put_v(bc, nut->time_base[i].num);
        ff_put_v(bc, nut->time_base[i].den);
    }

    tmp_pts      = 0;
    tmp_mul      = 1;
    tmp_stream   = 0;
    tmp_match    = 1 - (1LL << 62);
    tmp_head_idx = 0;
    for (i = 0; i < 256; ) {
        tmp_fields = 0;
        tmp_size   = 0;
//        tmp_res = 0;
        if (tmp_pts      != nut->frame_code[i].pts_delta ) tmp_fields = 1;
        if (tmp_mul      != nut->frame_code[i].size_mul  ) tmp_fields = 2;
        if (tmp_stream   != nut->frame_code[i].stream_id ) tmp_fields = 3;
        if (tmp_size     != nut->frame_code[i].size_lsb  ) tmp_fields = 4;
//        if (tmp_res    != nut->frame_code[i].res       ) tmp_fields = 5;
        if (tmp_head_idx != nut->frame_code[i].header_idx) tmp_fields = 8;

        tmp_pts      = nut->frame_code[i].pts_delta;
        tmp_flags    = nut->frame_code[i].flags;
        tmp_stream   = nut->frame_code[i].stream_id;
        tmp_mul      = nut->frame_code[i].size_mul;
        tmp_size     = nut->frame_code[i].size_lsb;
//        tmp_res    = nut->frame_code[i].res;
        tmp_head_idx = nut->frame_code[i].header_idx;

        for (j = 0; i < 256; j++, i++) {
            if (i == 'N') {
                j--;
                continue;
            }
            if (nut->frame_code[i].pts_delta  != tmp_pts      ||
                nut->frame_code[i].flags      != tmp_flags    ||
                nut->frame_code[i].stream_id  != tmp_stream   ||
                nut->frame_code[i].size_mul   != tmp_mul      ||
                nut->frame_code[i].size_lsb   != tmp_size + j ||
//                nut->frame_code[i].res      != tmp_res      ||
                nut->frame_code[i].header_idx != tmp_head_idx)
                break;
        }
        if (j != tmp_mul - tmp_size)
            tmp_fields = 6;

        ff_put_v(bc, tmp_flags);
        ff_put_v(bc, tmp_fields);
        if (tmp_fields > 0) put_s(bc, tmp_pts);
        if (tmp_fields > 1) ff_put_v(bc, tmp_mul);
        if (tmp_fields > 2) ff_put_v(bc, tmp_stream);
        if (tmp_fields > 3) ff_put_v(bc, tmp_size);
        if (tmp_fields > 4) ff_put_v(bc, 0 /*tmp_res*/);
        if (tmp_fields > 5) ff_put_v(bc, j);
        if (tmp_fields > 6) ff_put_v(bc, tmp_match);
        if (tmp_fields > 7) ff_put_v(bc, tmp_head_idx);
    }

    ff_put_v(bc, nut->header_count - 1);
    for (i = 1; i < nut->header_count; i++) {
        ff_put_v(bc, nut->header_len[i]);
        avio_write(bc, nut->header[i], nut->header_len[i]);
    }
}

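/*
 * Write one stream header: stream class, fourcc, time base index, msb pts
 * shift, max pts distance, decode delay, extradata, and the codec type
 * specific fields (sample rate/channels or dimensions/aspect ratio).
 */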
static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc,
                              AVStream *st, int i)
{
    NUTContext *nut       = avctx->priv_data;
    AVCodecContext *codec = st->codec;

    ff_put_v(bc, i);
    switch (codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:    ff_put_v(bc, 0); break;
    case AVMEDIA_TYPE_AUDIO:    ff_put_v(bc, 1); break;
    case AVMEDIA_TYPE_SUBTITLE: ff_put_v(bc, 2); break;
    default:                    ff_put_v(bc, 3); break;
    }
    ff_put_v(bc, 4);

    if (codec->codec_tag) {
        avio_wl32(bc, codec->codec_tag);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);
    }

    ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
    ff_put_v(bc, nut->stream[i].msb_pts_shift);
    ff_put_v(bc, nut->stream[i].max_pts_distance);
    ff_put_v(bc, codec->has_b_frames);
    avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    ff_put_v(bc, codec->extradata_size);
    avio_write(bc, codec->extradata, codec->extradata_size);

    switch (codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, codec->sample_rate);
        ff_put_v(bc, 1);
        ff_put_v(bc, codec->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, codec->width);
        ff_put_v(bc, codec->height);

        if (st->sample_aspect_ratio.num <= 0 ||
            st->sample_aspect_ratio.den <= 0) {
            ff_put_v(bc, 0);
            ff_put_v(bc, 0);
        } else {
            ff_put_v(bc, st->sample_aspect_ratio.num);
            ff_put_v(bc, st->sample_aspect_ratio.den);
        }
        ff_put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}

static int add_info(AVIOContext *bc, const char *type, const char *value)
{
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}

static int write_globalinfo(NUTContext *nut, AVIOContext *bc)
{
    AVFormatContext *s   = nut->avf;
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count        = 0, dyn_size;
    int ret          = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, 0); //stream_id_plus1
    ff_put_v(bc, 0); //chapter_id
    ff_put_v(bc, 0); //timestamp_start
    ff_put_v(bc, 0); //length

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}

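/*
 * Write the per-stream info packet: stream metadata, dispositions and, for
 * video, the original frame rate. Returns the number of info entries
 * written; 0 tells the caller the empty packet can be dropped.
 */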
static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id) {
    AVFormatContext *s   = nut->avf;
    AVStream *st         = s->streams[stream_id];
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count = 0, dyn_size, i;
    int ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    while ((t = av_dict_get(st->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);
    for (i = 0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        uint8_t buf[256];
        snprintf(buf, sizeof(buf), "%d/%d", st->codec->time_base.den, st->codec->time_base.num);
        count += add_info(dyn_bc, "r_frame_rate", buf);
    }
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        ff_put_v(bc, stream_id + 1); //stream_id_plus1
        ff_put_v(bc, 0); //chapter_id
        ff_put_v(bc, 0); //timestamp_start
        ff_put_v(bc, 0); //length

        ff_put_v(bc, count);

        avio_write(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}

static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
{
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf     = NULL;
    AVDictionaryEntry *t = NULL;
    AVChapter *ch        = nut->avf->chapters[id];
    int ret, dyn_size, count = 0;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_put_v(bc, 0);                                        // stream_id_plus1
    put_s(bc, id + 1);                                      // chapter_id
    put_tt(nut, nut->chapter[id].time_base, bc, ch->start); // chapter_start
    ff_put_v(bc, ch->end - ch->start);                      // chapter_len

    while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);
    return 0;
}

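/*
 * Write the index packet: the highest pts seen, the syncpoint positions
 * (as differences, in units of 16 bytes) and, per stream, run-length coded
 * presence flags plus the keyframe pts values. The final 8 bytes store the
 * total index size so a demuxer can locate it from the end of the file.
 */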
static int write_index(NUTContext *nut, AVIOContext *bc) {
    int i;
    Syncpoint dummy = { .pos = 0 };
    Syncpoint *next_node[2] = { NULL };
    int64_t startpos = avio_tell(bc);
    int64_t payload_size;

    put_tt(nut, nut->max_pts_tb, bc, nut->max_pts);

    ff_put_v(bc, nut->sp_count);

    for (i = 0; i < nut->sp_count; i++) {
        av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp, (void**)next_node);
        ff_put_v(bc, (next_node[1]->pos >> 4) - (dummy.pos >> 4));
        dummy.pos = next_node[1]->pos;
    }

    for (i = 0; i < nut->avf->nb_streams; i++) {
        StreamContext *nus = &nut->stream[i];
        int64_t last_pts = -1;
        int j, k;
        for (j = 0; j < nut->sp_count; j++) {
            int flag;
            int n = 0;

            if (j && nus->keyframe_pts[j] == nus->keyframe_pts[j-1]) {
                av_log(nut->avf, AV_LOG_WARNING, "Multiple keyframes with same PTS\n");
                nus->keyframe_pts[j] = AV_NOPTS_VALUE;
            }

            flag = (nus->keyframe_pts[j] != AV_NOPTS_VALUE) ^ (j+1 == nut->sp_count);
            for (; j < nut->sp_count && (nus->keyframe_pts[j] != AV_NOPTS_VALUE) == flag; j++)
                n++;

            ff_put_v(bc, 1 + 2 * flag + 4 * n);
            for (k = j - n; k <= j && k < nut->sp_count; k++) {
                if (nus->keyframe_pts[k] == AV_NOPTS_VALUE)
                    continue;
                av_assert0(nus->keyframe_pts[k] > last_pts);
                ff_put_v(bc, nus->keyframe_pts[k] - last_pts);
                last_pts = nus->keyframe_pts[k];
            }
        }
    }

    payload_size = avio_tell(bc) - startpos + 8 + 4;

    avio_wb64(bc, 8 + payload_size + av_log2(payload_size) / 7 + 1 + 4 * (payload_size > 4096));

    return 0;
}

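/*
 * Write the full header block: main header, one stream header per stream,
 * global and per-stream info packets, and chapter info packets. The headers
 * are rewritten periodically during muxing (see nut_write_packet), so
 * header_count tracks how many times they have been emitted.
 */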
static int write_headers(AVFormatContext *avctx, AVIOContext *bc)
{
    NUTContext *nut = avctx->priv_data;
    AVIOContext *dyn_bc;
    int i, ret;

    ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i);
        if (ret < 0)
            return ret;
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret < 0)
            return ret;
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else {
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_free(buf);
        }
    }

    for (i = 0; i < nut->avf->nb_chapters; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_chapter(nut, dyn_bc, i);
        if (ret < 0) {
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_freep(&buf);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
    }

    nut->last_syncpoint_pos = INT_MIN;
    nut->header_count++;
    return 0;
}

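/*
 * Muxer init: allocate per-stream and per-chapter contexts, pick a time base
 * for each stream (audio uses 1/sample_rate), deduplicate the time bases,
 * derive msb_pts_shift and max_pts_distance, build the elision header and
 * frame code tables, then write the file id string and the headers.
 */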
static int nut_write_header(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, j, ret;

    nut->avf = s;

    nut->stream    = av_calloc(s->nb_streams,  sizeof(*nut->stream ));
    nut->chapter   = av_calloc(s->nb_chapters, sizeof(*nut->chapter));
    nut->time_base = av_calloc(s->nb_streams +
                               s->nb_chapters, sizeof(*nut->time_base));
    if (!nut->stream || !nut->chapter || !nut->time_base) {
        av_freep(&nut->stream);
        av_freep(&nut->chapter);
        av_freep(&nut->time_base);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->sample_rate) {
            time_base = (AVRational) {1, st->codec->sample_rate};
        } else {
            time_base = ff_choose_timebase(s, st, 48000);
        }

        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))) {
                break;
            }
        nut->time_base[j]        = time_base;
        nut->stream[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;

        if (INT64_C(1000) * time_base.num >= time_base.den)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance =
            FFMAX(time_base.den, time_base.num) / time_base.num;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
                break;

        nut->time_base[j]         = ch->time_base;
        nut->chapter[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;
    }

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);
    build_frame_code(s);
    av_assert0(nut->frame_code['N'].flags == FLAG_INVALID);

    avio_write(bc, ID_STRING, strlen(ID_STRING));
    avio_w8(bc, 0);

    if ((ret = write_headers(s, bc)) < 0)
        return ret;

    if (s->avoid_negative_ts < 0)
        s->avoid_negative_ts = 1;

    avio_flush(bc);

    return 0;
}

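/*
 * Determine which frame flags are actually required to describe this packet
 * with the given frame code: stream id, coded pts, size MSB, checksum and
 * header index are only needed where the frame code's defaults do not match.
 */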
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc,
                            AVPacket *pkt)
{
    int flags = 0;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        flags |= FLAG_KEY;
    if (pkt->stream_index != fc->stream_id)
        flags |= FLAG_STREAM_ID;
    if (pkt->size / fc->size_mul)
        flags |= FLAG_SIZE_MSB;
    if (pkt->pts - nus->last_pts != fc->pts_delta)
        flags |= FLAG_CODED_PTS;
    if (pkt->side_data_elems && nut->version > 3)
        flags |= FLAG_SM_DATA;
    if (pkt->size > 2 * nut->max_distance)
        flags |= FLAG_CHECKSUM;
    if (FFABS(pkt->pts - nus->last_pts) > nus->max_pts_distance)
        flags |= FLAG_CHECKSUM;
    if (pkt->size < nut->header_len[fc->header_idx] ||
        (pkt->size > 4096 && fc->header_idx)        ||
        memcmp(pkt->data, nut->header[fc->header_idx],
               nut->header_len[fc->header_idx]))
        flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
}

static int find_best_header_idx(NUTContext *nut, AVPacket *pkt)
{
    int i;
    int best_i   = 0;
    int best_len = 0;

    if (pkt->size > 4096)
        return 0;

    for (i = 1; i < nut->header_count; i++)
        if (pkt->size >= nut->header_len[i]
            && nut->header_len[i] > best_len
            && !memcmp(pkt->data, nut->header[i], nut->header_len[i])) {
            best_i   = i;
            best_len = nut->header_len[i];
        }
    return best_i;
}

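/*
 * Serialize packet side data (or, with is_meta set, string metadata updates)
 * as NUT side/meta data: an entry count followed by name/value pairs, with
 * binary payloads stored as typed "bin"/"u64" values.
 */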
static int write_sm_data(AVFormatContext *s, AVIOContext *bc, AVPacket *pkt, int is_meta)
{
    AVStream *st = s->streams[pkt->stream_index];
    int ret, i, dyn_size;
    unsigned flags;
    AVIOContext *dyn_bc;
    int sm_data_count = 0;
    uint8_t tmp[256];
    uint8_t *dyn_buf;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    for (i = 0; i < pkt->side_data_elems; i++) {
        const uint8_t *data = pkt->side_data[i].data;
        int size = pkt->side_data[i].size;
        const uint8_t *data_end = data + size;

        if (is_meta) {
            if (   pkt->side_data[i].type == AV_PKT_DATA_METADATA_UPDATE
                || pkt->side_data[i].type == AV_PKT_DATA_STRINGS_METADATA) {
                if (!size || data[size-1])
                    return AVERROR(EINVAL);
                while (data < data_end) {
                    const uint8_t *key = data;
                    const uint8_t *val = data + strlen(key) + 1;

                    if (val >= data_end)
                        return AVERROR(EINVAL);

                    put_str(dyn_bc, key);
                    put_s(dyn_bc, -1);
                    put_str(dyn_bc, val);
                    data = val + strlen(val) + 1;
                    sm_data_count++;
                }
            }
        } else {
            switch (pkt->side_data[i].type) {
            case AV_PKT_DATA_PALETTE:
            case AV_PKT_DATA_NEW_EXTRADATA:
            case AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL:
            default:
                if        (pkt->side_data[i].type == AV_PKT_DATA_PALETTE) {
                    put_str(dyn_bc, "Palette");
                } else if (pkt->side_data[i].type == AV_PKT_DATA_NEW_EXTRADATA) {
                    put_str(dyn_bc, "Extradata");
                } else if (pkt->side_data[i].type == AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL) {
                    snprintf(tmp, sizeof(tmp), "CodecSpecificSide%"PRId64"", AV_RB64(data));
                    put_str(dyn_bc, tmp);
                } else {
                    snprintf(tmp, sizeof(tmp), "UserData%s-SD-%d",
                             (st->codec->flags & CODEC_FLAG_BITEXACT) ? "Lavf" : LIBAVFORMAT_IDENT,
                             pkt->side_data[i].type);
                    put_str(dyn_bc, tmp);
                }
                put_s(dyn_bc, -2);
                put_str(dyn_bc, "bin");
                ff_put_v(dyn_bc, pkt->side_data[i].size);
                avio_write(dyn_bc, data, pkt->side_data[i].size);
                sm_data_count++;
                break;
            case AV_PKT_DATA_PARAM_CHANGE:
                flags = bytestream_get_le32(&data);
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
                    put_str(dyn_bc, "Channels");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    sm_data_count++;
                }
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
                    put_str(dyn_bc, "ChannelLayout");
                    put_s(dyn_bc, -2);
                    put_str(dyn_bc, "u64");
                    ff_put_v(dyn_bc, 8);
                    avio_write(dyn_bc, data, 8); data += 8;
                    sm_data_count++;
                }
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
                    put_str(dyn_bc, "SampleRate");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    sm_data_count++;
                }
                if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
                    put_str(dyn_bc, "Width");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    put_str(dyn_bc, "Height");
                    put_s(dyn_bc, bytestream_get_le32(&data));
                    sm_data_count += 2;
                }
                break;
            case AV_PKT_DATA_SKIP_SAMPLES:
                if (AV_RL32(data)) {
                    put_str(dyn_bc, "SkipStart");
                    put_s(dyn_bc, (unsigned)AV_RL32(data));
                    sm_data_count++;
                }
                if (AV_RL32(data+4)) {
                    put_str(dyn_bc, "SkipEnd");
                    put_s(dyn_bc, (unsigned)AV_RL32(data+4));
                    sm_data_count++;
                }
                break;
            case AV_PKT_DATA_METADATA_UPDATE:
            case AV_PKT_DATA_STRINGS_METADATA:
                // belongs into meta, not side data
                break;
            }
        }
    }

    ff_put_v(bc, sm_data_count);
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);

    return 0;
}

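/*
 * Write one frame: optionally serialize side data, repeat the headers and
 * emit a syncpoint when needed, pick the cheapest frame code for this
 * packet, then write the coded flags, pts, size and payload (minus any
 * elided header bytes) and update the keyframe index state.
 */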
static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUTContext *nut    = s->priv_data;
    StreamContext *nus = &nut->stream[pkt->stream_index];
    AVIOContext *bc    = s->pb, *dyn_bc, *sm_bc = NULL;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx;
    int best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int store_sp  = 0;
    int ret;
    int sm_size   = 0;
    int data_size = pkt->size;
    uint8_t *sm_buf;

    if (pkt->pts < 0) {
        av_log(s, AV_LOG_ERROR,
               "Negative pts not supported stream %d, pts %"PRId64"\n",
               pkt->stream_index, pkt->pts);
        return AVERROR(EINVAL);
    }

    if (pkt->side_data_elems && nut->version > 3) {
        ret = avio_open_dyn_buf(&sm_bc);
        if (ret < 0)
            return ret;
        write_sm_data(s, sm_bc, pkt, 0);
        write_sm_data(s, sm_bc, pkt, 1);
        sm_size = avio_close_dyn_buf(sm_bc, &sm_buf);
        data_size += sm_size;
    }

    if (1LL << (20 + 3 * nut->header_count) <= avio_tell(bc))
        write_headers(s, bc);

    if (key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp = 1;

    if (data_size + 30 /*FIXME check*/ + avio_tell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp = 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if (store_sp) {
        Syncpoint *sp, dummy = { .pos = INT64_MAX };

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index = av_index_search_timestamp(st, dts_tb,
                                                  AVSEEK_FLAG_BACKWARD);
            if (index >= 0)
                dummy.pos = FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if (dummy.pos == INT64_MAX)
            dummy.pos = 0;

        sp = av_tree_find(nut->syncpoints, &dummy, (void *)ff_nut_sp_pos_cmp,
                          NULL);

        nut->last_syncpoint_pos = avio_tell(bc);
        ret                     = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
        ff_put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos) >> 4 : 0);
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        if ((ret = ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0 /*unused*/, pkt->dts)) < 0)
            return ret;

        if ((1ll << 60) % nut->sp_count == 0)
            for (i = 0; i < s->nb_streams; i++) {
                int j;
                StreamContext *nus = &nut->stream[i];
                av_reallocp_array(&nus->keyframe_pts, 2*nut->sp_count, sizeof(*nus->keyframe_pts));
                if (!nus->keyframe_pts)
                    return AVERROR(ENOMEM);
                for (j = nut->sp_count == 1 ? 0 : nut->sp_count; j < 2*nut->sp_count; j++)
                    nus->keyframe_pts[j] = AV_NOPTS_VALUE;
            }
    }
    av_assert0(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1 << nus->msb_pts_shift) - 1);
    if (ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts = pkt->pts + (1 << nus->msb_pts_shift);

    best_header_idx = find_best_header_idx(nut, pkt);

    best_length = INT_MAX;
    frame_code  = -1;
    for (i = 0; i < 256; i++) {
        int length    = 0;
        FrameCode *fc = &nut->frame_code[i];
        int flags     = fc->flags;

        if (flags & FLAG_INVALID)
            continue;
        needed_flags = get_needed_flags(nut, nus, fc, pkt);

        if (flags & FLAG_CODED) {
            length++;
            flags = needed_flags;
        }

        if ((flags & needed_flags) != needed_flags)
            continue;

        if ((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if (flags & FLAG_STREAM_ID)
            length += ff_get_v_length(pkt->stream_index);

        if (data_size % fc->size_mul != fc->size_lsb)
            continue;
        if (flags & FLAG_SIZE_MSB)
            length += ff_get_v_length(data_size / fc->size_mul);

        if (flags & FLAG_CHECKSUM)
            length += 4;

        if (flags & FLAG_CODED_PTS)
            length += ff_get_v_length(coded_pts);

        if (   (flags & FLAG_CODED)
            && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx] + 1) {
            flags |= FLAG_HEADER_IDX;
        }

        if (flags & FLAG_HEADER_IDX) {
            length += 1 - nut->header_len[best_header_idx];
        } else {
            length -= nut->header_len[fc->header_idx];
        }

        length *= 4;
        length += !(flags & FLAG_CODED_PTS);
        length += !(flags & FLAG_CHECKSUM);

        if (length < best_length) {
            best_length = length;
            frame_code  = i;
        }
    }
    av_assert0(frame_code != -1);

    fc           = &nut->frame_code[frame_code];
    flags        = fc->flags;
    needed_flags = get_needed_flags(nut, nus, fc, pkt);
    header_idx   = fc->header_idx;

    ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_w8(bc, frame_code);
    if (flags & FLAG_CODED) {
        ff_put_v(bc, (flags ^ needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if (flags & FLAG_STREAM_ID)  ff_put_v(bc, pkt->stream_index);
    if (flags & FLAG_CODED_PTS)  ff_put_v(bc, coded_pts);
    if (flags & FLAG_SIZE_MSB )  ff_put_v(bc, data_size / fc->size_mul);
    if (flags & FLAG_HEADER_IDX) ff_put_v(bc, header_idx = best_header_idx);

    if (flags & FLAG_CHECKSUM)   avio_wl32(bc, ffio_get_checksum(bc));
    else                         ffio_get_checksum(bc);

    if (flags & FLAG_SM_DATA) {
        avio_write(bc, sm_buf, sm_size);
        av_freep(&sm_buf);
    }
    avio_write(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);

    nus->last_flags = flags;
    nus->last_pts   = pkt->pts;

    //FIXME just store one per syncpoint
    if (flags & FLAG_KEY) {
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);

        if (nus->keyframe_pts && nus->keyframe_pts[nut->sp_count] == AV_NOPTS_VALUE)
            nus->keyframe_pts[nut->sp_count] = pkt->pts;
    }

    if (!nut->max_pts_tb || av_compare_ts(nut->max_pts, *nut->max_pts_tb, pkt->pts, *nus->time_base) < 0) {
        nut->max_pts    = pkt->pts;
        nut->max_pts_tb = nus->time_base;
    }

    return 0;
}

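/*
 * Finish the file: make sure the headers were written at least three times,
 * append the index packet if any syncpoints exist, and free the per-stream,
 * chapter and time base state.
 */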
static int nut_write_trailer(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb, *dyn_bc;
    int i, ret;

    while (nut->header_count < 3)
        write_headers(s, bc);

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret >= 0 && nut->sp_count) {
        write_index(nut, dyn_bc);
        put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE);
    }

    ff_nut_free_sp(nut);
    for (i = 0; i < s->nb_streams; i++)
        av_freep(&nut->stream[i].keyframe_pts);

    av_freep(&nut->stream);
    av_freep(&nut->chapter);
    av_freep(&nut->time_base);

    return 0;
}

AVOutputFormat ff_nut_muxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT"),
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = CONFIG_LIBVORBIS ? AV_CODEC_ID_VORBIS :
                      CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag      = ff_nut_codec_tags,
};