/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdint.h>

#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "libavutil/dict.h"
#include "libavutil/time.h"
#include "libavutil/opt.h"
#include "libavcodec/mpegaudiodata.h"
#include "nut.h"
#include "internal.h"
#include "avio_internal.h"
#include "riff.h"
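
/*
 * Predict the header bytes a frame of the given codec, size and keyframe
 * status is expected to start with (MPEG start codes, MP2/MP3 frame headers),
 * so that they can later be elided from stored frames. Returns the number of
 * bytes written to out, 0 if nothing can be predicted, or a negative value
 * if the frame size does not match any MPEG audio bitrate.
 */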
static int find_expected_header(AVCodecParameters *p, int size, int key_frame,
                                uint8_t out[64])
{
    int sample_rate = p->sample_rate;

    if (size > 4096)
        return 0;

    AV_WB24(out, 1);

    if (p->codec_id == AV_CODEC_ID_MPEG4) {
        if (key_frame) {
            return 3;
        } else {
            out[3] = 0xB6;
            return 4;
        }
    } else if (p->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
               p->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        return 3;
    } else if (p->codec_id == AV_CODEC_ID_H264) {
        return 3;
    } else if (p->codec_id == AV_CODEC_ID_MP3 ||
               p->codec_id == AV_CODEC_ID_MP2) {
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer           = p->codec_id == AV_CODEC_ID_MP3 ? 3 : 2;
        unsigned int header = 0xFFF00000;

        lsf           = sample_rate < (24000 + 32000) / 2;
        mpeg25        = sample_rate < (12000 + 16000) / 2;
        sample_rate <<= lsf + mpeg25;
        if (sample_rate < (32000 + 44100) / 2)
            sample_rate_index = 2;
        else if (sample_rate < (44100 + 48000) / 2)
            sample_rate_index = 0;
        else
            sample_rate_index = 1;

        sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        for (bitrate_index = 2; bitrate_index < 30; bitrate_index++) {
            frame_size =
                avpriv_mpa_bitrate_tab[lsf][layer - 1][bitrate_index >> 1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) +
                         (bitrate_index & 1);

            if (frame_size == size)
                break;
        }

        header |= (!lsf) << 19;
        header |= (4 - layer) << 17;
        header |= 1 << 16; //no crc
        AV_WB32(out, header);
        if (size <= 0)
            return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
        if (bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index >> 1) << 12;
        header |= sample_rate_index << 10;
        header |= (bitrate_index & 1) << 9;

        return 2; //FIXME actually put the needed ones in build_elision_headers()
        //return 3; //we guess that the private bit is not set
        //FIXME the above assumptions should be checked, if these turn out false too often something should be done
    }
    return 0;
}
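
/*
 * Map the header predicted for this frame to an entry of the elision header
 * table built in build_elision_headers(); returns 0 when no entry matches.
 */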
static int find_header_idx(AVFormatContext *s, AVCodecParameters *p, int size,
                           int frame_type)
{
    NUTContext *nut = s->priv_data;
    uint8_t out[64];
    int i;
    int len = find_expected_header(p, size, frame_type, out);

    for (i = 1; i < nut->header_count; i++) {
        if (len == nut->header_len[i] && !memcmp(out, nut->header[i], len)) {
            return i;
        }
    }

    return 0;
}
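
/*
 * Fill the table of elision headers (MPEG start codes and MP2/MP3 frame
 * headers) that may be stripped from the start of frames; index 0 is
 * reserved for "no elision".
 */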
static void build_elision_headers(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int i;
    //FIXME this is lame
    //FIXME write a 2pass mode to find the maximal headers
    static const uint8_t headers[][5] = {
        { 3, 0x00, 0x00, 0x01 },
        { 4, 0x00, 0x00, 0x01, 0xB6 },
        { 2, 0xFF, 0xFA }, //mp3+crc
        { 2, 0xFF, 0xFB }, //mp3
        { 2, 0xFF, 0xFC }, //mp2+crc
        { 2, 0xFF, 0xFD }, //mp2
    };

    nut->header_count = 7;
    for (i = 1; i < nut->header_count; i++) {
        nut->header_len[i] = headers[i - 1][0];
        nut->header[i]     = &headers[i - 1][1];
    }
}
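
/*
 * Build the 256-entry frame_code table for the main header: the available
 * codes are split between the streams, and within each stream between
 * keyframe flags, common audio frame sizes and typical pts deltas, so that
 * the most frequent frame types can later be written as a single byte.
 */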
static void build_frame_code(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start = 1;
    int end   = 254;
    int keyframe_0_esc = s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft            = &nut->frame_code[start];
    ft->flags     = FLAG_CODED;
    ft->size_mul  = 1;
    ft->pts_delta = 1;
    start++;

    if (keyframe_0_esc) {
        /* keyframe = 0 escape */
        FrameCode *ft = &nut->frame_code[start];
        ft->flags    = FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul = 1;
        start++;
    }

    for (stream_id = 0; stream_id < s->nb_streams; stream_id++) {
        int start2 = start + (end - start) * stream_id / s->nb_streams;
        int end2   = start + (end - start) * (stream_id + 1) / s->nb_streams;
        AVCodecParameters *par = s->streams[stream_id]->codecpar;
        const AVCodecDescriptor *desc = avcodec_descriptor_get(par->codec_id);
        int is_audio   = par->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only = /*codec->intra_only || */ is_audio;
        int pred_count;

        for (key_frame = 0; key_frame < 2; key_frame++) {
            if (!intra_only || !keyframe_0_esc || key_frame != 0) {
                FrameCode *ft  = &nut->frame_code[start2];
                ft->flags      = FLAG_KEY * key_frame;
                ft->flags     |= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id  = stream_id;
                ft->size_mul   = 1;
                if (is_audio)
                    ft->header_idx = find_header_idx(s, par, -1, key_frame);
                start2++;
            }
        }
        key_frame = intra_only;

        if (is_audio) {
            int frame_bytes;
            int pts;

            if (par->block_align > 0) {
                frame_bytes = par->block_align;
            } else {
                int frame_size = av_get_audio_frame_duration2(par, 0);
                frame_bytes = frame_size * (int64_t)par->bit_rate / (8 * par->sample_rate);
            }

            for (pts = 0; pts < 2; pts++)
                for (pred = 0; pred < 2; pred++) {
                    FrameCode *ft  = &nut->frame_code[start2];
                    ft->flags      = FLAG_KEY * key_frame;
                    ft->stream_id  = stream_id;
                    ft->size_mul   = frame_bytes + 2;
                    ft->size_lsb   = frame_bytes + pred;
                    ft->pts_delta  = pts;
                    ft->header_idx = find_header_idx(s, par, frame_bytes + pred, key_frame);
                    start2++;
                }
        } else {
            FrameCode *ft = &nut->frame_code[start2];
            ft->flags     = FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id = stream_id;
            ft->size_mul  = 1;
            ft->pts_delta = 1;
            start2++;
        }

        if (desc && desc->props & AV_CODEC_PROP_REORDER) {
            pred_count    = 5;
            pred_table[0] = -2;
            pred_table[1] = -1;
            pred_table[2] = 1;
            pred_table[3] = 3;
            pred_table[4] = 4;
        } else if (par->codec_id == AV_CODEC_ID_VORBIS) {
            pred_count    = 3;
            pred_table[0] = 2;
            pred_table[1] = 9;
            pred_table[2] = 16;
        } else {
            pred_count    = 1;
            pred_table[0] = 1;
        }

        for (pred = 0; pred < pred_count; pred++) {
            int start3 = start2 + (end2 - start2) * pred / pred_count;
            int end3   = start2 + (end2 - start2) * (pred + 1) / pred_count;

            for (index = start3; index < end3; index++) {
                FrameCode *ft = &nut->frame_code[index];

                ft->flags      = FLAG_KEY * key_frame;
                ft->flags     |= FLAG_SIZE_MSB;
                ft->stream_id  = stream_id;
                //FIXME use single byte size and pred from last
                ft->size_mul   = end3 - start3;
                ft->size_lsb   = index - start3;
                ft->pts_delta  = pred_table[pred];
                if (is_audio)
                    ft->header_idx = find_header_idx(s, par, -1, key_frame);
            }
        }
    }
    memmove(&nut->frame_code['N' + 1], &nut->frame_code['N'],
            sizeof(FrameCode) * (255 - 'N'));
    nut->frame_code[0].flags   =
    nut->frame_code[255].flags =
    nut->frame_code['N'].flags = FLAG_INVALID;
}
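
/*
 * Write a universal timestamp: the value is multiplied by the number of
 * time bases and the index of the chosen time base is added, so both can be
 * recovered from a single coded integer.
 */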
static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc,
                   uint64_t val)
{
    val *= nut->time_base_count;
    val += time_base - nut->time_base;
    ff_put_v(bc, val);
}

/**
 * Store a string as vb.
 */
static void put_str(AVIOContext *bc, const char *string)
{
    int len = strlen(string);

    ff_put_v(bc, len);
    avio_write(bc, string, len);
}

static void put_s(AVIOContext *bc, int64_t val)
{
    ff_put_v(bc, 2 * FFABS(val) - (val > 0));
}

#ifdef TRACE
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
                                  const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    ff_put_v(bc, v);
}

static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file,
                               const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define ff_put_v(bc, v) ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)    put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif
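
/*
 * Emit one NUT packet: startcode and forward pointer, a header checksum for
 * packets larger than 4096 bytes, then the payload collected in dyn_bc,
 * optionally followed by its own checksum.
 */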
//FIXME remove calculate_checksum
static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc,
                       int calculate_checksum, uint64_t startcode)
{
    uint8_t *dyn_buf = NULL;
    int dyn_size     = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr     = dyn_size + 4 * calculate_checksum;

    if (forw_ptr > 4096)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_wb64(bc, startcode);
    ff_put_v(bc, forw_ptr);
    if (forw_ptr > 4096)
        avio_wl32(bc, ffio_get_checksum(bc));

    if (calculate_checksum)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_write(bc, dyn_buf, dyn_size);
    if (calculate_checksum)
        avio_wl32(bc, ffio_get_checksum(bc));

    av_free(dyn_buf);
}
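
/*
 * Write the main header: version, stream count, max_distance, the time base
 * list and the frame_code table. Consecutive frame codes that follow the
 * same pattern are collapsed by transmitting only the fields that changed
 * plus a repeat count.
 */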
static void write_mainheader(NUTContext *nut, AVIOContext *bc)
{
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields,
        tmp_head_idx;
    int64_t tmp_match;

    ff_put_v(bc, nut->version);
    ff_put_v(bc, nut->avf->nb_streams);
    ff_put_v(bc, nut->max_distance);
    ff_put_v(bc, nut->time_base_count);

    for (i = 0; i < nut->time_base_count; i++) {
        ff_put_v(bc, nut->time_base[i].num);
        ff_put_v(bc, nut->time_base[i].den);
    }

    tmp_pts      = 0;
    tmp_mul      = 1;
    tmp_stream   = 0;
    tmp_match    = 1 - (1LL << 62);
    tmp_head_idx = 0;
    for (i = 0; i < 256; ) {
        tmp_fields = 0;
        tmp_size   = 0;
//        tmp_res = 0;
        if (tmp_pts != nut->frame_code[i].pts_delta)
            tmp_fields = 1;
        if (tmp_mul != nut->frame_code[i].size_mul)
            tmp_fields = 2;
        if (tmp_stream != nut->frame_code[i].stream_id)
            tmp_fields = 3;
        if (tmp_size != nut->frame_code[i].size_lsb)
            tmp_fields = 4;
//        if (tmp_res != nut->frame_code[i].res)
//            tmp_fields = 5;
        if (tmp_head_idx != nut->frame_code[i].header_idx)
            tmp_fields = 8;

        tmp_pts      = nut->frame_code[i].pts_delta;
        tmp_flags    = nut->frame_code[i].flags;
        tmp_stream   = nut->frame_code[i].stream_id;
        tmp_mul      = nut->frame_code[i].size_mul;
        tmp_size     = nut->frame_code[i].size_lsb;
//        tmp_res   = nut->frame_code[i].res;
        tmp_head_idx = nut->frame_code[i].header_idx;

        for (j = 0; i < 256; j++, i++) {
            if (i == 'N') {
                j--;
                continue;
            }
            if (nut->frame_code[i].pts_delta  != tmp_pts      ||
                nut->frame_code[i].flags      != tmp_flags    ||
                nut->frame_code[i].stream_id  != tmp_stream   ||
                nut->frame_code[i].size_mul   != tmp_mul      ||
                nut->frame_code[i].size_lsb   != tmp_size + j ||
//              nut->frame_code[i].res        != tmp_res      ||
                nut->frame_code[i].header_idx != tmp_head_idx)
                break;
        }
        if (j != tmp_mul - tmp_size)
            tmp_fields = 6;

        ff_put_v(bc, tmp_flags);
        ff_put_v(bc, tmp_fields);
        if (tmp_fields > 0)
            put_s(bc, tmp_pts);
        if (tmp_fields > 1)
            ff_put_v(bc, tmp_mul);
        if (tmp_fields > 2)
            ff_put_v(bc, tmp_stream);
        if (tmp_fields > 3)
            ff_put_v(bc, tmp_size);
        if (tmp_fields > 4)
            ff_put_v(bc, 0 /*tmp_res*/);
        if (tmp_fields > 5)
            ff_put_v(bc, j);
        if (tmp_fields > 6)
            ff_put_v(bc, tmp_match);
        if (tmp_fields > 7)
            ff_put_v(bc, tmp_head_idx);
    }

    ff_put_v(bc, nut->header_count - 1);
    for (i = 1; i < nut->header_count; i++) {
        ff_put_v(bc, nut->header_len[i]);
        avio_write(bc, nut->header[i], nut->header_len[i]);
    }
    // flags had been effectively introduced in version 4
    if (nut->version > NUT_STABLE_VERSION)
        ff_put_v(bc, nut->flags);
}
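
/*
 * Write one stream header: stream class, codec fourcc, time base index,
 * msb_pts_shift, max_pts_distance, decode delay, codec extradata and the
 * audio- or video-specific fields.
 */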
static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc,
                              AVStream *st, int i)
{
    NUTContext *nut        = avctx->priv_data;
    AVCodecParameters *par = st->codecpar;
    const AVCodecDescriptor *desc = avcodec_descriptor_get(par->codec_id);
    unsigned codec_tag     = av_codec_get_tag(ff_nut_codec_tags, par->codec_id);

    ff_put_v(bc, i);
    switch (par->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, 0);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, 1);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        ff_put_v(bc, 2);
        break;
    default:
        ff_put_v(bc, 3);
        break;
    }

    ff_put_v(bc, 4);

    if (av_codec_get_id(ff_nut_codec_tags, par->codec_tag) == par->codec_id ||
        !codec_tag || par->codec_id == AV_CODEC_ID_RAWVIDEO)
        codec_tag = par->codec_tag;

    if (codec_tag) {
        avio_wl32(bc, codec_tag);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);
    }

    ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
    ff_put_v(bc, nut->stream[i].msb_pts_shift);
    ff_put_v(bc, nut->stream[i].max_pts_distance);
    ff_put_v(bc, (desc && desc->props & AV_CODEC_PROP_REORDER) ? 16 : 0);
    avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    ff_put_v(bc, par->extradata_size);
    avio_write(bc, par->extradata, par->extradata_size);

    switch (par->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, par->sample_rate);
        ff_put_v(bc, 1);
        ff_put_v(bc, par->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, par->width);
        ff_put_v(bc, par->height);

        if (st->sample_aspect_ratio.num <= 0 ||
            st->sample_aspect_ratio.den <= 0) {
            ff_put_v(bc, 0);
            ff_put_v(bc, 0);
        } else {
            ff_put_v(bc, st->sample_aspect_ratio.num);
            ff_put_v(bc, st->sample_aspect_ratio.den);
        }
        ff_put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}

static int add_info(AVIOContext *bc, const char *type, const char *value)
{
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}
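
/*
 * Write the global info packet carrying the container-level metadata
 * dictionary as type/value pairs.
 */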
static int write_globalinfo(NUTContext *nut, AVIOContext *bc)
{
    AVFormatContext *s   = nut->avf;
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count        = 0, dyn_size;
    int ret          = avio_open_dyn_buf(&dyn_bc);

    if (ret < 0)
        return ret;

    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, 0); //stream_id_plus1
    ff_put_v(bc, 0); //chapter_id
    ff_put_v(bc, 0); //timestamp_start
    ff_put_v(bc, 0); //length

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}
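
/*
 * Write a per-stream info packet listing the stream dispositions; returns
 * the number of entries so the caller can drop the packet when it is empty.
 */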
static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id)
{
    AVFormatContext *s = nut->avf;
    AVStream *st       = s->streams[stream_id];
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count        = 0, dyn_size, i;
    int ret          = avio_open_dyn_buf(&dyn_bc);

    if (ret < 0)
        return ret;

    for (i = 0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        ff_put_v(bc, stream_id + 1); //stream_id_plus1
        ff_put_v(bc, 0);             //chapter_id
        ff_put_v(bc, 0);             //timestamp_start
        ff_put_v(bc, 0);             //length

        ff_put_v(bc, count);

        avio_write(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}
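
/*
 * Write an info packet for one chapter: chapter id, start time coded in the
 * chapter's time base, length, and the chapter metadata.
 */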
static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
{
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf     = NULL;
    AVDictionaryEntry *t = NULL;
    AVChapter *ch        = nut->avf->chapters[id];
    int ret, dyn_size, count = 0;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_put_v(bc, 0);                                        // stream_id_plus1
    put_s(bc, id + 1);                                      // chapter_id
    put_tt(nut, nut->chapter[id].time_base, bc, ch->start); // chapter_start
    ff_put_v(bc, ch->end - ch->start);                      // chapter_len

    while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);
    return 0;
}
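
/*
 * Write the whole header block: main header, one stream header per stream,
 * global info, non-empty per-stream info and chapter info packets. This is
 * re-emitted later as the file grows and from the trailer.
 */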
static int write_headers(AVFormatContext *avctx, AVIOContext *bc)
{
    NUTContext *nut = avctx->priv_data;
    AVIOContext *dyn_bc;
    int i, ret;

    ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i);
        if (ret < 0)
            return ret;
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret < 0)
            return ret;
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else
            ffio_free_dyn_buf(&dyn_bc);
    }

    for (i = 0; i < nut->avf->nb_chapters; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_chapter(nut, dyn_bc, i);
        if (ret < 0) {
            ffio_free_dyn_buf(&dyn_bc);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
    }

    nut->last_syncpoint_pos = INT_MIN;
    nut->header_count++;
    return 0;
}
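
/*
 * Muxer init: allocate the stream, chapter and time base tables, pick a
 * time base and msb_pts_shift per stream, build the elision header and
 * frame_code tables, then write the ID string and the initial headers.
 */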
static int nut_write_header(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, j, ret;

    nut->avf = s;

    nut->version = NUT_STABLE_VERSION + !!nut->flags;
    if (nut->flags && s->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        av_log(s, AV_LOG_ERROR,
               "The additional syncpoint modes require version %d, "
               "that is currently not finalized, "
               "please set -f_strict experimental in order to enable it.\n",
               nut->version);
        return AVERROR_EXPERIMENTAL;
    }

    nut->stream = av_mallocz(sizeof(StreamContext) * s->nb_streams);
    if (s->nb_chapters)
        nut->chapter = av_mallocz(sizeof(ChapterContext) * s->nb_chapters);
    nut->time_base = av_mallocz(sizeof(AVRational) * (s->nb_streams +
                                                      s->nb_chapters));
    if (!nut->stream || (s->nb_chapters && !nut->chapter) || !nut->time_base) {
        av_freep(&nut->stream);
        av_freep(&nut->chapter);
        av_freep(&nut->time_base);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st, &time_base.den, &ssize, &time_base.num);

        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))) {
                break;
            }
        nut->time_base[j]        = time_base;
        nut->stream[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;

        if (INT64_C(1000) * time_base.num >= time_base.den)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance =
            FFMAX(time_base.den, time_base.num) / time_base.num;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
                break;

        nut->time_base[j]         = ch->time_base;
        nut->chapter[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;
    }

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);
    build_frame_code(s);
    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    avio_write(bc, ID_STRING, strlen(ID_STRING));
    avio_w8(bc, 0);

    if ((ret = write_headers(s, bc)) < 0)
        return ret;

    avio_flush(bc);

    //FIXME index

    return 0;
}
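
/*
 * Compute the flags a frame code would need to describe this packet
 * (keyframe, explicit stream id, size MSB, coded pts, checksum, header
 * index); used to select the cheapest matching code.
 */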
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc,
                            AVPacket *pkt)
{
    int flags = 0;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        flags |= FLAG_KEY;
    if (pkt->stream_index != fc->stream_id)
        flags |= FLAG_STREAM_ID;
    if (pkt->size / fc->size_mul)
        flags |= FLAG_SIZE_MSB;
    if (pkt->pts - nus->last_pts != fc->pts_delta)
        flags |= FLAG_CODED_PTS;
    if (pkt->size > 2 * nut->max_distance)
        flags |= FLAG_CHECKSUM;
    if (FFABS(pkt->pts - nus->last_pts) > nus->max_pts_distance)
        flags |= FLAG_CHECKSUM;
    if (pkt->size < nut->header_len[fc->header_idx] ||
        (pkt->size > 4096 && fc->header_idx)        ||
        memcmp(pkt->data, nut->header[fc->header_idx],
               nut->header_len[fc->header_idx]))
        flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
}
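
/*
 * Pick the longest elision header matching the start of the packet so that
 * as many bytes as possible can be dropped from the stored frame.
 */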
static int find_best_header_idx(NUTContext *nut, AVPacket *pkt)
{
    int i;
    int best_i   = 0;
    int best_len = 0;

    if (pkt->size > 4096)
        return 0;

    for (i = 1; i < nut->header_count; i++)
        if (pkt->size >= nut->header_len[i]
            && nut->header_len[i] > best_len
            && !memcmp(pkt->data, nut->header[i], nut->header_len[i])) {
            best_i   = i;
            best_len = nut->header_len[i];
        }

    return best_i;
}
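
/*
 * Write one frame: repeat the headers once the file has grown enough, emit
 * a syncpoint when needed, pick the frame code with the smallest estimated
 * coded size, then write the frame header fields followed by the payload
 * with any elided header bytes stripped.
 */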
static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUTContext *nut    = s->priv_data;
    StreamContext *nus = &nut->stream[pkt->stream_index];
    AVIOContext *bc    = s->pb, *dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx,
        best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int store_sp  = 0;
    int ret;

    if (pkt->pts < 0) {
        av_log(s, AV_LOG_ERROR,
               "Negative pts not supported stream %d, pts %"PRId64"\n",
               pkt->stream_index, pkt->pts);
        return AVERROR_INVALIDDATA;
    }

    if (1LL << (20 + 3 * nut->header_count) <= avio_tell(bc))
        write_headers(s, bc);

    if (key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp = 1;

    if (pkt->size + 30 /*FIXME check*/ + avio_tell(bc) >=
        nut->last_syncpoint_pos + nut->max_distance)
        store_sp = 1;

    //FIXME: Ensure store_sp is 1 in the first place.

    if (store_sp &&
        (!(nut->flags & NUT_PIPE) || nut->last_syncpoint_pos == INT_MIN)) {
        Syncpoint *sp, dummy = { .pos = INT64_MAX };

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index = av_index_search_timestamp(st, dts_tb,
                                                  AVSEEK_FLAG_BACKWARD);
            if (index >= 0)
                dummy.pos = FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if (dummy.pos == INT64_MAX)
            dummy.pos = 0;

        sp = av_tree_find(nut->syncpoints, &dummy, (void *)ff_nut_sp_pos_cmp,
                          NULL);

        nut->last_syncpoint_pos = avio_tell(bc);
        ret                     = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
        ff_put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos) >> 4 : 0);
        if (nut->flags & NUT_BROADCAST) {
            put_tt(nut, nus->time_base, dyn_bc,
                   av_rescale_q(av_gettime(), AV_TIME_BASE_Q, *nus->time_base));
        }
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        if ((ret = ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0 /*unused*/, pkt->dts)) < 0)
            return ret;
    }
    assert(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1 << nus->msb_pts_shift) - 1);
    if (ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts = pkt->pts + (1 << nus->msb_pts_shift);

    best_header_idx = find_best_header_idx(nut, pkt);

    best_length = INT_MAX;
    frame_code  = -1;
    for (i = 0; i < 256; i++) {
        int length    = 0;
        FrameCode *fc = &nut->frame_code[i];
        int flags     = fc->flags;

        if (flags & FLAG_INVALID)
            continue;
        needed_flags = get_needed_flags(nut, nus, fc, pkt);

        if (flags & FLAG_CODED) {
            length++;
            flags = needed_flags;
        }

        if ((flags & needed_flags) != needed_flags)
            continue;

        if ((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if (flags & FLAG_STREAM_ID)
            length += ff_get_v_length(pkt->stream_index);

        if (pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if (flags & FLAG_SIZE_MSB)
            length += ff_get_v_length(pkt->size / fc->size_mul);

        if (flags & FLAG_CHECKSUM)
            length += 4;

        if (flags & FLAG_CODED_PTS)
            length += ff_get_v_length(coded_pts);

        if ((flags & FLAG_CODED)
            && nut->header_len[best_header_idx] >
               nut->header_len[fc->header_idx] + 1) {
            flags |= FLAG_HEADER_IDX;
        }

        if (flags & FLAG_HEADER_IDX) {
            length += 1 - nut->header_len[best_header_idx];
        } else {
            length -= nut->header_len[fc->header_idx];
        }

        length *= 4;
        length += !(flags & FLAG_CODED_PTS);
        length += !(flags & FLAG_CHECKSUM);

        if (length < best_length) {
            best_length = length;
            frame_code  = i;
        }
    }
    if (frame_code < 0)
        return AVERROR_BUG;
    fc           = &nut->frame_code[frame_code];
    flags        = fc->flags;
    needed_flags = get_needed_flags(nut, nus, fc, pkt);
    header_idx   = fc->header_idx;

    ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_w8(bc, frame_code);
    if (flags & FLAG_CODED) {
        ff_put_v(bc, (flags ^ needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if (flags & FLAG_STREAM_ID)
        ff_put_v(bc, pkt->stream_index);
    if (flags & FLAG_CODED_PTS)
        ff_put_v(bc, coded_pts);
    if (flags & FLAG_SIZE_MSB)
        ff_put_v(bc, pkt->size / fc->size_mul);
    if (flags & FLAG_HEADER_IDX)
        ff_put_v(bc, header_idx = best_header_idx);

    if (flags & FLAG_CHECKSUM)
        avio_wl32(bc, ffio_get_checksum(bc));
    else
        ffio_get_checksum(bc);

    avio_write(bc, pkt->data + nut->header_len[header_idx],
               pkt->size - nut->header_len[header_idx]);
    nus->last_flags = flags;
    nus->last_pts   = pkt->pts;

    //FIXME just store one per syncpoint
    if (flags & FLAG_KEY && !(nut->flags & NUT_PIPE))
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);

    return 0;
}
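
/*
 * Finish the file: repeat the header block if it has not been written often
 * enough, then free the syncpoint tree and the allocated stream, chapter and
 * time base tables.
 */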
static int nut_write_trailer(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;

    while (nut->header_count < 3)
        write_headers(s, bc);

    ff_nut_free_sp(nut);
    av_freep(&nut->stream);
    av_freep(&nut->chapter);
    av_freep(&nut->time_base);

    return 0;
}

#define OFFSET(x) offsetof(NUTContext, x)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "syncpoints",  "NUT syncpoint behaviour",                         OFFSET(flags), AV_OPT_TYPE_FLAGS, { .i64 = 0             }, INT_MIN, INT_MAX, E, "syncpoints" },
    { "default",     "",                                                0,             AV_OPT_TYPE_CONST, { .i64 = 0             }, INT_MIN, INT_MAX, E, "syncpoints" },
    { "none",        "Disable syncpoints, low overhead and unseekable", 0,             AV_OPT_TYPE_CONST, { .i64 = NUT_PIPE      }, INT_MIN, INT_MAX, E, "syncpoints" },
    { "timestamped", "Extend syncpoints with a wallclock timestamp",    0,             AV_OPT_TYPE_CONST, { .i64 = NUT_BROADCAST }, INT_MIN, INT_MAX, E, "syncpoints" },
    { NULL },
};

static const AVClass class = {
    .class_name = "nutenc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVOutputFormat ff_nut_muxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT"),
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = CONFIG_LIBVORBIS ? AV_CODEC_ID_VORBIS :
                      CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag      = ff_nut_codec_tags,
    .priv_class     = &class,
};