/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "libavutil/dict.h"
#include "libavcodec/mpegaudiodata.h"
#include "nut.h"
#include "internal.h"
#include "avio_internal.h"
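
/*
 * Guess the frame header bytes (up to 64) that a decoder would expect for
 * this codec, frame size and keyframe flag, so that they can be elided from
 * stored frames. Returns the number of predictable bytes written to out[].
 */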
static int find_expected_header(AVCodecContext *c, int size, int key_frame,
                                uint8_t out[64])
{
    int sample_rate = c->sample_rate;

    if (size > 4096)
        return 0;

    AV_WB24(out, 1);

    if (c->codec_id == AV_CODEC_ID_MPEG4) {
        if (key_frame) {
            return 3;
        } else {
            out[3] = 0xB6;
            return 4;
        }
    } else if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
               c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_H264) {
        return 3;
    } else if (c->codec_id == AV_CODEC_ID_MP3 ||
               c->codec_id == AV_CODEC_ID_MP2) {
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer           = c->codec_id == AV_CODEC_ID_MP3 ? 3 : 2;
        unsigned int header = 0xFFF00000;

        lsf           = sample_rate < (24000 + 32000) / 2;
        mpeg25        = sample_rate < (12000 + 16000) / 2;
        sample_rate <<= lsf + mpeg25;
        if (sample_rate < (32000 + 44100) / 2)
            sample_rate_index = 2;
        else if (sample_rate < (44100 + 48000) / 2)
            sample_rate_index = 0;
        else
            sample_rate_index = 1;

        sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        for (bitrate_index = 2; bitrate_index < 30; bitrate_index++) {
            frame_size =
                avpriv_mpa_bitrate_tab[lsf][layer - 1][bitrate_index >> 1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) +
                         (bitrate_index & 1);

            if (frame_size == size)
                break;
        }

        header |= (!lsf) << 19;
        header |= (4 - layer) << 17;
        header |= 1 << 16; //no crc
        AV_WB32(out, header);
        if (size <= 0)
            return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
        if (bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index >> 1) << 12;
        header |= sample_rate_index << 10;
        header |= (bitrate_index & 1) << 9;

        return 2; //FIXME actually put the needed ones in build_elision_headers()
        return 3; //we guess that the private bit is not set
//FIXME the above assumptions should be checked, if these turn out false too often something should be done
    }

    return 0;
}
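
/*
 * Look up the elision header table entry whose bytes match the header that
 * find_expected_header() predicts for this frame; returns 0 (nothing elided)
 * when no entry matches.
 */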
static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size,
                           int frame_type)
{
    NUTContext *nut = s->priv_data;
    uint8_t out[64];
    int i;
    int len = find_expected_header(c, size, frame_type, out);

    for (i = 1; i < nut->header_count; i++) {
        if (len == nut->header_len[i] && !memcmp(out, nut->header[i], len)) {
            return i;
        }
    }

    return 0;
}
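
/*
 * Fill nut->header[] / nut->header_len[] with a small fixed set of common
 * frame header prefixes (MPEG start codes, MP2/MP3 sync words) that the
 * muxer can elide from stored frames.
 */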
static void build_elision_headers(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int i;
    //FIXME this is lame
    //FIXME write a 2pass mode to find the maximal headers
    static const uint8_t headers[][5] = {
        { 3, 0x00, 0x00, 0x01 },
        { 4, 0x00, 0x00, 0x01, 0xB6 },
        { 2, 0xFF, 0xFA }, //mp3+crc
        { 2, 0xFF, 0xFB }, //mp3
        { 2, 0xFF, 0xFC }, //mp2+crc
        { 2, 0xFF, 0xFD }, //mp2
    };

    nut->header_count = 7;
    for (i = 1; i < nut->header_count; i++) {
        nut->header_len[i] = headers[i - 1][0];
        nut->header[i]     = &headers[i - 1][1];
    }
}
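
/*
 * Populate the 256-entry frame_code table: partition the code space per
 * stream and, within each stream, per keyframe / pts-delta / size-lsb
 * combination, so that common frames can be written with a single code byte
 * and few explicitly coded fields.
 */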
static void build_frame_code(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start          = 1;
    int end            = 254;
    int keyframe_0_esc = s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft            = &nut->frame_code[start];
    ft->flags     = FLAG_CODED;
    ft->size_mul  = 1;
    ft->pts_delta = 1;
    start++;

    if (keyframe_0_esc) {
        /* keyframe = 0 escape */
        FrameCode *ft = &nut->frame_code[start];
        ft->flags     = FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul  = 1;
        start++;
    }

    for (stream_id = 0; stream_id < s->nb_streams; stream_id++) {
        int start2 = start + (end - start) * stream_id / s->nb_streams;
        int end2   = start + (end - start) * (stream_id + 1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio   = codec->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only = /*codec->intra_only || */ is_audio;
        int pred_count;

        for (key_frame = 0; key_frame < 2; key_frame++) {
            if (!intra_only || !keyframe_0_esc || key_frame != 0) {
                FrameCode *ft  = &nut->frame_code[start2];
                ft->flags      = FLAG_KEY * key_frame;
                ft->flags     |= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id  = stream_id;
                ft->size_mul   = 1;
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
                start2++;
            }
        }

        key_frame = intra_only;

        if (is_audio) {
            int frame_bytes = codec->frame_size * (int64_t)codec->bit_rate /
                              (8 * codec->sample_rate);
            int pts;
            for (pts = 0; pts < 2; pts++)
                for (pred = 0; pred < 2; pred++) {
                    FrameCode *ft  = &nut->frame_code[start2];
                    ft->flags      = FLAG_KEY * key_frame;
                    ft->stream_id  = stream_id;
                    ft->size_mul   = frame_bytes + 2;
                    ft->size_lsb   = frame_bytes + pred;
                    ft->pts_delta  = pts;
                    ft->header_idx = find_header_idx(s, codec, frame_bytes + pred, key_frame);
                    start2++;
                }
        } else {
            FrameCode *ft = &nut->frame_code[start2];
            ft->flags     = FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id = stream_id;
            ft->size_mul  = 1;
            ft->pts_delta = 1;
            start2++;
        }

        if (codec->has_b_frames) {
            pred_count    = 5;
            pred_table[0] = -2;
            pred_table[1] = -1;
            pred_table[2] = 1;
            pred_table[3] = 3;
            pred_table[4] = 4;
        } else if (codec->codec_id == AV_CODEC_ID_VORBIS) {
            pred_count    = 3;
            pred_table[0] = 2;
            pred_table[1] = 9;
            pred_table[2] = 16;
        } else {
            pred_count    = 1;
            pred_table[0] = 1;
        }

        for (pred = 0; pred < pred_count; pred++) {
            int start3 = start2 + (end2 - start2) * pred / pred_count;
            int end3   = start2 + (end2 - start2) * (pred + 1) / pred_count;

            for (index = start3; index < end3; index++) {
                FrameCode *ft = &nut->frame_code[index];
                ft->flags     = FLAG_KEY * key_frame;
                ft->flags    |= FLAG_SIZE_MSB;
                ft->stream_id = stream_id;
                //FIXME use single byte size and pred from last
                ft->size_mul  = end3 - start3;
                ft->size_lsb  = index - start3;
                ft->pts_delta = pred_table[pred];
                if (is_audio)
                    ft->header_idx = find_header_idx(s, codec, -1, key_frame);
            }
        }
    }

    memmove(&nut->frame_code['N' + 1], &nut->frame_code['N'],
            sizeof(FrameCode) * (255 - 'N'));
    nut->frame_code[0].flags   =
    nut->frame_code[255].flags =
    nut->frame_code['N'].flags = FLAG_INVALID;
}
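
/*
 * Write a "universal" timestamp: the value is multiplied by the number of
 * time bases and offset by the index of this stream's time base, so that
 * both the timestamp and its time base can be recovered from one coded
 * integer.
 */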
static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc,
                   uint64_t val)
{
    val *= nut->time_base_count;
    val += time_base - nut->time_base;
    ff_put_v(bc, val);
}

/**
 * Store a string as vb.
 */
static void put_str(AVIOContext *bc, const char *string)
{
    int len = strlen(string);

    ff_put_v(bc, len);
    avio_write(bc, string, len);
}
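
/*
 * Store a signed value as a variable-length unsigned integer by folding the
 * sign into the low bit (zigzag-style mapping).
 */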
static void put_s(AVIOContext *bc, int64_t val)
{
    ff_put_v(bc, 2 * FFABS(val) - (val > 0));
}
#ifdef TRACE
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
                                  const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    ff_put_v(bc, v);
}

static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file,
                               const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define ff_put_v(bc, v) ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)    put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif

//FIXME remove calculate_checksum
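/*
 * Wrap the contents of dyn_bc into a NUT packet on bc: startcode, forward
 * pointer (plus a header checksum when the packet is large), the payload
 * and, if requested, a trailing CRC over the payload.
 */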
static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc,
                       int calculate_checksum, uint64_t startcode)
{
    uint8_t *dyn_buf = NULL;
    int dyn_size     = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr     = dyn_size + 4 * calculate_checksum;

    if (forw_ptr > 4096)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_wb64(bc, startcode);
    ff_put_v(bc, forw_ptr);
    if (forw_ptr > 4096)
        avio_wl32(bc, ffio_get_checksum(bc));

    if (calculate_checksum)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_write(bc, dyn_buf, dyn_size);
    if (calculate_checksum)
        avio_wl32(bc, ffio_get_checksum(bc));

    av_free(dyn_buf);
}
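
/*
 * Write the main header body: version, stream count, max_distance, the time
 * base table, a run-length coded description of the frame_code table and,
 * finally, the elision headers.
 */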
static void write_mainheader(NUTContext *nut, AVIOContext *bc)
{
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields,
        tmp_head_idx;
    int64_t tmp_match;

    ff_put_v(bc, 3); /* version */
    ff_put_v(bc, nut->avf->nb_streams);
    ff_put_v(bc, nut->max_distance);
    ff_put_v(bc, nut->time_base_count);

    for (i = 0; i < nut->time_base_count; i++) {
        ff_put_v(bc, nut->time_base[i].num);
        ff_put_v(bc, nut->time_base[i].den);
    }

    tmp_pts      = 0;
    tmp_mul      = 1;
    tmp_stream   = 0;
    tmp_match    = 1 - (1LL << 62);
    tmp_head_idx = 0;
    for (i = 0; i < 256; ) {
        tmp_fields = 0;
        tmp_size   = 0;
        // tmp_res=0;
        if (tmp_pts != nut->frame_code[i].pts_delta)
            tmp_fields = 1;
        if (tmp_mul != nut->frame_code[i].size_mul)
            tmp_fields = 2;
        if (tmp_stream != nut->frame_code[i].stream_id)
            tmp_fields = 3;
        if (tmp_size != nut->frame_code[i].size_lsb)
            tmp_fields = 4;
        // if(tmp_res != nut->frame_code[i].res ) tmp_fields=5;
        if (tmp_head_idx != nut->frame_code[i].header_idx)
            tmp_fields = 8;

        tmp_pts      = nut->frame_code[i].pts_delta;
        tmp_flags    = nut->frame_code[i].flags;
        tmp_stream   = nut->frame_code[i].stream_id;
        tmp_mul      = nut->frame_code[i].size_mul;
        tmp_size     = nut->frame_code[i].size_lsb;
        // tmp_res   = nut->frame_code[i].res;
        tmp_head_idx = nut->frame_code[i].header_idx;

        for (j = 0; i < 256; j++, i++) {
            if (i == 'N') {
                j--;
                continue;
            }
            if (nut->frame_code[i].pts_delta  != tmp_pts      ||
                nut->frame_code[i].flags      != tmp_flags    ||
                nut->frame_code[i].stream_id  != tmp_stream   ||
                nut->frame_code[i].size_mul   != tmp_mul      ||
                nut->frame_code[i].size_lsb   != tmp_size + j ||
                // nut->frame_code[i].res     != tmp_res      ||
                nut->frame_code[i].header_idx != tmp_head_idx)
                break;
        }
        if (j != tmp_mul - tmp_size)
            tmp_fields = 6;

        ff_put_v(bc, tmp_flags);
        ff_put_v(bc, tmp_fields);
        if (tmp_fields > 0)
            put_s(bc, tmp_pts);
        if (tmp_fields > 1)
            ff_put_v(bc, tmp_mul);
        if (tmp_fields > 2)
            ff_put_v(bc, tmp_stream);
        if (tmp_fields > 3)
            ff_put_v(bc, tmp_size);
        if (tmp_fields > 4)
            ff_put_v(bc, 0 /*tmp_res*/);
        if (tmp_fields > 5)
            ff_put_v(bc, j);
        if (tmp_fields > 6)
            ff_put_v(bc, tmp_match);
        if (tmp_fields > 7)
            ff_put_v(bc, tmp_head_idx);
    }

    ff_put_v(bc, nut->header_count - 1);
    for (i = 1; i < nut->header_count; i++) {
        ff_put_v(bc, nut->header_len[i]);
        avio_write(bc, nut->header[i], nut->header_len[i]);
    }
}
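
/*
 * Write one stream header body: stream class, fourcc, time base index,
 * msb_pts_shift, max_pts_distance, decode delay (has_b_frames), a flags
 * byte, extradata and the codec-type specific fields (sample rate/channels
 * or width/height/aspect ratio).
 */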
static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc,
                              AVStream *st, int i)
{
    NUTContext *nut       = avctx->priv_data;
    AVCodecContext *codec = st->codec;
    unsigned codec_tag    = av_codec_get_tag(ff_nut_codec_tags, codec->codec_id);

    ff_put_v(bc, i);
    switch (codec->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, 0);
        break;
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, 1);
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        ff_put_v(bc, 2);
        break;
    default:
        ff_put_v(bc, 3);
        break;
    }

    ff_put_v(bc, 4);

    if (!codec_tag || codec->codec_id == AV_CODEC_ID_RAWVIDEO)
        codec_tag = codec->codec_tag;

    if (codec_tag) {
        avio_wl32(bc, codec_tag);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);
    }

    ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
    ff_put_v(bc, nut->stream[i].msb_pts_shift);
    ff_put_v(bc, nut->stream[i].max_pts_distance);
    ff_put_v(bc, codec->has_b_frames);
    avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    ff_put_v(bc, codec->extradata_size);
    avio_write(bc, codec->extradata, codec->extradata_size);

    switch (codec->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, codec->sample_rate);
        ff_put_v(bc, 1);
        ff_put_v(bc, codec->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, codec->width);
        ff_put_v(bc, codec->height);

        if (st->sample_aspect_ratio.num <= 0 ||
            st->sample_aspect_ratio.den <= 0) {
            ff_put_v(bc, 0);
            ff_put_v(bc, 0);
        } else {
            ff_put_v(bc, st->sample_aspect_ratio.num);
            ff_put_v(bc, st->sample_aspect_ratio.den);
        }
        ff_put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}
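
/*
 * Append one metadata entry (key, a put_s(-1) marker, string value) to an
 * info packet body; returns 1 so callers can count the entries they add.
 */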
static int add_info(AVIOContext *bc, const char *type, const char *value)
{
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}
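
/*
 * Write the global info packet body: the zeroed stream/chapter scope fields
 * followed by every entry of the container-level metadata dictionary.
 */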
static int write_globalinfo(NUTContext *nut, AVIOContext *bc)
{
    AVFormatContext *s   = nut->avf;
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count        = 0, dyn_size;
    int ret          = avio_open_dyn_buf(&dyn_bc);

    if (ret < 0)
        return ret;

    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, 0); //stream_id_plus1
    ff_put_v(bc, 0); //chapter_id
    ff_put_v(bc, 0); //timestamp_start
    ff_put_v(bc, 0); //length

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}
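
/*
 * Write a per-stream info packet body containing one "Disposition" entry per
 * set disposition flag; returns the number of entries, so 0 tells the caller
 * that the packet can be dropped.
 */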
static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id)
{
    AVFormatContext *s = nut->avf;
    AVStream *st       = s->streams[stream_id];
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    int count        = 0, dyn_size, i;
    int ret          = avio_open_dyn_buf(&dyn_bc);

    if (ret < 0)
        return ret;

    for (i = 0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        ff_put_v(bc, stream_id + 1); //stream_id_plus1
        ff_put_v(bc, 0);             //chapter_id
        ff_put_v(bc, 0);             //timestamp_start
        ff_put_v(bc, 0);             //length

        ff_put_v(bc, count);

        avio_write(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}
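
/*
 * Write an info packet describing chapter `id`: chapter id, start timestamp,
 * length, and the chapter's metadata dictionary as key/value entries.
 */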
static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
{
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf     = NULL;
    AVDictionaryEntry *t = NULL;
    AVChapter *ch        = nut->avf->chapters[id];
    int ret, dyn_size, count = 0;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_put_v(bc, 0);                                        // stream_id_plus1
    put_s(bc, id + 1);                                      // chapter_id
    put_tt(nut, nut->chapter[id].time_base, bc, ch->start); // chapter_start
    ff_put_v(bc, ch->end - ch->start);                      // chapter_len

    while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);
    return 0;
}
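
/*
 * Emit the full header sequence: main header, one stream header per stream,
 * the global info packet, non-empty per-stream info packets and one info
 * packet per chapter. Also increments header_count, which drives how often
 * the headers are repeated later in the file.
 */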
static int write_headers(AVFormatContext *avctx, AVIOContext *bc)
{
    NUTContext *nut = avctx->priv_data;
    AVIOContext *dyn_bc;
    int i, ret;

    ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i);
        if (ret < 0)
            return ret;
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret < 0)
            return ret;
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else {
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_free(buf);
        }
    }

    for (i = 0; i < nut->avf->nb_chapters; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_chapter(nut, dyn_bc, i);
        if (ret < 0) {
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_freep(&buf);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
    }

    nut->last_syncpoint_pos = INT_MIN;
    nut->header_count++;
    return 0;
}
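
/*
 * Muxer init: allocate the per-stream, per-chapter and time base tables,
 * pick a time base and msb_pts_shift for each stream, build the elision
 * header and frame_code tables, then write the id string and the initial
 * headers.
 */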
static int nut_write_header(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, j, ret;

    nut->avf = s;

    nut->stream = av_mallocz(sizeof(StreamContext) * s->nb_streams);
    if (s->nb_chapters)
        nut->chapter = av_mallocz(sizeof(ChapterContext) * s->nb_chapters);
    nut->time_base = av_mallocz(sizeof(AVRational) * (s->nb_streams +
                                                      s->nb_chapters));
    if (!nut->stream || (s->nb_chapters && !nut->chapter) || !nut->time_base) {
        av_freep(&nut->stream);
        av_freep(&nut->chapter);
        av_freep(&nut->time_base);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize,
                                 &time_base.num);

        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))) {
                break;
            }
        nut->time_base[j]        = time_base;
        nut->stream[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;

        if (INT64_C(1000) * time_base.num >= time_base.den)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance =
            FFMAX(time_base.den, time_base.num) / time_base.num;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        for (j = 0; j < nut->time_base_count; j++)
            if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
                break;

        nut->time_base[j]         = ch->time_base;
        nut->chapter[i].time_base = &nut->time_base[j];
        if (j == nut->time_base_count)
            nut->time_base_count++;
    }

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);
    build_frame_code(s);
    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    avio_write(bc, ID_STRING, strlen(ID_STRING));
    avio_w8(bc, 0);

    if ((ret = write_headers(s, bc)) < 0)
        return ret;

    avio_flush(bc);

    //FIXME index

    return 0;
}
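
/*
 * Compute which frame flags (key, stream id, size MSB, coded pts, checksum,
 * header idx) this packet actually requires relative to the candidate frame
 * code fc and the stream's previous state.
 */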
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc,
                            AVPacket *pkt)
{
    int flags = 0;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        flags |= FLAG_KEY;
    if (pkt->stream_index != fc->stream_id)
        flags |= FLAG_STREAM_ID;
    if (pkt->size / fc->size_mul)
        flags |= FLAG_SIZE_MSB;
    if (pkt->pts - nus->last_pts != fc->pts_delta)
        flags |= FLAG_CODED_PTS;
    if (pkt->size > 2 * nut->max_distance)
        flags |= FLAG_CHECKSUM;
    if (FFABS(pkt->pts - nus->last_pts) > nus->max_pts_distance)
        flags |= FLAG_CHECKSUM;
    if (pkt->size < nut->header_len[fc->header_idx] ||
        (pkt->size > 4096 && fc->header_idx) ||
        memcmp(pkt->data, nut->header[fc->header_idx],
               nut->header_len[fc->header_idx]))
        flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
}
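
/*
 * Find the longest elision header that is a prefix of this packet's data;
 * returns its table index, or 0 if none matches or the packet is too large
 * to bother.
 */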
static int find_best_header_idx(NUTContext *nut, AVPacket *pkt)
{
    int i;
    int best_i   = 0;
    int best_len = 0;

    if (pkt->size > 4096)
        return 0;

    for (i = 1; i < nut->header_count; i++)
        if (pkt->size >= nut->header_len[i]
            && nut->header_len[i] > best_len
            && !memcmp(pkt->data, nut->header[i], nut->header_len[i])) {
            best_i   = i;
            best_len = nut->header_len[i];
        }
    return best_i;
}
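
/*
 * Write one frame: repeat the headers once the file has grown enough, emit a
 * syncpoint when needed, pick the cheapest frame code for this packet, then
 * write the code byte, any explicitly coded fields, and the payload with its
 * elided header prefix stripped.
 */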
static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    NUTContext *nut    = s->priv_data;
    StreamContext *nus = &nut->stream[pkt->stream_index];
    AVIOContext *bc    = s->pb, *dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx,
        best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int store_sp  = 0;
    int ret;

    if (pkt->pts < 0) {
        av_log(s, AV_LOG_ERROR,
               "Negative pts not supported stream %d, pts %"PRId64"\n",
               pkt->stream_index, pkt->pts);
        return AVERROR_INVALIDDATA;
    }

    if (1LL << (20 + 3 * nut->header_count) <= avio_tell(bc))
        write_headers(s, bc);

    if (key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp = 1;

    if (pkt->size + 30 /*FIXME check*/ + avio_tell(bc) >=
        nut->last_syncpoint_pos + nut->max_distance)
        store_sp = 1;

    //FIXME: Ensure store_sp is 1 in the first place.

    if (store_sp) {
        Syncpoint *sp, dummy = { .pos = INT64_MAX };

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for (i = 0; i < s->nb_streams; i++) {
            AVStream *st = s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index = av_index_search_timestamp(st, dts_tb,
                                                  AVSEEK_FLAG_BACKWARD);
            if (index >= 0)
                dummy.pos = FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if (dummy.pos == INT64_MAX)
            dummy.pos = 0;

        sp = av_tree_find(nut->syncpoints, &dummy, (void *)ff_nut_sp_pos_cmp,
                          NULL);

        nut->last_syncpoint_pos = avio_tell(bc);
        ret                     = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
        ff_put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos) >> 4 : 0);
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0 /*unused*/, pkt->dts);
    }
    assert(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1 << nus->msb_pts_shift) - 1);
    if (ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts = pkt->pts + (1 << nus->msb_pts_shift);

    best_header_idx = find_best_header_idx(nut, pkt);

    best_length = INT_MAX;
    frame_code  = -1;
    for (i = 0; i < 256; i++) {
        int length    = 0;
        FrameCode *fc = &nut->frame_code[i];
        int flags     = fc->flags;

        if (flags & FLAG_INVALID)
            continue;
        needed_flags = get_needed_flags(nut, nus, fc, pkt);

        if (flags & FLAG_CODED) {
            length++;
            flags = needed_flags;
        }

        if ((flags & needed_flags) != needed_flags)
            continue;

        if ((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if (flags & FLAG_STREAM_ID)
            length += ff_get_v_length(pkt->stream_index);

        if (pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if (flags & FLAG_SIZE_MSB)
            length += ff_get_v_length(pkt->size / fc->size_mul);

        if (flags & FLAG_CHECKSUM)
            length += 4;

        if (flags & FLAG_CODED_PTS)
            length += ff_get_v_length(coded_pts);

        if ((flags & FLAG_CODED)
            && nut->header_len[best_header_idx] >
               nut->header_len[fc->header_idx] + 1) {
            flags |= FLAG_HEADER_IDX;
        }

        if (flags & FLAG_HEADER_IDX) {
            length += 1 - nut->header_len[best_header_idx];
        } else {
            length -= nut->header_len[fc->header_idx];
        }

        length *= 4;
        length += !(flags & FLAG_CODED_PTS);
        length += !(flags & FLAG_CHECKSUM);

        if (length < best_length) {
            best_length = length;
            frame_code  = i;
        }
    }
    assert(frame_code != -1);

    fc           = &nut->frame_code[frame_code];
    flags        = fc->flags;
    needed_flags = get_needed_flags(nut, nus, fc, pkt);
    header_idx   = fc->header_idx;

    ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_w8(bc, frame_code);
    if (flags & FLAG_CODED) {
        ff_put_v(bc, (flags ^ needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if (flags & FLAG_STREAM_ID)
        ff_put_v(bc, pkt->stream_index);
    if (flags & FLAG_CODED_PTS)
        ff_put_v(bc, coded_pts);
    if (flags & FLAG_SIZE_MSB)
        ff_put_v(bc, pkt->size / fc->size_mul);
    if (flags & FLAG_HEADER_IDX)
        ff_put_v(bc, header_idx = best_header_idx);

    if (flags & FLAG_CHECKSUM)
        avio_wl32(bc, ffio_get_checksum(bc));
    else
        ffio_get_checksum(bc);

    avio_write(bc, pkt->data + nut->header_len[header_idx],
               pkt->size - nut->header_len[header_idx]);
    nus->last_flags = flags;
    nus->last_pts   = pkt->pts;

    //FIXME just store one per syncpoint
    if (flags & FLAG_KEY)
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);

    return 0;
}
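
/*
 * Finish the file: re-emit the headers while header_count is still below 3,
 * then free the syncpoint tree and the allocated per-stream, chapter and
 * time base tables.
 */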
static int nut_write_trailer(AVFormatContext *s)
{
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;

    while (nut->header_count < 3)
        write_headers(s, bc);

    ff_nut_free_sp(nut);
    av_freep(&nut->stream);
    av_freep(&nut->chapter);
    av_freep(&nut->time_base);
    return 0;
}

AVOutputFormat ff_nut_muxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT"),
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = CONFIG_LIBVORBIS ? AV_CODEC_ID_VORBIS :
                      CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag      = ff_nut_codec_tags,
};