/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "libavutil/dict.h"
#include "libavutil/avassert.h"
#include "libavcodec/mpegaudiodata.h"
#include "nut.h"
#include "internal.h"
#include "avio_internal.h"

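/**
 * Guess the fixed first bytes of a frame from the codec, frame size and
 * keyframe flag, so that they can later be elided via the header table.
 * Returns the number of predictable bytes stored in out, 0 if nothing can
 * be predicted, or a negative value on inconsistency.
 */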
static int find_expected_header(AVCodecContext *c, int size, int key_frame, uint8_t out[64]){
    int sample_rate= c->sample_rate;

    if(size>4096)
        return 0;

    AV_WB24(out, 1);

    if(c->codec_id == AV_CODEC_ID_MPEG4){
        if(key_frame){
            return 3;
        }else{
            out[3]= 0xB6;
            return 4;
        }
    }else if(c->codec_id == AV_CODEC_ID_MPEG1VIDEO || c->codec_id == AV_CODEC_ID_MPEG2VIDEO){
        return 3;
    }else if(c->codec_id == AV_CODEC_ID_H264){
        return 3;
    }else if(c->codec_id == AV_CODEC_ID_MP3 || c->codec_id == AV_CODEC_ID_MP2){
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer= c->codec_id == AV_CODEC_ID_MP3 ? 3 : 2;
        unsigned int header= 0xFFF00000;

        lsf    = sample_rate < (24000+32000)/2;
        mpeg25 = sample_rate < (12000+16000)/2;
        sample_rate <<= lsf + mpeg25;
        if     (sample_rate < (32000 + 44100)/2) sample_rate_index=2;
        else if(sample_rate < (44100 + 48000)/2) sample_rate_index=0;
        else                                     sample_rate_index=1;

        sample_rate= avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        for(bitrate_index=2; bitrate_index<30; bitrate_index++){
            frame_size = avpriv_mpa_bitrate_tab[lsf][layer-1][bitrate_index>>1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) + (bitrate_index&1);

            if(frame_size == size)
                break;
        }

        header |= (!lsf)<<19;
        header |= (4-layer)<<17;
        header |= 1<<16; //no crc
        AV_WB32(out, header);
        if(size <= 0)
            return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
        if(bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index>>1)<<12;
        header |= sample_rate_index<<10;
        header |= (bitrate_index&1)<<9;

        return 2; //FIXME actually put the needed ones in build_elision_headers()
        return 3; //we guess that the private bit is not set
//FIXME the above assumptions should be checked, if these turn out false too often something should be done
    }
    return 0;
}

static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type){
    NUTContext *nut = s->priv_data;
    uint8_t out[64];
    int i;
    int len= find_expected_header(c, size, frame_type, out);

    for(i=1; i<nut->header_count; i++){
        if(   len == nut->header_len[i]
           && !memcmp(out, nut->header[i], len)){
            return i;
        }
    }

    return 0;
}

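/**
 * Fill the static table of common frame prefixes (MPEG start codes and
 * MP2/MP3 frame headers) that can be elided from stored frames.
 */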
static void build_elision_headers(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int i;
    //FIXME this is lame
    //FIXME write a 2pass mode to find the maximal headers
    static const uint8_t headers[][5]={
        {3, 0x00, 0x00, 0x01},
        {4, 0x00, 0x00, 0x01, 0xB6},
        {2, 0xFF, 0xFA}, //mp3+crc
        {2, 0xFF, 0xFB}, //mp3
        {2, 0xFF, 0xFC}, //mp2+crc
        {2, 0xFF, 0xFD}, //mp2
    };

    nut->header_count= 7;
    for(i=1; i<nut->header_count; i++){
        nut->header_len[i]= headers[i-1][0];
        nut->header    [i]= &headers[i-1][1];
    }
}

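/**
 * Distribute the 256 frame code values among the streams, covering the
 * common combinations of keyframe flag, pts delta and frame size so that
 * typical frames can be coded with a single framecode byte.
 */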
static void build_frame_code(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start=1;
    int end= 254;
    int keyframe_0_esc= s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft= &nut->frame_code[start];
    ft->flags= FLAG_CODED;
    ft->size_mul=1;
    ft->pts_delta=1;
    start++;

    if(keyframe_0_esc){
        /* keyframe = 0 escape */
        FrameCode *ft= &nut->frame_code[start];
        ft->flags= FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul=1;
        start++;
    }

    for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
        int start2= start + (end-start)*stream_id / s->nb_streams;
        int end2  = start + (end-start)*(stream_id+1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio= codec->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only= /*codec->intra_only || */is_audio;
        int pred_count;
        int frame_size = 0;

        if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            frame_size = av_get_audio_frame_duration(codec, 0);
            if (codec->codec_id == AV_CODEC_ID_VORBIS && !frame_size)
                frame_size = 64;
        } else {
            AVRational f = av_div_q(codec->time_base, *nut->stream[stream_id].time_base);
            if(f.den == 1 && f.num>0)
                frame_size = f.num;
        }
        if(!frame_size)
            frame_size = 1;

        for(key_frame=0; key_frame<2; key_frame++){
            if(intra_only && keyframe_0_esc && key_frame==0)
                continue;

            {
                FrameCode *ft= &nut->frame_code[start2];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id= stream_id;
                ft->size_mul=1;
                if(is_audio)
                    ft->header_idx= find_header_idx(s, codec, -1, key_frame);
                start2++;
            }
        }

        key_frame= intra_only;
#if 1
        if(is_audio){
            int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);
            int pts;
            for(pts=0; pts<2; pts++){
                for(pred=0; pred<2; pred++){
                    FrameCode *ft= &nut->frame_code[start2];
                    ft->flags= FLAG_KEY*key_frame;
                    ft->stream_id= stream_id;
                    ft->size_mul=frame_bytes + 2;
                    ft->size_lsb=frame_bytes + pred;
                    ft->pts_delta=pts * frame_size;
                    ft->header_idx= find_header_idx(s, codec, frame_bytes + pred, key_frame);
                    start2++;
                }
            }
        }else{
            FrameCode *ft= &nut->frame_code[start2];
            ft->flags= FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id= stream_id;
            ft->size_mul=1;
            ft->pts_delta=frame_size;
            start2++;
        }
#endif

        if(codec->has_b_frames){
            pred_count=5;
            pred_table[0]=-2;
            pred_table[1]=-1;
            pred_table[2]=1;
            pred_table[3]=3;
            pred_table[4]=4;
        }else if(codec->codec_id == AV_CODEC_ID_VORBIS){
            pred_count=3;
            pred_table[0]=2;
            pred_table[1]=9;
            pred_table[2]=16;
        }else{
            pred_count=1;
            pred_table[0]=1;
        }

        for(pred=0; pred<pred_count; pred++){
            int start3= start2 + (end2-start2)*pred / pred_count;
            int end3  = start2 + (end2-start2)*(pred+1) / pred_count;

            pred_table[pred] *= frame_size;

            for(index=start3; index<end3; index++){
                FrameCode *ft= &nut->frame_code[index];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB;
                ft->stream_id= stream_id;
//FIXME use single byte size and pred from last
                ft->size_mul= end3-start3;
                ft->size_lsb= index - start3;
                ft->pts_delta= pred_table[pred];
                if(is_audio)
                    ft->header_idx= find_header_idx(s, codec, -1, key_frame);
            }
        }
    }
    memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
    nut->frame_code[  0].flags=
    nut->frame_code[255].flags=
    nut->frame_code['N'].flags= FLAG_INVALID;
}

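/**
 * Store a universal timestamp: the value is multiplied by the number of
 * time bases and offset by the index of the stream's time base.
 */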
static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc, uint64_t val){
    val *= nut->time_base_count;
    val += time_base - nut->time_base;
    ff_put_v(bc, val);
}

/**
 * Store a string as vb.
 */
static void put_str(AVIOContext *bc, const char *string){
    int len= strlen(string);

    ff_put_v(bc, len);
    avio_write(bc, string, len);
}

static void put_s(AVIOContext *bc, int64_t val){
    ff_put_v(bc, 2*FFABS(val) - (val>0));
}

#ifdef TRACE
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
                                  const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    ff_put_v(bc, v);
}

static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file,
                               const char *func, int line)
{
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define ff_put_v(bc, v)  ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)     put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif

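/**
 * Flush a dynamic buffer as a complete NUT packet: startcode, forward
 * pointer, a header checksum for large packets, the payload and an
 * optional payload checksum.
 */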
//FIXME remove calculate_checksum
static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc, int calculate_checksum, uint64_t startcode){
    uint8_t *dyn_buf=NULL;
    int dyn_size= avio_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr= dyn_size + 4*calculate_checksum;

    if(forw_ptr > 4096)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_wb64(bc, startcode);
    ff_put_v(bc, forw_ptr);
    if(forw_ptr > 4096)
        avio_wl32(bc, ffio_get_checksum(bc));

    if(calculate_checksum)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_write(bc, dyn_buf, dyn_size);
    if(calculate_checksum)
        avio_wl32(bc, ffio_get_checksum(bc));

    av_free(dyn_buf);
}

static void write_mainheader(NUTContext *nut, AVIOContext *bc){
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields, tmp_head_idx;
    int64_t tmp_match;

    ff_put_v(bc, 3); /* version */
    ff_put_v(bc, nut->avf->nb_streams);
    ff_put_v(bc, nut->max_distance);
    ff_put_v(bc, nut->time_base_count);

    for(i=0; i<nut->time_base_count; i++){
        ff_put_v(bc, nut->time_base[i].num);
        ff_put_v(bc, nut->time_base[i].den);
    }

    tmp_pts=0;
    tmp_mul=1;
    tmp_stream=0;
    tmp_match= 1-(1LL<<62);
    tmp_head_idx= 0;
    for(i=0; i<256;){
        tmp_fields=0;
        tmp_size=0;
//        tmp_res=0;
        if(tmp_pts    != nut->frame_code[i].pts_delta) tmp_fields=1;
        if(tmp_mul    != nut->frame_code[i].size_mul ) tmp_fields=2;
        if(tmp_stream != nut->frame_code[i].stream_id) tmp_fields=3;
        if(tmp_size   != nut->frame_code[i].size_lsb ) tmp_fields=4;
//        if(tmp_res    != nut->frame_code[i].res      ) tmp_fields=5;
        if(tmp_head_idx!=nut->frame_code[i].header_idx)tmp_fields=8;

        tmp_pts   = nut->frame_code[i].pts_delta;
        tmp_flags = nut->frame_code[i].flags;
        tmp_stream= nut->frame_code[i].stream_id;
        tmp_mul   = nut->frame_code[i].size_mul;
        tmp_size  = nut->frame_code[i].size_lsb;
//        tmp_res   = nut->frame_code[i].res;
        tmp_head_idx= nut->frame_code[i].header_idx;

        for(j=0; i<256; j++,i++){
            if(i == 'N'){
                j--;
                continue;
            }
            if(nut->frame_code[i].pts_delta != tmp_pts   ) break;
            if(nut->frame_code[i].flags     != tmp_flags ) break;
            if(nut->frame_code[i].stream_id != tmp_stream) break;
            if(nut->frame_code[i].size_mul  != tmp_mul   ) break;
            if(nut->frame_code[i].size_lsb  != tmp_size+j) break;
//            if(nut->frame_code[i].res       != tmp_res   ) break;
            if(nut->frame_code[i].header_idx!= tmp_head_idx) break;
        }
        if(j != tmp_mul - tmp_size) tmp_fields=6;

        ff_put_v(bc, tmp_flags);
        ff_put_v(bc, tmp_fields);
        if(tmp_fields>0) put_s   (bc, tmp_pts);
        if(tmp_fields>1) ff_put_v(bc, tmp_mul);
        if(tmp_fields>2) ff_put_v(bc, tmp_stream);
        if(tmp_fields>3) ff_put_v(bc, tmp_size);
        if(tmp_fields>4) ff_put_v(bc, 0 /*tmp_res*/);
        if(tmp_fields>5) ff_put_v(bc, j);
        if(tmp_fields>6) ff_put_v(bc, tmp_match);
        if(tmp_fields>7) ff_put_v(bc, tmp_head_idx);
    }

    ff_put_v(bc, nut->header_count-1);
    for(i=1; i<nut->header_count; i++){
        ff_put_v(bc, nut->header_len[i]);
        avio_write(bc, nut->header[i], nut->header_len[i]);
    }
}

static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc, AVStream *st, int i){
    NUTContext *nut = avctx->priv_data;
    AVCodecContext *codec = st->codec;

    ff_put_v(bc, i);
    switch(codec->codec_type){
    case AVMEDIA_TYPE_VIDEO:    ff_put_v(bc, 0); break;
    case AVMEDIA_TYPE_AUDIO:    ff_put_v(bc, 1); break;
    case AVMEDIA_TYPE_SUBTITLE: ff_put_v(bc, 2); break;
    default                   : ff_put_v(bc, 3); break;
    }
    ff_put_v(bc, 4);

    if (codec->codec_tag){
        avio_wl32(bc, codec->codec_tag);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);
    }

    ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
    ff_put_v(bc, nut->stream[i].msb_pts_shift);
    ff_put_v(bc, nut->stream[i].max_pts_distance);
    ff_put_v(bc, codec->has_b_frames);
    avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    ff_put_v(bc, codec->extradata_size);
    avio_write(bc, codec->extradata, codec->extradata_size);

    switch(codec->codec_type){
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, codec->sample_rate);
        ff_put_v(bc, 1);
        ff_put_v(bc, codec->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, codec->width);
        ff_put_v(bc, codec->height);

        if(st->sample_aspect_ratio.num<=0 || st->sample_aspect_ratio.den<=0){
            ff_put_v(bc, 0);
            ff_put_v(bc, 0);
        }else{
            ff_put_v(bc, st->sample_aspect_ratio.num);
            ff_put_v(bc, st->sample_aspect_ratio.den);
        }
        ff_put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}

static int add_info(AVIOContext *bc, const char *type, const char *value){
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}

static int write_globalinfo(NUTContext *nut, AVIOContext *bc){
    AVFormatContext *s= nut->avf;
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size;
    int ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;

    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, 0); //stream_id_plus1
    ff_put_v(bc, 0); //chapter_id
    ff_put_v(bc, 0); //timestamp_start
    ff_put_v(bc, 0); //length

    ff_put_v(bc, count);

    dyn_size= avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}

static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id){
    AVFormatContext *s= nut->avf;
    AVStream* st = s->streams[stream_id];
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size, i;
    int ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;

    while ((t = av_dict_get(st->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);
    for (i=0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        uint8_t buf[256];
        snprintf(buf, sizeof(buf), "%d/%d", st->codec->time_base.den, st->codec->time_base.num);
        count += add_info(dyn_bc, "r_frame_rate", buf);
    }
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        ff_put_v(bc, stream_id + 1); //stream_id_plus1
        ff_put_v(bc, 0); //chapter_id
        ff_put_v(bc, 0); //timestamp_start
        ff_put_v(bc, 0); //length

        ff_put_v(bc, count);

        avio_write(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}

static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
{
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    AVDictionaryEntry *t = NULL;
    AVChapter *ch = nut->avf->chapters[id];
    int ret, dyn_size, count = 0;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_put_v(bc, 0);                                        // stream_id_plus1
    put_s(bc, id + 1);                                      // chapter_id
    put_tt(nut, nut->chapter[id].time_base, bc, ch->start); // chapter_start
    ff_put_v(bc, ch->end - ch->start);                      // chapter_len

    while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);
    return 0;
}

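/**
 * Write the index packet: the highest pts, the syncpoint positions as
 * deltas, and per-stream runs of keyframe pts values, terminated by a
 * 64-bit field giving the total length of the index.
 */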
static int write_index(NUTContext *nut, AVIOContext *bc){
    int i;
    Syncpoint dummy= { .pos= 0 };
    Syncpoint *next_node[2] = { NULL };
    int64_t startpos = avio_tell(bc);
    int64_t payload_size;

    put_tt(nut, nut->max_pts_tb, bc, nut->max_pts);

    ff_put_v(bc, nut->sp_count);

    for(i=0; i<nut->sp_count; i++){
        av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp, (void**)next_node);
        ff_put_v(bc, (next_node[1]->pos >> 4) - (dummy.pos>>4));
        dummy.pos = next_node[1]->pos;
    }

    for(i=0; i<nut->avf->nb_streams; i++){
        StreamContext *nus= &nut->stream[i];
        int64_t last_pts= -1;
        int j, k;
        for(j=0; j<nut->sp_count; j++){
            int flag = (nus->keyframe_pts[j] != AV_NOPTS_VALUE) ^ (j+1 == nut->sp_count);
            int n = 0;
            for(; j<nut->sp_count && (nus->keyframe_pts[j] != AV_NOPTS_VALUE) == flag; j++)
                n++;

            ff_put_v(bc, 1 + 2*flag + 4*n);

            for(k= j - n; k<=j && k<nut->sp_count; k++) {
                if(nus->keyframe_pts[k] == AV_NOPTS_VALUE)
                    continue;
                av_assert0(nus->keyframe_pts[k] > last_pts);
                ff_put_v(bc, nus->keyframe_pts[k] - last_pts);
                last_pts = nus->keyframe_pts[k];
            }
        }
    }

    payload_size = avio_tell(bc) - startpos + 8 + 4;

    avio_wb64(bc, 8 + payload_size + av_log2(payload_size) / 7 + 1 + 4*(payload_size > 4096));

    return 0;
}

static int write_headers(AVFormatContext *avctx, AVIOContext *bc){
    NUTContext *nut = avctx->priv_data;
    AVIOContext *dyn_bc;
    int i, ret;

    ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);

    ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i=0; i < nut->avf->nb_streams; i++){
        ret = avio_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        if ((ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i)) < 0)
            return ret;
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret < 0)
            return ret;
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else {
            uint8_t* buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_free(buf);
        }
    }

    for (i = 0; i < nut->avf->nb_chapters; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_chapter(nut, dyn_bc, i);
        if (ret < 0) {
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_freep(&buf);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
    }

    nut->last_syncpoint_pos= INT_MIN;
    nut->header_count++;
    return 0;
}

static int nut_write_header(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, j, ret;

    nut->avf= s;

    nut->stream   = av_mallocz(sizeof(StreamContext )*s->nb_streams);
    nut->chapter  = av_mallocz(sizeof(ChapterContext)*s->nb_chapters);
    nut->time_base= av_mallocz(sizeof(AVRational    )*(s->nb_streams +
                                                       s->nb_chapters));
    if (!nut->stream || !nut->chapter || !nut->time_base) {
        av_freep(&nut->stream);
        av_freep(&nut->chapter);
        av_freep(&nut->time_base);
        return AVERROR(ENOMEM);
    }

    for(i=0; i<s->nb_streams; i++){
        AVStream *st= s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->sample_rate) {
            time_base = (AVRational){1, st->codec->sample_rate};
        } else {
            time_base = ff_choose_timebase(s, st, 48000);
        }

        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

        for(j=0; j<nut->time_base_count; j++){
            if(!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))){
                break;
            }
        }
        nut->time_base[j]= time_base;
        nut->stream[i].time_base= &nut->time_base[j];
        if(j==nut->time_base_count)
            nut->time_base_count++;

        if(INT64_C(1000) * time_base.num >= time_base.den)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance= FFMAX(time_base.den, time_base.num) / time_base.num;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        for (j = 0; j < nut->time_base_count; j++) {
            if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
                break;
        }

        nut->time_base[j] = ch->time_base;
        nut->chapter[i].time_base = &nut->time_base[j];
        if(j == nut->time_base_count)
            nut->time_base_count++;
    }

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);
    build_frame_code(s);
    av_assert0(nut->frame_code['N'].flags == FLAG_INVALID);

    avio_write(bc, ID_STRING, strlen(ID_STRING));
    avio_w8(bc, 0);

    if ((ret = write_headers(s, bc)) < 0)
        return ret;

    if (s->avoid_negative_ts < 0)
        s->avoid_negative_ts = 1;

    avio_flush(bc);

    return 0;
}

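/**
 * Compute the flags that are required to code the given packet with the
 * given frame code (key, stream id, size MSB, coded pts, checksum,
 * header index).
 */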
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){
    int flags= 0;

    if(pkt->flags & AV_PKT_FLAG_KEY             ) flags |= FLAG_KEY;
    if(pkt->stream_index != fc->stream_id       ) flags |= FLAG_STREAM_ID;
    if(pkt->size / fc->size_mul                 ) flags |= FLAG_SIZE_MSB;
    if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
    if(pkt->size > 2*nut->max_distance          ) flags |= FLAG_CHECKSUM;
    if(FFABS(pkt->pts - nus->last_pts)
                         > nus->max_pts_distance) flags |= FLAG_CHECKSUM;
    if(   pkt->size < nut->header_len[fc->header_idx]
       || (pkt->size > 4096 && fc->header_idx)
       || memcmp(pkt->data, nut->header[fc->header_idx], nut->header_len[fc->header_idx]))
                                                  flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
}

static int find_best_header_idx(NUTContext *nut, AVPacket *pkt){
    int i;
    int best_i  = 0;
    int best_len= 0;

    if(pkt->size > 4096)
        return 0;

    for(i=1; i<nut->header_count; i++){
        if(   pkt->size >= nut->header_len[i]
           && nut->header_len[i] > best_len
           && !memcmp(pkt->data, nut->header[i], nut->header_len[i])){
            best_i= i;
            best_len= nut->header_len[i];
        }
    }
    return best_i;
}

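/**
 * Write one packet: emit a syncpoint when needed, choose the cheapest
 * frame code for this packet, then write the coded frame header followed
 * by the payload minus any elided header bytes.
 */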
static int nut_write_packet(AVFormatContext *s, AVPacket *pkt){
    NUTContext *nut = s->priv_data;
    StreamContext *nus= &nut->stream[pkt->stream_index];
    AVIOContext *bc = s->pb, *dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx, best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int store_sp=0;
    int ret;

    if (pkt->pts < 0) {
        av_log(s, AV_LOG_ERROR, "Invalid negative packet pts %"PRId64" in input\n", pkt->pts);
        return AVERROR(EINVAL);
    }

    if(1LL<<(20+3*nut->header_count) <= avio_tell(bc))
        write_headers(s, bc);

    if(key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp= 1;

    if(pkt->size + 30/*FIXME check*/ + avio_tell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp= 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if(store_sp){
        Syncpoint *sp, dummy= {.pos= INT64_MAX};

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for(i=0; i<s->nb_streams; i++){
            AVStream *st= s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index= av_index_search_timestamp(st, dts_tb, AVSEEK_FLAG_BACKWARD);
            if(index>=0) dummy.pos= FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if(dummy.pos == INT64_MAX)
            dummy.pos= 0;
        sp= av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
                         NULL);

        nut->last_syncpoint_pos= avio_tell(bc);
        ret = avio_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
        ff_put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos)>>4 : 0);
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0/*unused*/, pkt->dts);

        if((1ll<<60) % nut->sp_count == 0)
            for(i=0; i<s->nb_streams; i++){
                int j;
                StreamContext *nus = &nut->stream[i];
                nus->keyframe_pts = av_realloc(nus->keyframe_pts, 2*nut->sp_count*sizeof(*nus->keyframe_pts));
                if(!nus->keyframe_pts)
                    return AVERROR(ENOMEM);
                for(j=nut->sp_count == 1 ? 0 : nut->sp_count; j<2*nut->sp_count; j++)
                    nus->keyframe_pts[j] = AV_NOPTS_VALUE;
            }
    }
    av_assert0(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1<<nus->msb_pts_shift)-1);
    if(ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts= pkt->pts + (1<<nus->msb_pts_shift);

    best_header_idx= find_best_header_idx(nut, pkt);

    best_length=INT_MAX;
    frame_code= -1;
    for(i=0; i<256; i++){
        int length= 0;
        FrameCode *fc= &nut->frame_code[i];
        int flags= fc->flags;

        if(flags & FLAG_INVALID)
            continue;
        needed_flags= get_needed_flags(nut, nus, fc, pkt);

        if(flags & FLAG_CODED){
            length++;
            flags = needed_flags;
        }

        if((flags & needed_flags) != needed_flags)
            continue;

        if((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if(flags & FLAG_STREAM_ID)
            length+= ff_get_v_length(pkt->stream_index);

        if(pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if(flags & FLAG_SIZE_MSB)
            length += ff_get_v_length(pkt->size / fc->size_mul);

        if(flags & FLAG_CHECKSUM)
            length+=4;

        if(flags & FLAG_CODED_PTS)
            length += ff_get_v_length(coded_pts);

        if(   (flags & FLAG_CODED)
           && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx]+1){
            flags |= FLAG_HEADER_IDX;
        }

        if(flags & FLAG_HEADER_IDX){
            length += 1 - nut->header_len[best_header_idx];
        }else{
            length -= nut->header_len[fc->header_idx];
        }

        length*=4;
        length+= !(flags & FLAG_CODED_PTS);
        length+= !(flags & FLAG_CHECKSUM);

        if(length < best_length){
            best_length= length;
            frame_code=i;
        }
    }
    av_assert0(frame_code != -1);
    fc= &nut->frame_code[frame_code];
    flags= fc->flags;
    needed_flags= get_needed_flags(nut, nus, fc, pkt);
    header_idx= fc->header_idx;

    ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_w8(bc, frame_code);
    if(flags & FLAG_CODED){
        ff_put_v(bc, (flags^needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if(flags & FLAG_STREAM_ID)  ff_put_v(bc, pkt->stream_index);
    if(flags & FLAG_CODED_PTS)  ff_put_v(bc, coded_pts);
    if(flags & FLAG_SIZE_MSB)   ff_put_v(bc, pkt->size / fc->size_mul);
    if(flags & FLAG_HEADER_IDX) ff_put_v(bc, header_idx= best_header_idx);

    if(flags & FLAG_CHECKSUM)   avio_wl32(bc, ffio_get_checksum(bc));
    else                        ffio_get_checksum(bc);

    avio_write(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);
    nus->last_flags= flags;
    nus->last_pts= pkt->pts;

    //FIXME just store one per syncpoint
    if(flags & FLAG_KEY) {
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);
        if(nus->keyframe_pts && nus->keyframe_pts[nut->sp_count] == AV_NOPTS_VALUE)
            nus->keyframe_pts[nut->sp_count] = pkt->pts;
    }

    if(!nut->max_pts_tb || av_compare_ts(nut->max_pts, *nut->max_pts_tb, pkt->pts, *nus->time_base) < 0) {
        nut->max_pts = pkt->pts;
        nut->max_pts_tb = nus->time_base;
    }

    return 0;
}

static int nut_write_trailer(AVFormatContext *s){
    NUTContext *nut= s->priv_data;
    AVIOContext *bc = s->pb, *dyn_bc;
    int i, ret;

    while(nut->header_count<3)
        write_headers(s, bc);

    ret = avio_open_dyn_buf(&dyn_bc);
    if(ret >= 0) {
        write_index(nut, dyn_bc);
        put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE);
    }

    ff_nut_free_sp(nut);
    for(i=0; i<s->nb_streams; i++)
        av_freep(&nut->stream[i].keyframe_pts);

    av_freep(&nut->stream);
    av_freep(&nut->chapter);
    av_freep(&nut->time_base);

    return 0;
}

AVOutputFormat ff_nut_muxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT"),
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = CONFIG_LIBVORBIS ? AV_CODEC_ID_VORBIS :
                      CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_MP2,
    .video_codec    = AV_CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag      = ff_nut_codec_tags,
};