/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/mathematics.h"
#include "libavutil/tree.h"
#include "libavutil/dict.h"
#include "libavcodec/mpegaudiodata.h"
#include "nut.h"
#include "internal.h"
#include "avio_internal.h"
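
/*
 * Try to predict the first few bytes of a frame for this codec (MPEG/H.264
 * startcodes, MP2/MP3 frame headers) so that they can be elided and replaced
 * by a header index. Returns the number of bytes written to out, 0 if nothing
 * can be predicted, or a negative value on inconsistency.
 */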
static int find_expected_header(AVCodecContext *c, int size, int key_frame, uint8_t out[64]){
    int sample_rate= c->sample_rate;

    if(size>4096)
        return 0;

    AV_WB24(out, 1);

    if(c->codec_id == CODEC_ID_MPEG4){
        if(key_frame){
            return 3;
        }else{
            out[3]= 0xB6;
            return 4;
        }
    }else if(c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MPEG2VIDEO){
        return 3;
    }else if(c->codec_id == CODEC_ID_H264){
        return 3;
    }else if(c->codec_id == CODEC_ID_MP3 || c->codec_id == CODEC_ID_MP2){
        int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
        int layer= c->codec_id == CODEC_ID_MP3 ? 3 : 2;
        unsigned int header= 0xFFF00000;

        lsf    = sample_rate < (24000+32000)/2;
        mpeg25 = sample_rate < (12000+16000)/2;
        sample_rate <<= lsf + mpeg25;
        if     (sample_rate < (32000 + 44100)/2) sample_rate_index=2;
        else if(sample_rate < (44100 + 48000)/2) sample_rate_index=0;
        else                                     sample_rate_index=1;

        sample_rate= avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);

        for(bitrate_index=2; bitrate_index<30; bitrate_index++){
            frame_size = avpriv_mpa_bitrate_tab[lsf][layer-1][bitrate_index>>1];
            frame_size = (frame_size * 144000) / (sample_rate << lsf) + (bitrate_index&1);

            if(frame_size == size)
                break;
        }

        header |= (!lsf)<<19;
        header |= (4-layer)<<17;
        header |= 1<<16; //no crc
        AV_WB32(out, header);
        if(size <= 0)
            return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
        if(bitrate_index == 30)
            return -1; //something is wrong ...

        header |= (bitrate_index>>1)<<12;
        header |= sample_rate_index<<10;
        header |= (bitrate_index&1)<<9;

        return 2; //FIXME actually put the needed ones in build_elision_headers()
        return 3; //we guess that the private bit is not set
        //FIXME the above assumptions should be checked, if these turn out false too often something should be done
    }

    return 0;
}
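
/* Return the index of the elision header matching what find_expected_header()
 * predicts for this frame, or 0 if none matches. */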
static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type){
    NUTContext *nut = s->priv_data;
    uint8_t out[64];
    int i;
    int len= find_expected_header(c, size, frame_type, out);

//av_log(NULL, AV_LOG_ERROR, "expected_h len=%d size=%d codec_id=%d\n", len, size, c->codec_id);

    for(i=1; i<nut->header_count; i++){
        if(   len == nut->header_len[i]
           && !memcmp(out, nut->header[i], len)){
//            av_log(NULL, AV_LOG_ERROR, "found %d\n", i);
            return i;
        }
    }
//    av_log(NULL, AV_LOG_ERROR, "nothing found\n");
    return 0;
}
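
/* Fill the static table of elision headers: MPEG startcodes and the common
 * MP2/MP3 frame header prefixes. */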
static void build_elision_headers(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int i;
    //FIXME this is lame
    //FIXME write a 2pass mode to find the maximal headers
    static const uint8_t headers[][5]={
        {3, 0x00, 0x00, 0x01},
        {4, 0x00, 0x00, 0x01, 0xB6},
        {2, 0xFF, 0xFA}, //mp3+crc
        {2, 0xFF, 0xFB}, //mp3
        {2, 0xFF, 0xFC}, //mp2+crc
        {2, 0xFF, 0xFD}, //mp2
    };

    nut->header_count= 7;
    for(i=1; i<nut->header_count; i++){
        nut->header_len[i]= headers[i-1][0];
        nut->header    [i]= &headers[i-1][1];
    }
}
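
/*
 * Distribute the 256 frame codes across the streams: one generic escape,
 * per-stream keyframe/non-keyframe codes, codes for typical audio frame sizes
 * and pts deltas, and ranges of size_lsb values; 'N' and codes 0/255 are
 * marked invalid.
 */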
static void build_frame_code(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start=1;
    int end= 254;
    int keyframe_0_esc= s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft= &nut->frame_code[start];
    ft->flags= FLAG_CODED;
    ft->size_mul=1;
    ft->pts_delta=1;
    start++;

    if(keyframe_0_esc){
        /* keyframe = 0 escape */
        FrameCode *ft= &nut->frame_code[start];
        ft->flags= FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul=1;
        start++;
    }

    for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
        int start2= start + (end-start)*stream_id / s->nb_streams;
        int end2  = start + (end-start)*(stream_id+1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio= codec->codec_type == AVMEDIA_TYPE_AUDIO;
        int intra_only= /*codec->intra_only || */is_audio;
        int pred_count;

        for(key_frame=0; key_frame<2; key_frame++){
            if(intra_only && keyframe_0_esc && key_frame==0)
                continue;

            {
                FrameCode *ft= &nut->frame_code[start2];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id= stream_id;
                ft->size_mul=1;
                if(is_audio)
                    ft->header_idx= find_header_idx(s, codec, -1, key_frame);
                start2++;
            }
        }

        key_frame= intra_only;

        if(is_audio){
            int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);
            int pts;
            for(pts=0; pts<2; pts++){
                for(pred=0; pred<2; pred++){
                    FrameCode *ft= &nut->frame_code[start2];
                    ft->flags= FLAG_KEY*key_frame;
                    ft->stream_id= stream_id;
                    ft->size_mul=frame_bytes + 2;
                    ft->size_lsb=frame_bytes + pred;
                    ft->pts_delta=pts;
                    ft->header_idx= find_header_idx(s, codec, frame_bytes + pred, key_frame);
                    start2++;
                }
            }
        }else{
            FrameCode *ft= &nut->frame_code[start2];
            ft->flags= FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id= stream_id;
            ft->size_mul=1;
            ft->pts_delta=1;
            start2++;
        }

        if(codec->has_b_frames){
            pred_count=5;
            pred_table[0]=-2;
            pred_table[1]=-1;
            pred_table[2]=1;
            pred_table[3]=3;
            pred_table[4]=4;
        }else if(codec->codec_id == CODEC_ID_VORBIS){
            pred_count=3;
            pred_table[0]=2;
            pred_table[1]=9;
            pred_table[2]=16;
        }else{
            pred_count=1;
            pred_table[0]=1;
        }

        for(pred=0; pred<pred_count; pred++){
            int start3= start2 + (end2-start2)*pred / pred_count;
            int end3  = start2 + (end2-start2)*(pred+1) / pred_count;

            for(index=start3; index<end3; index++){
                FrameCode *ft= &nut->frame_code[index];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB;
                ft->stream_id= stream_id;
//FIXME use single byte size and pred from last
                ft->size_mul= end3-start3;
                ft->size_lsb= index - start3;
                ft->pts_delta= pred_table[pred];
                if(is_audio)
                    ft->header_idx= find_header_idx(s, codec, -1, key_frame);
            }
        }
    }
    memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
    nut->frame_code[  0].flags=
    nut->frame_code[255].flags=
    nut->frame_code['N'].flags= FLAG_INVALID;
}
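
/* Write a universal timestamp: the timestamp value multiplexed with the index
 * of its time base (val * time_base_count + time_base_index). */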
static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc, uint64_t val){
    val *= nut->time_base_count;
    val += time_base - nut->time_base;
    ff_put_v(bc, val);
}

/**
 * Store a string as vb.
 */
static void put_str(AVIOContext *bc, const char *string){
    int len= strlen(string);

    ff_put_v(bc, len);
    avio_write(bc, string, len);
}

static void put_s(AVIOContext *bc, int64_t val){
    ff_put_v(bc, 2*FFABS(val) - (val>0));
}

#ifdef TRACE
static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    ff_put_v(bc, v);
}

static inline void put_s_trace(AVIOContext *bc, int64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define ff_put_v(bc, v)  ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)     put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif

//FIXME remove calculate_checksum
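
/* Write one NUT packet: startcode, forward pointer (with a header checksum
 * for packets larger than 4096 bytes), the payload from dyn_bc and, if
 * requested, a payload checksum. */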
static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc, int calculate_checksum, uint64_t startcode){
    uint8_t *dyn_buf=NULL;
    int dyn_size= avio_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr= dyn_size + 4*calculate_checksum;

    if(forw_ptr > 4096)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_wb64(bc, startcode);
    ff_put_v(bc, forw_ptr);
    if(forw_ptr > 4096)
        avio_wl32(bc, ffio_get_checksum(bc));

    if(calculate_checksum)
        ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_write(bc, dyn_buf, dyn_size);
    if(calculate_checksum)
        avio_wl32(bc, ffio_get_checksum(bc));

    av_free(dyn_buf);
}
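
/* Write the main header: version, stream count, max_distance, the time base
 * list, the run-length coded frame code table and the elision headers. */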
static void write_mainheader(NUTContext *nut, AVIOContext *bc){
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields, tmp_head_idx;
    int64_t tmp_match;

    ff_put_v(bc, 3); /* version */
    ff_put_v(bc, nut->avf->nb_streams);
    ff_put_v(bc, nut->max_distance);
    ff_put_v(bc, nut->time_base_count);

    for(i=0; i<nut->time_base_count; i++){
        ff_put_v(bc, nut->time_base[i].num);
        ff_put_v(bc, nut->time_base[i].den);
    }

    tmp_pts=0;
    tmp_mul=1;
    tmp_stream=0;
    tmp_match= 1-(1LL<<62);
    tmp_head_idx= 0;
    for(i=0; i<256;){
        tmp_fields=0;
        tmp_size=0;
//        tmp_res=0;
        if(tmp_pts    != nut->frame_code[i].pts_delta) tmp_fields=1;
        if(tmp_mul    != nut->frame_code[i].size_mul ) tmp_fields=2;
        if(tmp_stream != nut->frame_code[i].stream_id) tmp_fields=3;
        if(tmp_size   != nut->frame_code[i].size_lsb ) tmp_fields=4;
//        if(tmp_res    != nut->frame_code[i].res      ) tmp_fields=5;
        if(tmp_head_idx!=nut->frame_code[i].header_idx)tmp_fields=8;

        tmp_pts   = nut->frame_code[i].pts_delta;
        tmp_flags = nut->frame_code[i].flags;
        tmp_stream= nut->frame_code[i].stream_id;
        tmp_mul   = nut->frame_code[i].size_mul;
        tmp_size  = nut->frame_code[i].size_lsb;
//        tmp_res   = nut->frame_code[i].res;
        tmp_head_idx= nut->frame_code[i].header_idx;

        for(j=0; i<256; j++,i++){
            if(i == 'N'){
                j--;
                continue;
            }
            if(nut->frame_code[i].pts_delta != tmp_pts   ) break;
            if(nut->frame_code[i].flags     != tmp_flags ) break;
            if(nut->frame_code[i].stream_id != tmp_stream) break;
            if(nut->frame_code[i].size_mul  != tmp_mul   ) break;
            if(nut->frame_code[i].size_lsb  != tmp_size+j) break;
//            if(nut->frame_code[i].res       != tmp_res   ) break;
            if(nut->frame_code[i].header_idx!= tmp_head_idx) break;
        }
        if(j != tmp_mul - tmp_size) tmp_fields=6;

        ff_put_v(bc, tmp_flags);
        ff_put_v(bc, tmp_fields);
        if(tmp_fields>0) put_s   (bc, tmp_pts);
        if(tmp_fields>1) ff_put_v(bc, tmp_mul);
        if(tmp_fields>2) ff_put_v(bc, tmp_stream);
        if(tmp_fields>3) ff_put_v(bc, tmp_size);
        if(tmp_fields>4) ff_put_v(bc, 0 /*tmp_res*/);
        if(tmp_fields>5) ff_put_v(bc, j);
        if(tmp_fields>6) ff_put_v(bc, tmp_match);
        if(tmp_fields>7) ff_put_v(bc, tmp_head_idx);
    }

    ff_put_v(bc, nut->header_count-1);
    for(i=1; i<nut->header_count; i++){
        ff_put_v(bc, nut->header_len[i]);
        avio_write(bc, nut->header[i], nut->header_len[i]);
    }
}
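
/* Write one stream header: stream class, fourcc, time base index,
 * msb_pts_shift, max_pts_distance, decode delay (has_b_frames), extradata and
 * the audio/video specific fields. */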
static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc, AVStream *st, int i){
    NUTContext *nut = avctx->priv_data;
    AVCodecContext *codec = st->codec;

    ff_put_v(bc, i);
    switch(codec->codec_type){
    case AVMEDIA_TYPE_VIDEO:    ff_put_v(bc, 0); break;
    case AVMEDIA_TYPE_AUDIO:    ff_put_v(bc, 1); break;
    case AVMEDIA_TYPE_SUBTITLE: ff_put_v(bc, 2); break;
    default:                    ff_put_v(bc, 3); break;
    }
    ff_put_v(bc, 4);
    if (codec->codec_tag){
        avio_wl32(bc, codec->codec_tag);
    } else {
        av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
        return AVERROR(EINVAL);
    }

    ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
    ff_put_v(bc, nut->stream[i].msb_pts_shift);
    ff_put_v(bc, nut->stream[i].max_pts_distance);
    ff_put_v(bc, codec->has_b_frames);
    avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    ff_put_v(bc, codec->extradata_size);
    avio_write(bc, codec->extradata, codec->extradata_size);

    switch(codec->codec_type){
    case AVMEDIA_TYPE_AUDIO:
        ff_put_v(bc, codec->sample_rate);
        ff_put_v(bc, 1);
        ff_put_v(bc, codec->channels);
        break;
    case AVMEDIA_TYPE_VIDEO:
        ff_put_v(bc, codec->width);
        ff_put_v(bc, codec->height);

        if(st->sample_aspect_ratio.num<=0 || st->sample_aspect_ratio.den<=0){
            ff_put_v(bc, 0);
            ff_put_v(bc, 0);
        }else{
            ff_put_v(bc, st->sample_aspect_ratio.num);
            ff_put_v(bc, st->sample_aspect_ratio.den);
        }
        ff_put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}

static int add_info(AVIOContext *bc, const char *type, const char *value){
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}
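
/* Write the global info packet carrying the container-level metadata. */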
static int write_globalinfo(NUTContext *nut, AVIOContext *bc){
    AVFormatContext *s= nut->avf;
    AVDictionaryEntry *t = NULL;
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size;
    int ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;

    while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, 0); //stream_id_plus1
    ff_put_v(bc, 0); //chapter_id
    ff_put_v(bc, 0); //timestamp_start
    ff_put_v(bc, 0); //length

    ff_put_v(bc, count);

    dyn_size= avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}
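
/* Write per-stream info (currently only dispositions). Returns the number of
 * info entries written; 0 means the packet can be skipped entirely. */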
static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id){
    AVFormatContext *s= nut->avf;
    AVStream* st = s->streams[stream_id];
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size, i;
    int ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;

    for (i=0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        ff_put_v(bc, stream_id + 1); //stream_id_plus1
        ff_put_v(bc, 0); //chapter_id
        ff_put_v(bc, 0); //timestamp_start
        ff_put_v(bc, 0); //length

        ff_put_v(bc, count);

        avio_write(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}
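
/* Write one chapter as an info packet: chapter id, start time, length and the
 * chapter metadata. */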
static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
{
    AVIOContext *dyn_bc;
    uint8_t *dyn_buf = NULL;
    AVDictionaryEntry *t = NULL;
    AVChapter *ch = nut->avf->chapters[id];
    int ret, dyn_size, count = 0;

    ret = avio_open_dyn_buf(&dyn_bc);
    if (ret < 0)
        return ret;

    ff_put_v(bc, 0);                                         // stream_id_plus1
    put_s(bc, id + 1);                                       // chapter_id
    put_tt(nut, nut->chapter[id].time_base, bc, ch->start);  // chapter_start
    ff_put_v(bc, ch->end - ch->start);                       // chapter_len

    while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    ff_put_v(bc, count);

    dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
    avio_write(bc, dyn_buf, dyn_size);
    av_freep(&dyn_buf);
    return 0;
}
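
/* Write the complete header set: main header, one header per stream, global
 * info, per-stream info and chapters. Called again later to repeat the
 * headers inside the file. */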
static int write_headers(AVFormatContext *avctx, AVIOContext *bc){
    NUTContext *nut = avctx->priv_data;
    AVIOContext *dyn_bc;
    int i, ret;

    ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);

    ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i=0; i < nut->avf->nb_streams; i++){
        ret = avio_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        if ((ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i)) < 0)
            return ret;
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = avio_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret < 0)
            return ret;
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else {
            uint8_t* buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_free(buf);
        }
    }

    for (i = 0; i < nut->avf->nb_chapters; i++) {
        ret = avio_open_dyn_buf(&dyn_bc);
        if (ret < 0)
            return ret;
        ret = write_chapter(nut, dyn_bc, i);
        if (ret < 0) {
            uint8_t *buf;
            avio_close_dyn_buf(dyn_bc, &buf);
            av_freep(&buf);
            return ret;
        }
        put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
    }

    nut->last_syncpoint_pos= INT_MIN;
    nut->header_count++;
    return 0;
}
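
/* Muxer init: allocate per-stream/chapter state, collect time bases, build
 * the elision headers and the frame code table, then write the file id
 * string and the first copy of the headers. */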
static int nut_write_header(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    AVIOContext *bc = s->pb;
    int i, j, ret;

    nut->avf= s;

    nut->stream = av_mallocz(sizeof(StreamContext)*s->nb_streams);
    if (s->nb_chapters)
        nut->chapter = av_mallocz(sizeof(ChapterContext)*s->nb_chapters);
    nut->time_base= av_mallocz(sizeof(AVRational)*(s->nb_streams +
                                                   s->nb_chapters));
    if (!nut->stream || (s->nb_chapters && !nut->chapter) || !nut->time_base) {
        av_freep(&nut->stream);
        av_freep(&nut->chapter);
        av_freep(&nut->time_base);
        return AVERROR(ENOMEM);
    }

    for(i=0; i<s->nb_streams; i++){
        AVStream *st= s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);

        for(j=0; j<nut->time_base_count; j++){
            if(!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))){
                break;
            }
        }
        nut->time_base[j]= time_base;
        nut->stream[i].time_base= &nut->time_base[j];
        if(j==nut->time_base_count)
            nut->time_base_count++;

        if(INT64_C(1000) * time_base.num >= time_base.den)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance= FFMAX(time_base.den, time_base.num) / time_base.num;
    }

    for (i = 0; i < s->nb_chapters; i++) {
        AVChapter *ch = s->chapters[i];

        for (j = 0; j < nut->time_base_count; j++) {
            if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
                break;
        }

        nut->time_base[j] = ch->time_base;
        nut->chapter[i].time_base = &nut->time_base[j];
        if(j == nut->time_base_count)
            nut->time_base_count++;
    }

    nut->max_distance = MAX_DISTANCE;
    build_elision_headers(s);
    build_frame_code(s);
    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    avio_write(bc, ID_STRING, strlen(ID_STRING));
    avio_w8(bc, 0);

    if ((ret = write_headers(s, bc)) < 0)
        return ret;

    avio_flush(bc);

    //FIXME index

    return 0;
}
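
/* Compute the flags this packet actually needs, given the stream state and a
 * candidate frame code (keyframe, explicit stream id, size MSB, coded pts,
 * checksum, header index). */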
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){
    int flags= 0;

    if(pkt->flags & AV_PKT_FLAG_KEY             ) flags |= FLAG_KEY;
    if(pkt->stream_index != fc->stream_id       ) flags |= FLAG_STREAM_ID;
    if(pkt->size / fc->size_mul                 ) flags |= FLAG_SIZE_MSB;
    if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
    if(pkt->size > 2*nut->max_distance          ) flags |= FLAG_CHECKSUM;
    if(FFABS(pkt->pts - nus->last_pts)
                         > nus->max_pts_distance) flags |= FLAG_CHECKSUM;
    if(   pkt->size < nut->header_len[fc->header_idx]
       || (pkt->size > 4096 && fc->header_idx)
       || memcmp(pkt->data, nut->header[fc->header_idx], nut->header_len[fc->header_idx]))
                                                  flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
}
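
/* Pick the longest elision header that is a prefix of this packet, 0 if none. */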
static int find_best_header_idx(NUTContext *nut, AVPacket *pkt){
    int i;
    int best_i  = 0;
    int best_len= 0;

    if(pkt->size > 4096)
        return 0;

    for(i=1; i<nut->header_count; i++){
        if(   pkt->size >= nut->header_len[i]
           && nut->header_len[i] > best_len
           && !memcmp(pkt->data, nut->header[i], nut->header_len[i])){
            best_i= i;
            best_len= nut->header_len[i];
        }
    }
    return best_i;
}
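
/*
 * Write one frame: repeat the headers and emit a syncpoint when needed, pick
 * the cheapest matching frame code for this packet, then write the coded
 * fields followed by the payload with its elided header prefix stripped.
 */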
static int nut_write_packet(AVFormatContext *s, AVPacket *pkt){
    NUTContext *nut = s->priv_data;
    StreamContext *nus= &nut->stream[pkt->stream_index];
    AVIOContext *bc = s->pb, *dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx, best_header_idx;
    int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    int store_sp=0;
    int ret;

    if(pkt->pts < 0)
        return -1;

    if(1LL<<(20+3*nut->header_count) <= avio_tell(bc))
        write_headers(s, bc);

    if(key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp= 1;

    if(pkt->size + 30/*FIXME check*/ + avio_tell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp= 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if(store_sp){
        Syncpoint *sp, dummy= {.pos= INT64_MAX};

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for(i=0; i<s->nb_streams; i++){
            AVStream *st= s->streams[i];
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index= av_index_search_timestamp(st, dts_tb, AVSEEK_FLAG_BACKWARD);
            if(index>=0) dummy.pos= FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if(dummy.pos == INT64_MAX)
            dummy.pos= 0;

        sp= av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp,
                         NULL);

        nut->last_syncpoint_pos= avio_tell(bc);
        ret = avio_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
        ff_put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos)>>4 : 0);
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0/*unused*/, pkt->dts);
    }
    assert(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1<<nus->msb_pts_shift)-1);
    if(ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts= pkt->pts + (1<<nus->msb_pts_shift);

    best_header_idx= find_best_header_idx(nut, pkt);

    best_length=INT_MAX;
    frame_code= -1;
    for(i=0; i<256; i++){
        int length= 0;
        FrameCode *fc= &nut->frame_code[i];
        int flags= fc->flags;

        if(flags & FLAG_INVALID)
            continue;
        needed_flags= get_needed_flags(nut, nus, fc, pkt);

        if(flags & FLAG_CODED){
            length++;
            flags = needed_flags;
        }

        if((flags & needed_flags) != needed_flags)
            continue;

        if((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if(flags & FLAG_STREAM_ID)
            length+= ff_get_v_length(pkt->stream_index);

        if(pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if(flags & FLAG_SIZE_MSB)
            length += ff_get_v_length(pkt->size / fc->size_mul);

        if(flags & FLAG_CHECKSUM)
            length+=4;

        if(flags & FLAG_CODED_PTS)
            length += ff_get_v_length(coded_pts);

        if(   (flags & FLAG_CODED)
           && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx]+1){
            flags |= FLAG_HEADER_IDX;
        }

        if(flags & FLAG_HEADER_IDX){
            length += 1 - nut->header_len[best_header_idx];
        }else{
            length -= nut->header_len[fc->header_idx];
        }

        length*=4;
        length+= !(flags & FLAG_CODED_PTS);
        length+= !(flags & FLAG_CHECKSUM);

        if(length < best_length){
            best_length= length;
            frame_code=i;
        }
    }
    assert(frame_code != -1);
    fc= &nut->frame_code[frame_code];
    flags= fc->flags;
    needed_flags= get_needed_flags(nut, nus, fc, pkt);
    header_idx= fc->header_idx;

    ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
    avio_w8(bc, frame_code);
    if(flags & FLAG_CODED){
        ff_put_v(bc, (flags^needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if(flags & FLAG_STREAM_ID)  ff_put_v(bc, pkt->stream_index);
    if(flags & FLAG_CODED_PTS)  ff_put_v(bc, coded_pts);
    if(flags & FLAG_SIZE_MSB)   ff_put_v(bc, pkt->size / fc->size_mul);
    if(flags & FLAG_HEADER_IDX) ff_put_v(bc, header_idx= best_header_idx);

    if(flags & FLAG_CHECKSUM)   avio_wl32(bc, ffio_get_checksum(bc));
    else                        ffio_get_checksum(bc);

    avio_write(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);
    nus->last_flags= flags;
    nus->last_pts= pkt->pts;

    //FIXME just store one per syncpoint
    if(flags & FLAG_KEY)
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);

    return 0;
}
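
/* Finalize: rewrite the headers if needed, then free the muxer state. */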
static int nut_write_trailer(AVFormatContext *s){
    NUTContext *nut= s->priv_data;
    AVIOContext *bc= s->pb;

    while(nut->header_count<3)
        write_headers(s, bc);

    avio_flush(bc);
    ff_nut_free_sp(nut);
    av_freep(&nut->stream);
    av_freep(&nut->chapter);
    av_freep(&nut->time_base);

    return 0;
}

AVOutputFormat ff_nut_muxer = {
    .name           = "nut",
    .long_name      = NULL_IF_CONFIG_SMALL("NUT format"),
    .mime_type      = "video/x-nut",
    .extensions     = "nut",
    .priv_data_size = sizeof(NUTContext),
    .audio_codec    = CONFIG_LIBVORBIS ? CODEC_ID_VORBIS :
                      CONFIG_LIBMP3LAME ? CODEC_ID_MP3 : CODEC_ID_MP2,
    .video_codec    = CODEC_ID_MPEG4,
    .write_header   = nut_write_header,
    .write_packet   = nut_write_packet,
    .write_trailer  = nut_write_trailer,
    .flags          = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag      = (const AVCodecTag * const []){
        ff_codec_bmp_tags, ff_nut_video_tags, ff_codec_wav_tags,
        ff_nut_subtitle_tags, 0
    },
};