You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

828 lines
25KB

  1. /*
  2. * nut muxer
  3. * Copyright (c) 2004-2007 Michael Niedermayer
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. #include "libavutil/intreadwrite.h"
  22. #include "libavutil/tree.h"
  23. #include "libavcodec/mpegaudiodata.h"
  24. #include "nut.h"
  25. static int find_expected_header(AVCodecContext *c, int size, int key_frame, uint8_t out[64]){
  26. int sample_rate= c->sample_rate;
  27. if(size>4096)
  28. return 0;
  29. AV_WB24(out, 1);
  30. if(c->codec_id == CODEC_ID_MPEG4){
  31. if(key_frame){
  32. return 3;
  33. }else{
  34. out[3]= 0xB6;
  35. return 4;
  36. }
  37. }else if(c->codec_id == CODEC_ID_MPEG1VIDEO || c->codec_id == CODEC_ID_MPEG2VIDEO){
  38. return 3;
  39. }else if(c->codec_id == CODEC_ID_H264){
  40. return 3;
  41. }else if(c->codec_id == CODEC_ID_MP3 || c->codec_id == CODEC_ID_MP2){
  42. int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
  43. int layer= c->codec_id == CODEC_ID_MP3 ? 3 : 2;
  44. unsigned int header= 0xFFF00000;
  45. lsf = sample_rate < (24000+32000)/2;
  46. mpeg25 = sample_rate < (12000+16000)/2;
  47. sample_rate <<= lsf + mpeg25;
  48. if (sample_rate < (32000 + 44100)/2) sample_rate_index=2;
  49. else if(sample_rate < (44100 + 48000)/2) sample_rate_index=0;
  50. else sample_rate_index=1;
  51. sample_rate= ff_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);
  52. for(bitrate_index=2; bitrate_index<30; bitrate_index++){
  53. frame_size = ff_mpa_bitrate_tab[lsf][layer-1][bitrate_index>>1];
  54. frame_size = (frame_size * 144000) / (sample_rate << lsf) + (bitrate_index&1);
  55. if(frame_size == size)
  56. break;
  57. }
  58. header |= (!lsf)<<19;
  59. header |= (4-layer)<<17;
  60. header |= 1<<16; //no crc
  61. AV_WB32(out, header);
  62. if(size <= 0)
  63. return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
  64. if(bitrate_index == 30)
  65. return -1; //something is wrong ...
  66. header |= (bitrate_index>>1)<<12;
  67. header |= sample_rate_index<<10;
  68. header |= (bitrate_index&1)<<9;
  69. return 2; //FIXME actually put the needed ones in build_elision_headers()
  70. return 3; //we guess that the private bit is not set
  71. //FIXME the above assumptions should be checked, if these turn out false too often something should be done
  72. }
  73. return 0;
  74. }
  75. static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type){
  76. NUTContext *nut = s->priv_data;
  77. uint8_t out[64];
  78. int i;
  79. int len= find_expected_header(c, size, frame_type, out);
  80. //av_log(NULL, AV_LOG_ERROR, "expected_h len=%d size=%d codec_id=%d\n", len, size, c->codec_id);
  81. for(i=1; i<nut->header_count; i++){
  82. if( len == nut->header_len[i]
  83. && !memcmp(out, nut->header[i], len)){
  84. // av_log(NULL, AV_LOG_ERROR, "found %d\n", i);
  85. return i;
  86. }
  87. }
  88. // av_log(NULL, AV_LOG_ERROR, "nothing found\n");
  89. return 0;
  90. }
  91. static void build_elision_headers(AVFormatContext *s){
  92. NUTContext *nut = s->priv_data;
  93. int i;
  94. //FIXME this is lame
  95. //FIXME write a 2pass mode to find the maximal headers
  96. static const uint8_t headers[][5]={
  97. {3, 0x00, 0x00, 0x01},
  98. {4, 0x00, 0x00, 0x01, 0xB6},
  99. {2, 0xFF, 0xFA}, //mp3+crc
  100. {2, 0xFF, 0xFB}, //mp3
  101. {2, 0xFF, 0xFC}, //mp2+crc
  102. {2, 0xFF, 0xFD}, //mp2
  103. };
  104. nut->header_count= 7;
  105. for(i=1; i<nut->header_count; i++){
  106. nut->header_len[i]= headers[i-1][0];
  107. nut->header [i]= &headers[i-1][1];
  108. }
  109. }
/**
 * Populates the 256-entry frame-code table: reserves a fully coded
 * escape code, optionally a keyframe=0 escape, then splits the
 * remaining code space evenly across streams, adding per-stream escape
 * codes, fixed-size audio codes and size_lsb-in-code-number codes.
 * Finally codes 0x00, 'N' and 0xFF are marked invalid.
 */
static void build_frame_code(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start=1;
    int end= 254;
    int keyframe_0_esc= s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    // Code 1: generic escape with everything explicitly coded.
    ft= &nut->frame_code[start];
    ft->flags= FLAG_CODED;
    ft->size_mul=1;
    ft->pts_delta=1;
    start++;

    if(keyframe_0_esc){
        /* keyframe = 0 escape */
        FrameCode *ft= &nut->frame_code[start];
        ft->flags= FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul=1;
        start++;
    }

    for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
        // Each stream gets an equal share [start2, end2) of the code space.
        int start2= start + (end-start)*stream_id       / s->nb_streams;
        int end2  = start + (end-start)*(stream_id+1)   / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio= codec->codec_type == CODEC_TYPE_AUDIO;
        int intra_only= /*codec->intra_only || */is_audio;
        int pred_count;

        // Per-stream escape codes, one per keyframe state.
        for(key_frame=0; key_frame<2; key_frame++){
            // Intra-only streams reuse the global keyframe=0 escape.
            if(intra_only && keyframe_0_esc && key_frame==0)
                continue;

            {
                FrameCode *ft= &nut->frame_code[start2];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id= stream_id;
                ft->size_mul=1;
                if(is_audio)
                    ft->header_idx= find_header_idx(s, codec, -1, key_frame);
                start2++;
            }
        }
        key_frame= intra_only;
#if 1
        if(is_audio){
            // Dedicated codes for the typical CBR audio frame size
            // (with/without one padding byte) at pts_delta 0 and 1 —
            // these frames then need no explicitly coded size or pts.
            int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);
            int pts;
            for(pts=0; pts<2; pts++){
                for(pred=0; pred<2; pred++){
                    FrameCode *ft= &nut->frame_code[start2];
                    ft->flags= FLAG_KEY*key_frame;
                    ft->stream_id= stream_id;
                    ft->size_mul=frame_bytes + 2;
                    ft->size_lsb=frame_bytes + pred;
                    ft->pts_delta=pts;
                    ft->header_idx= find_header_idx(s, codec, frame_bytes + pred, key_frame);
                    start2++;
                }
            }
        }else{
            // One code for keyframes advancing pts by 1, size coded as MSB.
            FrameCode *ft= &nut->frame_code[start2];
            ft->flags= FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id= stream_id;
            ft->size_mul=1;
            ft->pts_delta=1;
            start2++;
        }
#endif

        // Candidate pts_delta values depend on the codec's reordering.
        if(codec->has_b_frames){
            pred_count=5;
            pred_table[0]=-2;
            pred_table[1]=-1;
            pred_table[2]=1;
            pred_table[3]=3;
            pred_table[4]=4;
        }else if(codec->codec_id == CODEC_ID_VORBIS){
            pred_count=3;
            pred_table[0]=2;
            pred_table[1]=9;
            pred_table[2]=16;
        }else{
            pred_count=1;
            pred_table[0]=1;
        }

        // Spread the remaining codes over the pts_delta candidates; the
        // size_lsb is carried by the code number itself (index - start3).
        for(pred=0; pred<pred_count; pred++){
            int start3= start2 + (end2-start2)*pred     / pred_count;
            int end3  = start2 + (end2-start2)*(pred+1) / pred_count;

            for(index=start3; index<end3; index++){
                FrameCode *ft= &nut->frame_code[index];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB;
                ft->stream_id= stream_id;
//FIXME use single byte size and pred from last
                ft->size_mul= end3-start3;
                ft->size_lsb= index - start3;
                ft->pts_delta= pred_table[pred];
                if(is_audio)
                    ft->header_idx= find_header_idx(s, codec, -1, key_frame);
            }
        }
    }
    // Shift codes up by one at 'N' so that byte can stay reserved
    // (presumably to avoid startcode emulation — see FLAG_INVALID use),
    // then mark 0x00, 'N' and 0xFF invalid.
    memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
    nut->frame_code[ 0].flags=
    nut->frame_code[255].flags=
    nut->frame_code['N'].flags= FLAG_INVALID;
}
  215. /**
  216. * Gets the length in bytes which is needed to store val as v.
  217. */
  218. static int get_length(uint64_t val){
  219. int i=1;
  220. while(val>>=7)
  221. i++;
  222. return i;
  223. }
  224. static void put_v(ByteIOContext *bc, uint64_t val){
  225. int i= get_length(val);
  226. while(--i>0)
  227. put_byte(bc, 128 | (val>>(7*i)));
  228. put_byte(bc, val&127);
  229. }
/**
 * Writes a universal timestamp: the stream's time base index is folded
 * into the value (val * time_base_count + index) so the demuxer can
 * recover both the timestamp and which time base it is in.
 */
static void put_tt(NUTContext *nut, StreamContext *nus, ByteIOContext *bc, uint64_t val){
    val *= nut->time_base_count;
    val += nus->time_base - nut->time_base; // index of this stream's time base
    put_v(bc, val);
}
/**
 * Stores a string as vb: a v-coded length prefix followed by the raw
 * bytes (no NUL terminator).
 */
static void put_str(ByteIOContext *bc, const char *string){
    int len= strlen(string);

    put_v(bc, len);
    put_buffer(bc, string, len);
}
  243. static void put_s(ByteIOContext *bc, int64_t val){
  244. put_v(bc, 2*FFABS(val) - (val>0));
  245. }
#ifdef TRACE
// Debug wrappers: when TRACE is defined, every coded value is logged
// together with its call site before being forwarded to the real writer.
static inline void put_v_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_v(bc, v);
}

static inline void put_s_trace(ByteIOContext *bc, int64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}

// Route all subsequent put_v/put_s calls through the tracing wrappers.
#define put_v(bc, v)  put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)  put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif
//FIXME remove calculate_checksum
/**
 * Emits one NUT packet: startcode, v-coded forward pointer, a header
 * checksum for large packets, the payload accumulated in dyn_bc, and
 * optionally a payload checksum. Closes dyn_bc and frees its buffer.
 */
static void put_packet(NUTContext *nut, ByteIOContext *bc, ByteIOContext *dyn_bc, int calculate_checksum, uint64_t startcode){
    uint8_t *dyn_buf=NULL;
    int dyn_size= url_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr= dyn_size + 4*calculate_checksum; // payload + trailing crc size

    // Packets whose forward pointer exceeds 4096 get a checksum over the
    // startcode + forward pointer themselves.
    if(forw_ptr > 4096)
        init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_be64(bc, startcode);
    put_v(bc, forw_ptr);
    if(forw_ptr > 4096)
        put_le32(bc, get_checksum(bc));

    if(calculate_checksum)
        init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_buffer(bc, dyn_buf, dyn_size);
    if(calculate_checksum)
        put_le32(bc, get_checksum(bc));

    av_free(dyn_buf);
}
/**
 * Writes the main header payload: version, stream count, max_distance,
 * the time base table, the run-length-compressed frame-code table and
 * the elision headers.
 */
static void write_mainheader(NUTContext *nut, ByteIOContext *bc){
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields, tmp_head_idx;
    int64_t tmp_match;

    put_v(bc, 3); /* version */
    put_v(bc, nut->avf->nb_streams);
    put_v(bc, nut->max_distance);
    put_v(bc, nut->time_base_count);

    for(i=0; i<nut->time_base_count; i++){
        put_v(bc, nut->time_base[i].num);
        put_v(bc, nut->time_base[i].den);
    }

    // Frame-code table: consecutive codes that differ only by an
    // incrementing size_lsb are merged into one run. tmp_fields says how
    // many of the optional per-run fields follow.
    tmp_pts=0;
    tmp_mul=1;
    tmp_stream=0;
    tmp_match= 1-(1LL<<62);
    tmp_head_idx= 0;
    for(i=0; i<256;){
        tmp_fields=0;
        tmp_size=0;
//        tmp_res=0;
        // Determine the highest-numbered field that changed vs. the
        // previous run; all fields up to it must then be coded.
        if(tmp_pts    != nut->frame_code[i].pts_delta) tmp_fields=1;
        if(tmp_mul    != nut->frame_code[i].size_mul ) tmp_fields=2;
        if(tmp_stream != nut->frame_code[i].stream_id) tmp_fields=3;
        if(tmp_size   != nut->frame_code[i].size_lsb ) tmp_fields=4;
//        if(tmp_res    != nut->frame_code[i].res      ) tmp_fields=5;
        if(tmp_head_idx!=nut->frame_code[i].header_idx)tmp_fields=8;

        tmp_pts   = nut->frame_code[i].pts_delta;
        tmp_flags = nut->frame_code[i].flags;
        tmp_stream= nut->frame_code[i].stream_id;
        tmp_mul   = nut->frame_code[i].size_mul;
        tmp_size  = nut->frame_code[i].size_lsb;
//        tmp_res   = nut->frame_code[i].res;
        tmp_head_idx= nut->frame_code[i].header_idx;

        // Extend the run while only size_lsb increments; code 'N' is
        // reserved and does not consume a run slot.
        for(j=0; i<256; j++,i++){
            if(i == 'N'){
                j--;
                continue;
            }
            if(nut->frame_code[i].pts_delta != tmp_pts  ) break;
            if(nut->frame_code[i].flags     != tmp_flags) break;
            if(nut->frame_code[i].stream_id != tmp_stream) break;
            if(nut->frame_code[i].size_mul  != tmp_mul  ) break;
            if(nut->frame_code[i].size_lsb  != tmp_size+j) break;
//            if(nut->frame_code[i].res       != tmp_res  ) break;
            if(nut->frame_code[i].header_idx!= tmp_head_idx) break;
        }
        // If the run length differs from the implicit size_mul-size_lsb
        // count, the count (j) must be coded explicitly.
        if(j != tmp_mul - tmp_size) tmp_fields=6;

        put_v(bc, tmp_flags);
        put_v(bc, tmp_fields);
        if(tmp_fields>0) put_s(bc, tmp_pts);
        if(tmp_fields>1) put_v(bc, tmp_mul);
        if(tmp_fields>2) put_v(bc, tmp_stream);
        if(tmp_fields>3) put_v(bc, tmp_size);
        if(tmp_fields>4) put_v(bc, 0 /*tmp_res*/);
        if(tmp_fields>5) put_v(bc, j);
        if(tmp_fields>6) put_v(bc, tmp_match);
        if(tmp_fields>7) put_v(bc, tmp_head_idx);
    }

    // Elision headers; index 0 is the implicit empty header and is not stored.
    put_v(bc, nut->header_count-1);
    for(i=1; i<nut->header_count; i++){
        put_v(bc, nut->header_len[i]);
        put_buffer(bc, nut->header[i], nut->header_len[i]);
    }
}
/**
 * Writes one stream header payload: stream class, fourcc, time base
 * index, pts parameters, extradata and codec-class-specific fields.
 *
 * @param i stream index (also used as the NUT stream_id)
 * @return 0 on success, -1 if the stream has no codec_tag (a fourcc is
 *         required)
 */
static int write_streamheader(NUTContext *nut, ByteIOContext *bc, AVStream *st, int i){
    AVCodecContext *codec = st->codec;

    put_v(bc, i); // stream_id
    switch(codec->codec_type){
    case CODEC_TYPE_VIDEO:    put_v(bc, 0); break;
    case CODEC_TYPE_AUDIO:    put_v(bc, 1); break;
    case CODEC_TYPE_SUBTITLE: put_v(bc, 2); break;
    default                 : put_v(bc, 3); break;
    }
    put_v(bc, 4); // fourcc length
    if (codec->codec_tag){
        put_le32(bc, codec->codec_tag);
    }else
        return -1;

    put_v(bc, nut->stream[i].time_base - nut->time_base); // time base index
    put_v(bc, nut->stream[i].msb_pts_shift);
    put_v(bc, nut->stream[i].max_pts_distance);
    put_v(bc, codec->has_b_frames); // decode_delay
    put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    put_v(bc, codec->extradata_size);
    put_buffer(bc, codec->extradata, codec->extradata_size);

    switch(codec->codec_type){
    case CODEC_TYPE_AUDIO:
        put_v(bc, codec->sample_rate); // samplerate numerator; denominator is 1
        put_v(bc, 1);
        put_v(bc, codec->channels);
        break;
    case CODEC_TYPE_VIDEO:
        put_v(bc, codec->width);
        put_v(bc, codec->height);
        // Unknown or invalid sample aspect ratio is coded as 0/0.
        if(st->sample_aspect_ratio.num<=0 || st->sample_aspect_ratio.den<=0){
            put_v(bc, 0);
            put_v(bc, 0);
        }else{
            put_v(bc, st->sample_aspect_ratio.num);
            put_v(bc, st->sample_aspect_ratio.den);
        }
        put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}
/**
 * Writes one info name/value pair; the -1 coded between them appears to
 * be the value-type marker for a string value (see put_s mapping).
 * @return number of pairs written (always 1, for counting by callers)
 */
static int add_info(ByteIOContext *bc, const char *type, const char *value){
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);

    return 1;
}
/**
 * Writes the global info packet payload: all file-level metadata tags.
 * The pairs are staged in a dynamic buffer first because their count
 * must be written before them.
 * @return 0 on success, negative on error opening the temp buffer
 */
static int write_globalinfo(NUTContext *nut, ByteIOContext *bc){
    AVFormatContext *s= nut->avf;
    AVMetadataTag *t = NULL;
    ByteIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size;
    int ret = url_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;

    while ((t = av_metadata_get(s->metadata, "", t, AV_METADATA_IGNORE_SUFFIX)))
        count += add_info(dyn_bc, t->key, t->value);

    put_v(bc, 0); //stream_if_plus1
    put_v(bc, 0); //chapter_id
    put_v(bc, 0); //timestamp_start
    put_v(bc, 0); //length

    put_v(bc, count);

    dyn_size= url_close_dyn_buf(dyn_bc, &dyn_buf);
    put_buffer(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}
/**
 * Writes a per-stream info packet carrying the stream's disposition
 * flags. When the stream has no known dispositions, nothing is written.
 * @return number of info pairs written (0 means "no packet emitted"),
 *         or negative on error opening the temp buffer
 */
static int write_streaminfo(NUTContext *nut, ByteIOContext *bc, int stream_id){
    AVFormatContext *s= nut->avf;
    AVStream* st = s->streams[stream_id];
    ByteIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size, i;
    int ret = url_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;

    // Stage the pairs first; their count must precede them on output.
    for (i=0; ff_nut_dispositions[i].flag; ++i) {
        if (st->disposition & ff_nut_dispositions[i].flag)
            count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
    }
    dyn_size = url_close_dyn_buf(dyn_bc, &dyn_buf);

    if (count) {
        put_v(bc, stream_id + 1); //stream_id_plus1
        put_v(bc, 0); //chapter_id
        put_v(bc, 0); //timestamp_start
        put_v(bc, 0); //length

        put_v(bc, count);

        put_buffer(bc, dyn_buf, dyn_size);
    }

    av_free(dyn_buf);
    return count;
}
/**
 * Writes the full header sequence: main header packet, one stream
 * header packet per stream, the global info packet, and a per-stream
 * info packet for every stream that has dispositions. Resets the last
 * syncpoint position (forcing a syncpoint before the next frame) and
 * counts the header repetition.
 * @return 0 on success, negative on error
 */
static int write_headers(NUTContext *nut, ByteIOContext *bc){
    ByteIOContext *dyn_bc;
    int i, ret;

    ret = url_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i=0; i < nut->avf->nb_streams; i++){
        ret = url_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        write_streamheader(nut, dyn_bc, nut->avf->streams[i], i);
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = url_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    for (i = 0; i < nut->avf->nb_streams; i++) {
        ret = url_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        ret = write_streaminfo(nut, dyn_bc, i);
        if (ret < 0)
            return ret;
        if (ret > 0)
            put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
        else {
            // No dispositions: discard the staged (empty) buffer.
            uint8_t* buf;
            url_close_dyn_buf(dyn_bc, &buf);
            av_free(buf);
        }
    }

    nut->last_syncpoint_pos= INT_MIN;
    nut->header_count++;
    return 0;
}
  475. static int write_header(AVFormatContext *s){
  476. NUTContext *nut = s->priv_data;
  477. ByteIOContext *bc = s->pb;
  478. int i, j;
  479. nut->avf= s;
  480. nut->stream = av_mallocz(sizeof(StreamContext)*s->nb_streams);
  481. nut->time_base= av_mallocz(sizeof(AVRational )*s->nb_streams);
  482. for(i=0; i<s->nb_streams; i++){
  483. AVStream *st= s->streams[i];
  484. int ssize;
  485. AVRational time_base;
  486. ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);
  487. av_set_pts_info(st, 64, time_base.num, time_base.den);
  488. for(j=0; j<nut->time_base_count; j++){
  489. if(!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))){
  490. break;
  491. }
  492. }
  493. nut->time_base[j]= time_base;
  494. nut->stream[i].time_base= &nut->time_base[j];
  495. if(j==nut->time_base_count)
  496. nut->time_base_count++;
  497. if(av_q2d(time_base) >= 0.001)
  498. nut->stream[i].msb_pts_shift = 7;
  499. else
  500. nut->stream[i].msb_pts_shift = 14;
  501. nut->stream[i].max_pts_distance= FFMAX(1/av_q2d(time_base), 1);
  502. }
  503. nut->max_distance = MAX_DISTANCE;
  504. build_elision_headers(s);
  505. build_frame_code(s);
  506. assert(nut->frame_code['N'].flags == FLAG_INVALID);
  507. put_buffer(bc, ID_STRING, strlen(ID_STRING));
  508. put_byte(bc, 0);
  509. write_headers(nut, bc);
  510. put_flush_packet(bc);
  511. //FIXME index
  512. return 0;
  513. }
/**
 * Computes the frame flags required to code this packet with the given
 * frame code: whatever the code's implicit values (stream, size, pts,
 * elision header) do not match must be coded explicitly.
 */
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){
    int flags= 0;

    if(pkt->flags & PKT_FLAG_KEY                ) flags |= FLAG_KEY;
    if(pkt->stream_index != fc->stream_id       ) flags |= FLAG_STREAM_ID;
    if(pkt->size / fc->size_mul                 ) flags |= FLAG_SIZE_MSB;
    if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
    // Very large frames and large pts jumps always get a checksum.
    if(pkt->size > 2*nut->max_distance          ) flags |= FLAG_CHECKSUM;
    if(FFABS(pkt->pts - nus->last_pts)
                         > nus->max_pts_distance) flags |= FLAG_CHECKSUM;
    // The code's elision header must actually match the packet start.
    if(   pkt->size < nut->header_len[fc->header_idx]
       || (pkt->size > 4096 && fc->header_idx)
       || memcmp(pkt->data, nut->header[fc->header_idx], nut->header_len[fc->header_idx]))
                                                  flags |= FLAG_HEADER_IDX;

    return flags | (fc->flags & FLAG_CODED);
}
  529. static int find_best_header_idx(NUTContext *nut, AVPacket *pkt){
  530. int i;
  531. int best_i = 0;
  532. int best_len= 0;
  533. if(pkt->size > 4096)
  534. return 0;
  535. for(i=1; i<nut->header_count; i++){
  536. if( pkt->size >= nut->header_len[i]
  537. && nut->header_len[i] > best_len
  538. && !memcmp(pkt->data, nut->header[i], nut->header_len[i])){
  539. best_i= i;
  540. best_len= nut->header_len[i];
  541. }
  542. }
  543. return best_i;
  544. }
/**
 * Writes one frame: repeats the headers / emits a syncpoint when due,
 * then estimates the coded cost of every frame code for this packet and
 * uses the cheapest, coding only the fields that code cannot represent
 * implicitly.
 * @return 0 on success, negative on error
 */
static int write_packet(AVFormatContext *s, AVPacket *pkt){
    NUTContext *nut = s->priv_data;
    StreamContext *nus= &nut->stream[pkt->stream_index];
    ByteIOContext *bc = s->pb, *dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i, header_idx, best_header_idx;
    int key_frame = !!(pkt->flags & PKT_FLAG_KEY);
    int store_sp=0;
    int ret;

    if(pkt->pts < 0)
        return -1;

    // Repeat the headers at exponentially growing file offsets.
    if(1LL<<(20+3*nut->header_count) <= url_ftell(bc))
        write_headers(nut, bc);

    // Store a syncpoint on the first keyframe after non-key frames...
    if(key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp= 1;

    // ...or when max_distance would be exceeded since the last one.
    if(pkt->size + 30/*FIXME check*/ + url_ftell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp= 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if(store_sp){
        Syncpoint *sp, dummy= {.pos= INT64_MAX};

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        // Find the earliest file position still needed to decode all
        // streams from here (candidate for the syncpoint back pointer).
        for(i=0; i<s->nb_streams; i++){
            AVStream *st= s->streams[i];
            // pkt->dts rescaled from this stream's time base to stream i's.
            int64_t dts_tb = av_rescale_rnd(pkt->dts,
                nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
                nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
                AV_ROUND_DOWN);
            int index= av_index_search_timestamp(st, dts_tb, AVSEEK_FLAG_BACKWARD);
            if(index>=0) dummy.pos= FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if(dummy.pos == INT64_MAX)
            dummy.pos= 0;
        sp= av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pos_cmp, NULL);

        nut->last_syncpoint_pos= url_ftell(bc);
        ret = url_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        put_tt(nut, nus, dyn_bc, pkt->dts);
        put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos)>>4 : 0); // back_ptr
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0/*unused*/, pkt->dts);
    }
    assert(nus->last_pts != AV_NOPTS_VALUE);

    // lsb-coded pts; fall back to full coding if the lsb form would not
    // round-trip through ff_lsb2full().
    coded_pts = pkt->pts & ((1<<nus->msb_pts_shift)-1);
    if(ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts= pkt->pts + (1<<nus->msb_pts_shift);

    best_header_idx= find_best_header_idx(nut, pkt);

    // Evaluate every frame code and keep the one with the lowest
    // estimated coded length.
    best_length=INT_MAX;
    frame_code= -1;
    for(i=0; i<256; i++){
        int length= 0;
        FrameCode *fc= &nut->frame_code[i];
        int flags= fc->flags;

        if(flags & FLAG_INVALID)
            continue;
        needed_flags= get_needed_flags(nut, nus, fc, pkt);

        if(flags & FLAG_CODED){
            length++; // the explicitly coded flags cost one byte (estimate)
            flags = needed_flags;
        }

        // The code must provide every needed flag, and the keyframe flag
        // must match exactly.
        if((flags & needed_flags) != needed_flags)
            continue;

        if((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if(flags & FLAG_STREAM_ID)
            length+= get_length(pkt->stream_index);

        if(pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if(flags & FLAG_SIZE_MSB)
            length += get_length(pkt->size / fc->size_mul);

        if(flags & FLAG_CHECKSUM)
            length+=4;

        if(flags & FLAG_CODED_PTS)
            length += get_length(coded_pts);

        // Coded codes can switch to the best elision header when it
        // saves more than the 1 byte the index costs.
        if(   (flags & FLAG_CODED)
           && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx]+1){
            flags |= FLAG_HEADER_IDX;
        }

        if(flags & FLAG_HEADER_IDX){
            length += 1 - nut->header_len[best_header_idx];
        }else{
            length -= nut->header_len[fc->header_idx];
        }

        // Tie-breaking: prefer implicit pts/no checksum at equal byte cost.
        length*=4;
        length+= !(flags & FLAG_CODED_PTS);
        length+= !(flags & FLAG_CHECKSUM);

        if(length < best_length){
            best_length= length;
            frame_code=i;
        }
    }
    assert(frame_code != -1);

    fc= &nut->frame_code[frame_code];
    flags= fc->flags;
    needed_flags= get_needed_flags(nut, nus, fc, pkt);
    header_idx= fc->header_idx;

    init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_byte(bc, frame_code);
    if(flags & FLAG_CODED){
        // coded_flags is stored as an XOR mask against the code's flags.
        put_v(bc, (flags^needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if(flags & FLAG_STREAM_ID)  put_v(bc, pkt->stream_index);
    if(flags & FLAG_CODED_PTS)  put_v(bc, coded_pts);
    if(flags & FLAG_SIZE_MSB)   put_v(bc, pkt->size / fc->size_mul);
    if(flags & FLAG_HEADER_IDX) put_v(bc, header_idx= best_header_idx);

    if(flags & FLAG_CHECKSUM)   put_le32(bc, get_checksum(bc));
    else                        get_checksum(bc); // discard; ends the checksum region

    // The elided header bytes are not stored in the file.
    put_buffer(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);
    nus->last_flags= flags;
    nus->last_pts= pkt->pts;

//FIXME just store one per syncpoint
    if(flags & FLAG_KEY)
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);

    return 0;
}
  668. static int write_trailer(AVFormatContext *s){
  669. NUTContext *nut= s->priv_data;
  670. ByteIOContext *bc= s->pb;
  671. while(nut->header_count<3)
  672. write_headers(nut, bc);
  673. put_flush_packet(bc);
  674. ff_nut_free_sp(nut);
  675. av_freep(&nut->stream);
  676. av_freep(&nut->time_base);
  677. return 0;
  678. }
/** NUT muxer registration (positional AVOutputFormat fields). */
AVOutputFormat nut_muxer = {
    "nut",                              // name
    NULL_IF_CONFIG_SMALL("NUT format"), // long_name
    "video/x-nut",                      // mime_type
    "nut",                              // extensions
    sizeof(NUTContext),                 // priv_data_size
    // Default audio codec depends on which encoders were compiled in.
#if   CONFIG_LIBVORBIS
    CODEC_ID_VORBIS,
#elif CONFIG_LIBMP3LAME
    CODEC_ID_MP3,
#else
    CODEC_ID_MP2,
#endif
    CODEC_ID_MPEG4,                     // default video codec
    write_header,
    write_packet,
    write_trailer,
    .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
    .codec_tag= (const AVCodecTag* const []){ff_codec_bmp_tags, ff_codec_wav_tags, ff_nut_subtitle_tags, 0},
    .metadata_conv = ff_nut_metadata_conv,
};