/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "nut.h"
#include "tree.h"
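
/*
 * Build the 256-entry frame_code table.  Each entry describes how a frame
 * whose header starts with that byte value is coded: its flags, stream id,
 * size multiplier/LSB and pts delta, so that common frame types can be
 * written with a single header byte.  The usable range 1..254 is split
 * evenly between the streams; 0x00, 0xFF and 'N' are marked invalid at the
 * end ('N' being the byte every startcode begins with).
 */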
static void build_frame_code(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start=1;
    int end= 254;
    int keyframe_0_esc= s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft= &nut->frame_code[start];
    ft->flags= FLAG_CODED;
    ft->size_mul=1;
    ft->pts_delta=1;
    start++;

    if(keyframe_0_esc){
        /* keyframe = 0 escape */
        FrameCode *ft= &nut->frame_code[start];
        ft->flags= FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul=1;
        start++;
    }

    for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
        int start2= start + (end-start)*stream_id / s->nb_streams;
        int end2  = start + (end-start)*(stream_id+1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio= codec->codec_type == CODEC_TYPE_AUDIO;
        int intra_only= /*codec->intra_only || */is_audio;
        int pred_count;

        for(key_frame=0; key_frame<2; key_frame++){
            if(intra_only && keyframe_0_esc && key_frame==0)
                continue;

            {
                FrameCode *ft= &nut->frame_code[start2];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id= stream_id;
                ft->size_mul=1;
                start2++;
            }
        }
        key_frame= intra_only;
#if 1
        if(is_audio){
            int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);
            int pts;
            for(pts=0; pts<2; pts++){
                for(pred=0; pred<2; pred++){
                    FrameCode *ft= &nut->frame_code[start2];
                    ft->flags= FLAG_KEY*key_frame;
                    ft->stream_id= stream_id;
                    ft->size_mul=frame_bytes + 2;
                    ft->size_lsb=frame_bytes + pred;
                    ft->pts_delta=pts;
                    start2++;
                }
            }
        }else{
            FrameCode *ft= &nut->frame_code[start2];
            ft->flags= FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id= stream_id;
            ft->size_mul=1;
            ft->pts_delta=1;
            start2++;
        }
#endif
        if(codec->has_b_frames){
            pred_count=5;
            pred_table[0]=-2;
            pred_table[1]=-1;
            pred_table[2]=1;
            pred_table[3]=3;
            pred_table[4]=4;
        }else if(codec->codec_id == CODEC_ID_VORBIS){
            pred_count=3;
            pred_table[0]=2;
            pred_table[1]=9;
            pred_table[2]=16;
        }else{
            pred_count=1;
            pred_table[0]=1;
        }

        for(pred=0; pred<pred_count; pred++){
            int start3= start2 + (end2-start2)*pred / pred_count;
            int end3  = start2 + (end2-start2)*(pred+1) / pred_count;

            for(index=start3; index<end3; index++){
                FrameCode *ft= &nut->frame_code[index];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB;
                ft->stream_id= stream_id;
//FIXME use single byte size and pred from last
                ft->size_mul= end3-start3;
                ft->size_lsb= index - start3;
                ft->pts_delta= pred_table[pred];
            }
        }
    }
    memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
    nut->frame_code[  0].flags=
    nut->frame_code[255].flags=
    nut->frame_code['N'].flags= FLAG_INVALID;
}

/**
 * Gets the length in bytes which is needed to store val as v.
 */
static int get_length(uint64_t val){
    int i=1;

    while(val>>=7)
        i++;

    return i;
}
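
/* Write an unsigned value in NUT "v" coding: 7 bits per byte, most
 * significant group first, with the high bit set on every byte except the
 * last as a continuation flag. */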
static void put_v(ByteIOContext *bc, uint64_t val){
    int i= get_length(val);

    while(--i>0)
        put_byte(bc, 128 | (val>>(7*i)));

    put_byte(bc, val&127);
}
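
/* Write a timestamp in NUT "t" coding: the pts and the index of the
 * stream's time base are folded into a single v value as
 * pts*time_base_count + time_base_index. */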
static void put_t(NUTContext *nut, StreamContext *nus, ByteIOContext *bc, uint64_t val){
    val *= nut->time_base_count;
    val += nus->time_base - nut->time_base;
    put_v(bc, val);
}

/**
 * Stores a string as vb.
 */
static void put_str(ByteIOContext *bc, const char *string){
    int len= strlen(string);

    put_v(bc, len);
    put_buffer(bc, string, len);
}
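
/* Write a signed value in NUT "s" coding: zigzag-mapped to unsigned
 * (0, 1, -1, 2, -2, ... -> 0, 1, 2, 3, 4, ...) and stored as v. */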
static void put_s(ByteIOContext *bc, int64_t val){
    put_v(bc, 2*FFABS(val) - (val>0));
}

#ifdef TRACE
static inline void put_v_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_v(bc, v);
}

static inline void put_s_trace(ByteIOContext *bc, int64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define put_v(bc, v)  put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)  put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif
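
/*
 * Write one NUT packet: the startcode, the forward pointer (payload size
 * including its checksum, v-coded), a header checksum when the forward
 * pointer exceeds 4096, then the contents of the dynamic buffer and, if
 * requested, a CRC32 of the payload.
 */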
//FIXME remove calculate_checksum
static void put_packet(NUTContext *nut, ByteIOContext *bc, ByteIOContext *dyn_bc, int calculate_checksum, uint64_t startcode){
    uint8_t *dyn_buf=NULL;
    int dyn_size= url_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr= dyn_size + 4*calculate_checksum;

    if(forw_ptr > 4096)
        init_checksum(bc, av_crc04C11DB7_update, 0);
    put_be64(bc, startcode);
    put_v(bc, forw_ptr);
    if(forw_ptr > 4096)
        put_le32(bc, get_checksum(bc));

    if(calculate_checksum)
        init_checksum(bc, av_crc04C11DB7_update, 0);
    put_buffer(bc, dyn_buf, dyn_size);
    if(calculate_checksum)
        put_le32(bc, get_checksum(bc));

    av_free(dyn_buf);
}
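
/*
 * Write the main header: version, stream count, max_distance and the time
 * base list, followed by the frame_code table compressed as runs of
 * similar entries; tmp_fields says how many of the optional per-run fields
 * (pts delta, size multiplier, stream id, size LSB, reserved, run length)
 * are stored explicitly.
 */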
static void write_mainheader(NUTContext *nut, ByteIOContext *bc){
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields;

    put_v(bc, 3); /* version */
    put_v(bc, nut->avf->nb_streams);
    put_v(bc, MAX_DISTANCE);
    put_v(bc, nut->time_base_count);

    for(i=0; i<nut->time_base_count; i++){
        put_v(bc, nut->time_base[i].num);
        put_v(bc, nut->time_base[i].den);
    }

    tmp_pts=0;
    tmp_mul=1;
    tmp_stream=0;
    for(i=0; i<256;){
        tmp_fields=0;
        tmp_size=0;
//        tmp_res=0;
        if(tmp_pts    != nut->frame_code[i].pts_delta) tmp_fields=1;
        if(tmp_mul    != nut->frame_code[i].size_mul ) tmp_fields=2;
        if(tmp_stream != nut->frame_code[i].stream_id) tmp_fields=3;
        if(tmp_size   != nut->frame_code[i].size_lsb ) tmp_fields=4;
//        if(tmp_res    != nut->frame_code[i].res      ) tmp_fields=5;

        tmp_pts   = nut->frame_code[i].pts_delta;
        tmp_flags = nut->frame_code[i].flags;
        tmp_stream= nut->frame_code[i].stream_id;
        tmp_mul   = nut->frame_code[i].size_mul;
        tmp_size  = nut->frame_code[i].size_lsb;
//        tmp_res   = nut->frame_code[i].res;

        for(j=0; i<256; j++,i++){
            if(i == 'N'){
                j--;
                continue;
            }
            if(nut->frame_code[i].pts_delta != tmp_pts   ) break;
            if(nut->frame_code[i].flags     != tmp_flags ) break;
            if(nut->frame_code[i].stream_id != tmp_stream) break;
            if(nut->frame_code[i].size_mul  != tmp_mul   ) break;
            if(nut->frame_code[i].size_lsb  != tmp_size+j) break;
//            if(nut->frame_code[i].res       != tmp_res   ) break;
        }
        if(j != tmp_mul - tmp_size) tmp_fields=6;

        put_v(bc, tmp_flags);
        put_v(bc, tmp_fields);
        if(tmp_fields>0) put_s(bc, tmp_pts);
        if(tmp_fields>1) put_v(bc, tmp_mul);
        if(tmp_fields>2) put_v(bc, tmp_stream);
        if(tmp_fields>3) put_v(bc, tmp_size);
        if(tmp_fields>4) put_v(bc, 0 /*tmp_res*/);
        if(tmp_fields>5) put_v(bc, j);
    }
}
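
/*
 * Write one stream header: stream id, stream class (0 video, 1 audio,
 * 3 other), the fourcc, time base index, msb_pts_shift, max_pts_distance,
 * decode delay, a flags byte, codec extradata and the class specific
 * fields (sample rate/channels for audio, dimensions/aspect/colorspace
 * for video).
 */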
static int write_streamheader(NUTContext *nut, ByteIOContext *bc, AVCodecContext *codec, int i){
    put_v(bc, i);
    switch(codec->codec_type){
    case CODEC_TYPE_VIDEO: put_v(bc, 0); break;
    case CODEC_TYPE_AUDIO: put_v(bc, 1); break;
//    case CODEC_TYPE_TEXT : put_v(bc, 2); break;
    default              : put_v(bc, 3); break;
    }
    put_v(bc, 4);
    if (codec->codec_tag){
        put_le32(bc, codec->codec_tag);
    }else
        return -1;

    put_v(bc, nut->stream[i].time_base - nut->time_base);
    put_v(bc, nut->stream[i].msb_pts_shift);
    put_v(bc, nut->stream[i].max_pts_distance);
    put_v(bc, codec->has_b_frames);
    put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    put_v(bc, codec->extradata_size);
    put_buffer(bc, codec->extradata, codec->extradata_size);

    switch(codec->codec_type){
    case CODEC_TYPE_AUDIO:
        put_v(bc, codec->sample_rate);
        put_v(bc, 1);
        put_v(bc, codec->channels);
        break;
    case CODEC_TYPE_VIDEO:
        put_v(bc, codec->width);
        put_v(bc, codec->height);

        if(codec->sample_aspect_ratio.num<=0 || codec->sample_aspect_ratio.den<=0){
            put_v(bc, 0);
            put_v(bc, 0);
        }else{
            put_v(bc, codec->sample_aspect_ratio.num);
            put_v(bc, codec->sample_aspect_ratio.den);
        }
        put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}
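
/* Append one info item: the key name, -1 as the value (marking the entry
 * as a string value) and the string itself; returns 1 so the caller can
 * count items. */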
static int add_info(ByteIOContext *bc, char *type, char *value){
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}

static void write_globalinfo(NUTContext *nut, ByteIOContext *bc){
    AVFormatContext *s= nut->avf;
    ByteIOContext dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size;

    url_open_dyn_buf(&dyn_bc);
    if(s->title    [0]) count+= add_info(&dyn_bc, "Title"    , s->title);
    if(s->author   [0]) count+= add_info(&dyn_bc, "Author"   , s->author);
    if(s->copyright[0]) count+= add_info(&dyn_bc, "Copyright", s->copyright);
    if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
                        count+= add_info(&dyn_bc, "Encoder"  , LIBAVFORMAT_IDENT);

    put_v(bc, 0); //stream_id_plus1
    put_v(bc, 0); //chapter_id
    put_v(bc, 0); //timestamp_start
    put_v(bc, 0); //length

    put_v(bc, count);

    dyn_size= url_close_dyn_buf(&dyn_bc, &dyn_buf);
    put_buffer(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
}
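
/* Write the main header, one stream header per stream and the global info
 * packet, each wrapped by put_packet() with its own startcode and
 * checksum. */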
static void write_headers(NUTContext *nut, ByteIOContext *bc){
    ByteIOContext dyn_bc;
    int i;

    url_open_dyn_buf(&dyn_bc);
    write_mainheader(nut, &dyn_bc);
    put_packet(nut, bc, &dyn_bc, 1, MAIN_STARTCODE);

    for (i=0; i < nut->avf->nb_streams; i++){
        AVCodecContext *codec = nut->avf->streams[i]->codec;

        url_open_dyn_buf(&dyn_bc);
        write_streamheader(nut, &dyn_bc, codec, i);
        put_packet(nut, bc, &dyn_bc, 1, STREAM_STARTCODE);
    }

    url_open_dyn_buf(&dyn_bc);
    write_globalinfo(nut, &dyn_bc);
    put_packet(nut, bc, &dyn_bc, 1, INFO_STARTCODE);

    nut->last_syncpoint_pos= INT_MIN;
    nut->header_count++;
}
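
/*
 * Muxer init: allocate the per-stream contexts, derive a time base for
 * each stream (deduplicated into nut->time_base), pick msb_pts_shift
 * depending on the time base granularity, build the frame_code table and
 * write the file id string followed by the headers.
 */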
static int write_header(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    ByteIOContext *bc = &s->pb;
    int i, j;

    nut->avf= s;

    nut->stream   = av_mallocz(sizeof(StreamContext)*s->nb_streams);
    nut->time_base= av_mallocz(sizeof(AVRational   )*s->nb_streams);

    for(i=0; i<s->nb_streams; i++){
        AVStream *st= s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        av_set_pts_info(st, 64, time_base.num, time_base.den);

        for(j=0; j<nut->time_base_count; j++){
            if(!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))){
                break;
            }
        }
        nut->time_base[j]= time_base;
        nut->stream[i].time_base= &nut->time_base[j];
        if(j==nut->time_base_count)
            nut->time_base_count++;

        if(av_q2d(time_base) >= 0.001)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance= FFMAX(1/av_q2d(time_base), 1);
    }

    build_frame_code(s);
    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    put_buffer(bc, ID_STRING, strlen(ID_STRING));
    put_byte(bc, 0);

    write_headers(nut, bc);

    put_flush_packet(bc);

    //FIXME index

    return 0;
}
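
/* Return the flags this packet actually needs when coded with the given
 * frame code: keyframe, explicit stream id, size MSB, coded pts, and a
 * checksum for oversized packets or large pts jumps. */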
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){
    int flags= 0;

    if(pkt->flags & PKT_FLAG_KEY                ) flags |= FLAG_KEY;
    if(pkt->stream_index != fc->stream_id       ) flags |= FLAG_STREAM_ID;
    if(pkt->size / fc->size_mul                 ) flags |= FLAG_SIZE_MSB;
    if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
    if(pkt->size > 2*nut->max_distance          ) flags |= FLAG_CHECKSUM;
    if(FFABS(pkt->pts - nus->last_pts)
                        > nus->max_pts_distance ) flags |= FLAG_CHECKSUM;

    return flags | (fc->flags & FLAG_CODED);
}
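
/*
 * Write one frame: repeat the headers at exponentially growing file
 * offsets, emit a syncpoint when a new keyframe run starts or max_distance
 * would be exceeded, then pick the frame code that yields the shortest
 * header for this packet and write the frame header, optional checksum and
 * payload.
 */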
static int write_packet(AVFormatContext *s, AVPacket *pkt){
    NUTContext *nut = s->priv_data;
    StreamContext *nus= &nut->stream[pkt->stream_index];
    ByteIOContext *bc = &s->pb, dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i;
    int key_frame = !!(pkt->flags & PKT_FLAG_KEY);
    int store_sp=0;

    if(1LL<<(20+3*nut->header_count) <= url_ftell(bc))
        write_headers(nut, bc);

    /* store a syncpoint when a keyframe starts a new run of keyframes */
    if(key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp= 1;

    if(pkt->size + 30/*FIXME check*/ + url_ftell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp= 1;

//FIXME: Ensure store_sp is 1 in the first place.

    if(store_sp){
        syncpoint_t *sp, dummy= {.pos= INT64_MAX};

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for(i=0; i<s->nb_streams; i++){
            AVStream *st= s->streams[i];
            int index= av_index_search_timestamp(st, pkt->dts, AVSEEK_FLAG_BACKWARD);
            if(index<0) dummy.pos=0;
            else        dummy.pos= FFMIN(dummy.pos, st->index_entries[index].pos);
        }

        sp= av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pos_cmp, NULL);

        nut->last_syncpoint_pos= url_ftell(bc);
        url_open_dyn_buf(&dyn_bc);
        put_t(nut, nus, &dyn_bc, pkt->dts);
        put_v(&dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos)>>4 : 0);
        put_packet(nut, bc, &dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0/*unused*/, pkt->dts);
    }
    assert(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1<<nus->msb_pts_shift)-1);
    if(ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts= pkt->pts + (1<<nus->msb_pts_shift);

    best_length=INT_MAX;
    frame_code= -1;
    for(i=0; i<256; i++){
        int length= 0;
        FrameCode *fc= &nut->frame_code[i];
        int flags= fc->flags;

        if(flags & FLAG_INVALID)
            continue;
        needed_flags= get_needed_flags(nut, nus, fc, pkt);

        if(flags & FLAG_CODED){
            length++;
            flags = needed_flags;
        }

        if((flags & needed_flags) != needed_flags)
            continue;

        if((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if(flags & FLAG_STREAM_ID)
            length+= get_length(pkt->stream_index);

        if(pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if(flags & FLAG_SIZE_MSB)
            length += get_length(pkt->size / fc->size_mul);

        if(flags & FLAG_CHECKSUM)
            length+=4;

        if(flags & FLAG_CODED_PTS)
            length += get_length(coded_pts);

        length*=4;
        length+= !(flags & FLAG_CODED_PTS);
        length+= !(flags & FLAG_CHECKSUM);

        if(length < best_length){
            best_length= length;
            frame_code=i;
        }
    }
    assert(frame_code != -1);
    fc= &nut->frame_code[frame_code];
    flags= fc->flags;
    needed_flags= get_needed_flags(nut, nus, fc, pkt);

    init_checksum(bc, av_crc04C11DB7_update, 0);
    put_byte(bc, frame_code);

    if(flags & FLAG_CODED){
        put_v(bc, (flags^needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if(flags & FLAG_STREAM_ID)  put_v(bc, pkt->stream_index);
    if(flags & FLAG_CODED_PTS)  put_v(bc, coded_pts);
    if(flags & FLAG_SIZE_MSB)   put_v(bc, pkt->size / fc->size_mul);

    if(flags & FLAG_CHECKSUM)   put_le32(bc, get_checksum(bc));
    else                        get_checksum(bc);

    put_buffer(bc, pkt->data, pkt->size);

    nus->last_flags= flags;
    nus->last_pts= pkt->pts;

    //FIXME just store one per syncpoint
    if(flags & FLAG_KEY)
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);

    return 0;
}

static int write_trailer(AVFormatContext *s){
    NUTContext *nut= s->priv_data;
    ByteIOContext *bc= &s->pb;

    while(nut->header_count<3)
        write_headers(nut, bc);
    put_flush_packet(bc);

    return 0;
}
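
/* Muxer registration; the positional fields are name, long name, mime
 * type, extension, priv_data size, default audio/video codec ids and the
 * write callbacks. */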
AVOutputFormat nut_muxer = {
    "nut",
    "nut format",
    "video/x-nut",
    "nut",
    sizeof(NUTContext),
#ifdef CONFIG_LIBVORBIS
    CODEC_ID_VORBIS,
#elif defined(CONFIG_LIBMP3LAME)
    CODEC_ID_MP3,
#else
    CODEC_ID_MP2, /* AC3 needs liba52 decoder */
#endif
    CODEC_ID_MPEG4,
    write_header,
    write_packet,
    write_trailer,
    .flags = AVFMT_GLOBALHEADER,
    .codec_tag= (const AVCodecTag*[]){codec_bmp_tags, codec_wav_tags, 0},
};