/*
 * nut muxer
 * Copyright (c) 2004-2007 Michael Niedermayer
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "nut.h"
#include "tree.h"

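/**
 * Builds the 256-entry frame_code table. Codes 0, 255 and 'N' are reserved
 * as invalid, code 1 is a fully coded escape, and the remaining codes are
 * split evenly between the streams so that common combinations of key flag,
 * stream_id, pts_delta and frame size need only a single header byte.
 */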
static void build_frame_code(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    int key_frame, index, pred, stream_id;
    int start=1;
    int end= 254;
    int keyframe_0_esc= s->nb_streams > 2;
    int pred_table[10];
    FrameCode *ft;

    ft= &nut->frame_code[start];
    ft->flags= FLAG_CODED;
    ft->size_mul=1;
    ft->pts_delta=1;
    start++;

    if(keyframe_0_esc){
        /* keyframe = 0 escape */
        FrameCode *ft= &nut->frame_code[start];
        ft->flags= FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
        ft->size_mul=1;
        start++;
    }

    for(stream_id= 0; stream_id<s->nb_streams; stream_id++){
        int start2= start + (end-start)*stream_id / s->nb_streams;
        int end2  = start + (end-start)*(stream_id+1) / s->nb_streams;
        AVCodecContext *codec = s->streams[stream_id]->codec;
        int is_audio= codec->codec_type == CODEC_TYPE_AUDIO;
        int intra_only= /*codec->intra_only || */is_audio;
        int pred_count;

        for(key_frame=0; key_frame<2; key_frame++){
            if(intra_only && keyframe_0_esc && key_frame==0)
                continue;

            {
                FrameCode *ft= &nut->frame_code[start2];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB | FLAG_CODED_PTS;
                ft->stream_id= stream_id;
                ft->size_mul=1;
                start2++;
            }
        }
        key_frame= intra_only;
#if 1
        if(is_audio){
            int frame_bytes= codec->frame_size*(int64_t)codec->bit_rate / (8*codec->sample_rate);
            int pts;
            for(pts=0; pts<2; pts++){
                for(pred=0; pred<2; pred++){
                    FrameCode *ft= &nut->frame_code[start2];
                    ft->flags= FLAG_KEY*key_frame;
                    ft->stream_id= stream_id;
                    ft->size_mul=frame_bytes + 2;
                    ft->size_lsb=frame_bytes + pred;
                    ft->pts_delta=pts;
                    start2++;
                }
            }
        }else{
            FrameCode *ft= &nut->frame_code[start2];
            ft->flags= FLAG_KEY | FLAG_SIZE_MSB;
            ft->stream_id= stream_id;
            ft->size_mul=1;
            ft->pts_delta=1;
            start2++;
        }
#endif

        if(codec->has_b_frames){
            pred_count=5;
            pred_table[0]=-2;
            pred_table[1]=-1;
            pred_table[2]=1;
            pred_table[3]=3;
            pred_table[4]=4;
        }else if(codec->codec_id == CODEC_ID_VORBIS){
            pred_count=3;
            pred_table[0]=2;
            pred_table[1]=9;
            pred_table[2]=16;
        }else{
            pred_count=1;
            pred_table[0]=1;
        }

        for(pred=0; pred<pred_count; pred++){
            int start3= start2 + (end2-start2)*pred / pred_count;
            int end3  = start2 + (end2-start2)*(pred+1) / pred_count;

            for(index=start3; index<end3; index++){
                FrameCode *ft= &nut->frame_code[index];
                ft->flags= FLAG_KEY*key_frame;
                ft->flags|= FLAG_SIZE_MSB;
                ft->stream_id= stream_id;
                //FIXME use single byte size and pred from last
                ft->size_mul= end3-start3;
                ft->size_lsb= index - start3;
                ft->pts_delta= pred_table[pred];
            }
        }
    }
    memmove(&nut->frame_code['N'+1], &nut->frame_code['N'], sizeof(FrameCode)*(255-'N'));
    nut->frame_code[  0].flags=
    nut->frame_code[255].flags=
    nut->frame_code['N'].flags= FLAG_INVALID;
}

/**
 * Gets the length in bytes which is needed to store val as v.
 */
static int get_length(uint64_t val){
    int i=1;

    while(val>>=7)
        i++;

    return i;
}

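/**
 * Stores val as a variable-length value "v": 7 bits per byte, most
 * significant bits first, with the top bit set on every byte except the last.
 */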
static void put_v(ByteIOContext *bc, uint64_t val){
    int i= get_length(val);

    while(--i>0)
        put_byte(bc, 128 | (val>>(7*i)));

    put_byte(bc, val&127);
}

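/**
 * Stores a timestamp as v, folding the stream's time base index into the
 * value: val * time_base_count + time_base_index.
 */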
static void put_t(NUTContext *nut, StreamContext *nus, ByteIOContext *bc, uint64_t val){
    val *= nut->time_base_count;
    val += nus->time_base - nut->time_base;

    put_v(bc, val);
}

/**
 * Stores a string as vb.
 */
static void put_str(ByteIOContext *bc, const char *string){
    int len= strlen(string);

    put_v(bc, len);
    put_buffer(bc, string, len);
}

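/**
 * Stores a signed value as v: positive n is mapped to 2*n - 1,
 * non-positive n to -2*n.
 */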
static void put_s(ByteIOContext *bc, int64_t val){
    put_v(bc, 2*FFABS(val) - (val>0));
}

#ifdef TRACE
static inline void put_v_trace(ByteIOContext *bc, uint64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_v(bc, v);
}

static inline void put_s_trace(ByteIOContext *bc, int64_t v, char *file, char *func, int line){
    av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);

    put_s(bc, v);
}
#define put_v(bc, v)  put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#define put_s(bc, v)  put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
#endif

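/**
 * Writes one NUT packet: the startcode, the forward pointer, a header
 * checksum for packets larger than 4096 bytes, the payload buffered in
 * dyn_bc and, if requested, a payload checksum. Closes and frees dyn_bc.
 */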
//FIXME remove calculate_checksum
static void put_packet(NUTContext *nut, ByteIOContext *bc, ByteIOContext *dyn_bc, int calculate_checksum, uint64_t startcode){
    uint8_t *dyn_buf=NULL;
    int dyn_size= url_close_dyn_buf(dyn_bc, &dyn_buf);
    int forw_ptr= dyn_size + 4*calculate_checksum;

    if(forw_ptr > 4096)
        init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_be64(bc, startcode);
    put_v(bc, forw_ptr);
    if(forw_ptr > 4096)
        put_le32(bc, get_checksum(bc));

    if(calculate_checksum)
        init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_buffer(bc, dyn_buf, dyn_size);
    if(calculate_checksum)
        put_le32(bc, get_checksum(bc));

    av_free(dyn_buf);
}

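/**
 * Writes the main header: version, stream count, max_distance, the time
 * base table and the frame_code table, the latter run-length coded over
 * ranges of entries that only differ in size_lsb.
 */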
static void write_mainheader(NUTContext *nut, ByteIOContext *bc){
    int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields;

    put_v(bc, 3); /* version */
    put_v(bc, nut->avf->nb_streams);
    put_v(bc, MAX_DISTANCE);
    put_v(bc, nut->time_base_count);

    for(i=0; i<nut->time_base_count; i++){
        put_v(bc, nut->time_base[i].num);
        put_v(bc, nut->time_base[i].den);
    }

    tmp_pts=0;
    tmp_mul=1;
    tmp_stream=0;
    for(i=0; i<256;){
        tmp_fields=0;
        tmp_size=0;
//        tmp_res=0;
        if(tmp_pts    != nut->frame_code[i].pts_delta) tmp_fields=1;
        if(tmp_mul    != nut->frame_code[i].size_mul ) tmp_fields=2;
        if(tmp_stream != nut->frame_code[i].stream_id) tmp_fields=3;
        if(tmp_size   != nut->frame_code[i].size_lsb ) tmp_fields=4;
//        if(tmp_res    != nut->frame_code[i].res      ) tmp_fields=5;

        tmp_pts   = nut->frame_code[i].pts_delta;
        tmp_flags = nut->frame_code[i].flags;
        tmp_stream= nut->frame_code[i].stream_id;
        tmp_mul   = nut->frame_code[i].size_mul;
        tmp_size  = nut->frame_code[i].size_lsb;
//        tmp_res   = nut->frame_code[i].res;

        for(j=0; i<256; j++,i++){
            if(i == 'N'){
                j--;
                continue;
            }
            if(nut->frame_code[i].pts_delta != tmp_pts   ) break;
            if(nut->frame_code[i].flags     != tmp_flags ) break;
            if(nut->frame_code[i].stream_id != tmp_stream) break;
            if(nut->frame_code[i].size_mul  != tmp_mul   ) break;
            if(nut->frame_code[i].size_lsb  != tmp_size+j) break;
//            if(nut->frame_code[i].res       != tmp_res   ) break;
        }
        if(j != tmp_mul - tmp_size) tmp_fields=6;

        put_v(bc, tmp_flags);
        put_v(bc, tmp_fields);
        if(tmp_fields>0) put_s(bc, tmp_pts);
        if(tmp_fields>1) put_v(bc, tmp_mul);
        if(tmp_fields>2) put_v(bc, tmp_stream);
        if(tmp_fields>3) put_v(bc, tmp_size);
        if(tmp_fields>4) put_v(bc, 0 /*tmp_res*/);
        if(tmp_fields>5) put_v(bc, j);
    }
}

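/**
 * Writes one stream header: stream id, stream class, fourcc, time base
 * index, msb_pts_shift, max_pts_distance, decode delay, flags, codec
 * extradata and the class-specific fields.
 */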
static int write_streamheader(NUTContext *nut, ByteIOContext *bc, AVCodecContext *codec, int i){
    put_v(bc, i);
    switch(codec->codec_type){
    case CODEC_TYPE_VIDEO: put_v(bc, 0); break;
    case CODEC_TYPE_AUDIO: put_v(bc, 1); break;
//    case CODEC_TYPE_TEXT : put_v(bc, 2); break;
    default              : put_v(bc, 3); break;
    }
    put_v(bc, 4);
    if (codec->codec_tag){
        put_le32(bc, codec->codec_tag);
    }else
        return -1;

    put_v(bc, nut->stream[i].time_base - nut->time_base);
    put_v(bc, nut->stream[i].msb_pts_shift);
    put_v(bc, nut->stream[i].max_pts_distance);
    put_v(bc, codec->has_b_frames);
    put_byte(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */

    put_v(bc, codec->extradata_size);
    put_buffer(bc, codec->extradata, codec->extradata_size);

    switch(codec->codec_type){
    case CODEC_TYPE_AUDIO:
        put_v(bc, codec->sample_rate);
        put_v(bc, 1);
        put_v(bc, codec->channels);
        break;
    case CODEC_TYPE_VIDEO:
        put_v(bc, codec->width);
        put_v(bc, codec->height);

        if(codec->sample_aspect_ratio.num<=0 || codec->sample_aspect_ratio.den<=0){
            put_v(bc, 0);
            put_v(bc, 0);
        }else{
            put_v(bc, codec->sample_aspect_ratio.num);
            put_v(bc, codec->sample_aspect_ratio.den);
        }
        put_v(bc, 0); /* csp type -- unknown */
        break;
    default:
        break;
    }
    return 0;
}

static int add_info(ByteIOContext *bc, char *type, char *value){
    put_str(bc, type);
    put_s(bc, -1);
    put_str(bc, value);
    return 1;
}

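/**
 * Writes the global info packet payload: Title, Author, Copyright and,
 * unless bitexact output is requested, the encoder identification.
 */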
static int write_globalinfo(NUTContext *nut, ByteIOContext *bc){
    AVFormatContext *s= nut->avf;
    ByteIOContext *dyn_bc;
    uint8_t *dyn_buf=NULL;
    int count=0, dyn_size;
    int ret = url_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;

    if(s->title    [0]) count+= add_info(dyn_bc, "Title"    , s->title);
    if(s->author   [0]) count+= add_info(dyn_bc, "Author"   , s->author);
    if(s->copyright[0]) count+= add_info(dyn_bc, "Copyright", s->copyright);
    if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
                        count+= add_info(dyn_bc, "Encoder"  , LIBAVFORMAT_IDENT);

    put_v(bc, 0); //stream_id_plus1
    put_v(bc, 0); //chapter_id
    put_v(bc, 0); //timestamp_start
    put_v(bc, 0); //length

    put_v(bc, count);

    dyn_size= url_close_dyn_buf(dyn_bc, &dyn_buf);
    put_buffer(bc, dyn_buf, dyn_size);
    av_free(dyn_buf);
    return 0;
}

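/**
 * Writes the main header, one stream header per stream and the info
 * packet, each wrapped in its own startcoded packet via put_packet().
 */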
static int write_headers(NUTContext *nut, ByteIOContext *bc){
    ByteIOContext *dyn_bc;
    int i, ret;

    ret = url_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_mainheader(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);

    for (i=0; i < nut->avf->nb_streams; i++){
        AVCodecContext *codec = nut->avf->streams[i]->codec;

        ret = url_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        write_streamheader(nut, dyn_bc, codec, i);
        put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
    }

    ret = url_open_dyn_buf(&dyn_bc);
    if(ret < 0)
        return ret;
    write_globalinfo(nut, dyn_bc);
    put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);

    nut->last_syncpoint_pos= INT_MIN;
    nut->header_count++;
    return 0;
}

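/**
 * Muxer init: chooses a time base and msb_pts_shift per stream, builds the
 * frame_code table and writes the file id string followed by the headers.
 */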
static int write_header(AVFormatContext *s){
    NUTContext *nut = s->priv_data;
    ByteIOContext *bc = s->pb;
    int i, j;

    nut->avf= s;

    nut->stream   = av_mallocz(sizeof(StreamContext)*s->nb_streams);
    nut->time_base= av_mallocz(sizeof(AVRational   )*s->nb_streams);

    for(i=0; i<s->nb_streams; i++){
        AVStream *st= s->streams[i];
        int ssize;
        AVRational time_base;
        ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);

        av_set_pts_info(st, 64, time_base.num, time_base.den);

        for(j=0; j<nut->time_base_count; j++){
            if(!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))){
                break;
            }
        }
        nut->time_base[j]= time_base;
        nut->stream[i].time_base= &nut->time_base[j];
        if(j==nut->time_base_count)
            nut->time_base_count++;

        if(av_q2d(time_base) >= 0.001)
            nut->stream[i].msb_pts_shift = 7;
        else
            nut->stream[i].msb_pts_shift = 14;
        nut->stream[i].max_pts_distance= FFMAX(1/av_q2d(time_base), 1);
    }

    build_frame_code(s);
    assert(nut->frame_code['N'].flags == FLAG_INVALID);

    put_buffer(bc, ID_STRING, strlen(ID_STRING));
    put_byte(bc, 0);

    write_headers(nut, bc);

    put_flush_packet(bc);

    //FIXME index

    return 0;
}

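/**
 * Returns the flags that are required to store the given packet with the
 * given frame code.
 */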
static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc, AVPacket *pkt){
    int flags= 0;

    if(pkt->flags & PKT_FLAG_KEY                ) flags |= FLAG_KEY;
    if(pkt->stream_index != fc->stream_id       ) flags |= FLAG_STREAM_ID;
    if(pkt->size / fc->size_mul                 ) flags |= FLAG_SIZE_MSB;
    if(pkt->pts - nus->last_pts != fc->pts_delta) flags |= FLAG_CODED_PTS;
    if(pkt->size > 2*nut->max_distance          ) flags |= FLAG_CHECKSUM;
    if(FFABS(pkt->pts - nus->last_pts)
                         > nus->max_pts_distance) flags |= FLAG_CHECKSUM;

    return flags | (fc->flags & FLAG_CODED);
}

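/**
 * Writes one frame: repeats the headers and emits a syncpoint when needed,
 * then picks the frame code with the smallest estimated header size and
 * writes the frame header followed by the payload.
 */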
static int write_packet(AVFormatContext *s, AVPacket *pkt){
    NUTContext *nut = s->priv_data;
    StreamContext *nus= &nut->stream[pkt->stream_index];
    ByteIOContext *bc = s->pb, *dyn_bc;
    FrameCode *fc;
    int64_t coded_pts;
    int best_length, frame_code, flags, needed_flags, i;
    int key_frame = !!(pkt->flags & PKT_FLAG_KEY);
    int store_sp=0;
    int ret;

    if(1LL<<(20+3*nut->header_count) <= url_ftell(bc))
        write_headers(nut, bc);

    if(key_frame && !(nus->last_flags & FLAG_KEY))
        store_sp= 1;

    if(pkt->size + 30/*FIXME check*/ + url_ftell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
        store_sp= 1;

    //FIXME: Ensure store_sp is 1 in the first place.

    if(store_sp){
        syncpoint_t *sp, dummy= {.pos= INT64_MAX};

        ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
        for(i=0; i<s->nb_streams; i++){
            AVStream *st= s->streams[i];
            int index= av_index_search_timestamp(st, pkt->dts, AVSEEK_FLAG_BACKWARD);
            if(index>=0) dummy.pos= FFMIN(dummy.pos, st->index_entries[index].pos);
        }
        if(dummy.pos == INT64_MAX)
            dummy.pos= 0;
        sp= av_tree_find(nut->syncpoints, &dummy, ff_nut_sp_pos_cmp, NULL);

        nut->last_syncpoint_pos= url_ftell(bc);
        ret = url_open_dyn_buf(&dyn_bc);
        if(ret < 0)
            return ret;
        put_t(nut, nus, dyn_bc, pkt->dts);
        put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos)>>4 : 0);
        put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);

        ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0/*unused*/, pkt->dts);
    }
    assert(nus->last_pts != AV_NOPTS_VALUE);

    coded_pts = pkt->pts & ((1<<nus->msb_pts_shift)-1);
    if(ff_lsb2full(nus, coded_pts) != pkt->pts)
        coded_pts= pkt->pts + (1<<nus->msb_pts_shift);

    best_length=INT_MAX;
    frame_code= -1;
    for(i=0; i<256; i++){
        int length= 0;
        FrameCode *fc= &nut->frame_code[i];
        int flags= fc->flags;

        if(flags & FLAG_INVALID)
            continue;
        needed_flags= get_needed_flags(nut, nus, fc, pkt);

        if(flags & FLAG_CODED){
            length++;
            flags = needed_flags;
        }

        if((flags & needed_flags) != needed_flags)
            continue;

        if((flags ^ needed_flags) & FLAG_KEY)
            continue;

        if(flags & FLAG_STREAM_ID)
            length+= get_length(pkt->stream_index);

        if(pkt->size % fc->size_mul != fc->size_lsb)
            continue;
        if(flags & FLAG_SIZE_MSB)
            length += get_length(pkt->size / fc->size_mul);

        if(flags & FLAG_CHECKSUM)
            length+=4;

        if(flags & FLAG_CODED_PTS)
            length += get_length(coded_pts);

        length*=4;
        length+= !(flags & FLAG_CODED_PTS);
        length+= !(flags & FLAG_CHECKSUM);

        if(length < best_length){
            best_length= length;
            frame_code=i;
        }
    }
    assert(frame_code != -1);
    fc= &nut->frame_code[frame_code];
    flags= fc->flags;
    needed_flags= get_needed_flags(nut, nus, fc, pkt);

    init_checksum(bc, ff_crc04C11DB7_update, 0);
    put_byte(bc, frame_code);

    if(flags & FLAG_CODED){
        put_v(bc, (flags^needed_flags) & ~(FLAG_CODED));
        flags = needed_flags;
    }
    if(flags & FLAG_STREAM_ID)  put_v(bc, pkt->stream_index);
    if(flags & FLAG_CODED_PTS)  put_v(bc, coded_pts);
    if(flags & FLAG_SIZE_MSB)   put_v(bc, pkt->size / fc->size_mul);

    if(flags & FLAG_CHECKSUM)   put_le32(bc, get_checksum(bc));
    else                        get_checksum(bc);

    put_buffer(bc, pkt->data, pkt->size);

    nus->last_flags= flags;
    nus->last_pts= pkt->pts;

    //FIXME just store one per syncpoint
    if(flags & FLAG_KEY)
        av_add_index_entry(
            s->streams[pkt->stream_index],
            nut->last_syncpoint_pos,
            pkt->pts,
            0,
            0,
            AVINDEX_KEYFRAME);

    return 0;
}

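/**
 * Repeats the headers until they have been written at least three times,
 * then flushes the output.
 */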
static int write_trailer(AVFormatContext *s){
    NUTContext *nut= s->priv_data;
    ByteIOContext *bc= s->pb;

    while(nut->header_count<3)
        write_headers(nut, bc);

    put_flush_packet(bc);

    return 0;
}

AVOutputFormat nut_muxer = {
    "nut",
    "nut format",
    "video/x-nut",
    "nut",
    sizeof(NUTContext),
#ifdef CONFIG_LIBVORBIS
    CODEC_ID_VORBIS,
#elif defined(CONFIG_LIBMP3LAME)
    CODEC_ID_MP3,
#else
    CODEC_ID_MP2, /* AC3 needs liba52 decoder */
#endif
    CODEC_ID_MPEG4,
    write_header,
    write_packet,
    write_trailer,
    .flags = AVFMT_GLOBALHEADER,
    .codec_tag= (const AVCodecTag*[]){codec_bmp_tags, codec_wav_tags, 0},
};