/*
 * AVI encoder.
 * Copyright (c) 2000 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "avi.h"
#include "riff.h"

/*
 * TODO:
 *  - fill all fields if non streamed (nb_frames for example)
 */

#ifdef CONFIG_AVI_MUXER

typedef struct AVIIentry {
    unsigned int flags, pos, len;
} AVIIentry;

#define AVI_INDEX_CLUSTER_SIZE 16384

typedef struct AVIIndex {
    offset_t    indx_start;
    int         entry;
    int         ents_allocated;
    AVIIentry** cluster;
} AVIIndex;

typedef struct {
    offset_t riff_start, movi_list, odml_list;
    offset_t frames_hdr_all, frames_hdr_strm[MAX_STREAMS];
    int audio_strm_length[MAX_STREAMS];
    int riff_id;
    int packet_count[MAX_STREAMS];

    AVIIndex indexes[MAX_STREAMS];
} AVIContext;

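/* Index entries are stored in fixed-size clusters of AVI_INDEX_CLUSTER_SIZE
 * entries; look one up by its global entry number. */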
static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id)
{
    int cl = ent_id / AVI_INDEX_CLUSTER_SIZE;
    int id = ent_id % AVI_INDEX_CLUSTER_SIZE;
    return &idx->cluster[cl][id];
}

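/* Open a new RIFF chunk ("AVI " for the first one, "AVIX" for OpenDML
 * extension RIFFs) together with its first LIST, and reset the per-stream
 * index entry counters for the new segment. */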
static offset_t avi_start_new_riff(AVIContext *avi, ByteIOContext *pb,
                                   const char* riff_tag, const char* list_tag)
{
    offset_t loff;
    int i;

    avi->riff_id++;
    for (i=0; i<MAX_STREAMS; i++)
        avi->indexes[i].entry = 0;

    avi->riff_start = start_tag(pb, "RIFF");
    put_tag(pb, riff_tag);
    loff = start_tag(pb, "LIST");
    put_tag(pb, list_tag);
    return loff;
}

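/* Build the data chunk fourcc for a stream: two decimal digits followed by
 * "dc" for video or "wb" for audio/other streams, e.g. "00dc", "01wb". */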
static char* avi_stream2fourcc(char* tag, int index, enum CodecType type)
{
    tag[0] = '0';
    tag[1] = '0' + index;
    if (type == CODEC_TYPE_VIDEO) {
        tag[2] = 'd';
        tag[3] = 'c';
    } else {
        tag[2] = 'w';
        tag[3] = 'b';
    }
    tag[4] = '\0';
    return tag;
}

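/* Write one entry of the INFO list: tag, little-endian length, the
 * NUL-terminated string, and a padding byte to keep the chunk size even.
 * Empty strings are skipped entirely. */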
static void avi_write_info_tag(ByteIOContext *pb, const char *tag, const char *str)
{
    int len = strlen(str);
    if (len > 0) {
        len++;
        put_tag(pb, tag);
        put_le32(pb, len);
        put_strz(pb, str);
        if (len & 1)
            put_byte(pb, 0);
    }
}

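/* Seek back into the already written headers and patch the per-stream
 * length/frame-count fields; for the first RIFF also patch the global
 * frame count in the avih chunk. The file position is restored afterwards. */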
static int avi_write_counters(AVFormatContext* s, int riff_id)
{
    ByteIOContext *pb = &s->pb;
    AVIContext *avi = s->priv_data;
    int n, au_byterate, au_ssize, au_scale, nb_frames = 0;
    offset_t file_size;
    AVCodecContext* stream;

    file_size = url_ftell(pb);
    for(n = 0; n < s->nb_streams; n++) {
        assert(avi->frames_hdr_strm[n]);
        stream = s->streams[n]->codec;
        url_fseek(pb, avi->frames_hdr_strm[n], SEEK_SET);
        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
        if(au_ssize == 0) {
            put_le32(pb, avi->packet_count[n]);
        } else {
            put_le32(pb, avi->audio_strm_length[n] / au_ssize);
        }
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            nb_frames = FFMAX(nb_frames, avi->packet_count[n]);
    }
    if(riff_id == 1) {
        assert(avi->frames_hdr_all);
        url_fseek(pb, avi->frames_hdr_all, SEEK_SET);
        put_le32(pb, nb_frames);
    }
    url_fseek(pb, file_size, SEEK_SET);

    return 0;
}

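/* Write the RIFF/hdrl headers: the avih chunk, one strl LIST per stream
 * (strh, strf and a JUNK placeholder for the OpenDML master index), an
 * odml/dmlh JUNK placeholder, the INFO list, some padding for tag editing,
 * and finally open the movi LIST that will hold the data chunks. */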
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = &s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    offset_t list1, list2, strh, strf;

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(avi, pb, "AVI ", "hdrl");

    /* avi header */
    put_tag(pb, "avih");
    put_le32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == CODEC_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        put_le32(pb, (uint32_t)(int64_t_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        put_le32(pb, 0);
    }
    put_le32(pb, bitrate / 8); /* XXX: not quite exact */
    put_le32(pb, 0); /* padding */
    if (url_is_streamed(pb))
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
    put_le32(pb, nb_frames); /* nb frames, filled later */
    put_le32(pb, 0); /* initial frame */
    put_le32(pb, s->nb_streams); /* nb streams */
    put_le32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        put_le32(pb, video_enc->width);
        put_le32(pb, video_enc->height);
    } else {
        put_le32(pb, 0);
        put_le32(pb, 0);
    }
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        list2 = start_tag(pb, "LIST");
        put_tag(pb, "strl");

        stream = s->streams[i]->codec;

        /* FourCC should really be set by the codec itself */
        if (! stream->codec_tag) {
            stream->codec_tag = codec_get_bmp_tag(stream->codec_id);
        }

        /* stream generic header */
        strh = start_tag(pb, "strh");
        switch(stream->codec_type) {
        case CODEC_TYPE_VIDEO: put_tag(pb, "vids"); break;
        case CODEC_TYPE_AUDIO: put_tag(pb, "auds"); break;
//      case CODEC_TYPE_TEXT : put_tag(pb, "txts"); break;
        case CODEC_TYPE_DATA : put_tag(pb, "dats"); break;
        }
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            put_le32(pb, stream->codec_tag);
        else
            put_le32(pb, 1);
        put_le32(pb, 0); /* flags */
        put_le16(pb, 0); /* priority */
        put_le16(pb, 0); /* language */
        put_le32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        put_le32(pb, au_scale); /* scale */
        put_le32(pb, au_byterate); /* rate */
        av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);

        put_le32(pb, 0); /* start */
        avi->frames_hdr_strm[i] = url_ftell(pb); /* remember this offset to fill later */
        if (url_is_streamed(pb))
            put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            put_le32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            put_le32(pb, 1024 * 1024);
        else if(stream->codec_type == CODEC_TYPE_AUDIO)
            put_le32(pb, 12 * 1024);
        else
            put_le32(pb, 0);
        put_le32(pb, -1); /* quality */
        put_le32(pb, au_ssize); /* sample size */
        put_le32(pb, 0);
        put_le16(pb, stream->width);
        put_le16(pb, stream->height);
        end_tag(pb, strh);

        if(stream->codec_type != CODEC_TYPE_DATA){
            strf = start_tag(pb, "strf");
            switch(stream->codec_type) {
            case CODEC_TYPE_VIDEO:
                put_bmp_header(pb, stream, codec_bmp_tags, 0);
                break;
            case CODEC_TYPE_AUDIO:
                if (put_wav_header(pb, stream) < 0) {
                    av_free(avi);
                    return -1;
                }
                break;
            default:
                return -1;
            }
            end_tag(pb, strf);
        }

        if (!url_is_streamed(pb)) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avi->indexes[i].entry = avi->indexes[i].ents_allocated = 0;
            avi->indexes[i].indx_start = start_tag(pb, "JUNK");
            put_le16(pb, 4);        /* wLongsPerEntry */
            put_byte(pb, 0);        /* bIndexSubType (0 == frame index) */
            put_byte(pb, 0);        /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            put_le32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type));
                                    /* dwChunkId */
            put_le64(pb, 0);        /* dwReserved[3]
            put_le32(pb, 0);           Must be 0.    */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                put_le64(pb, 0);
            end_tag(pb, avi->indexes[i].indx_start);
        }

        end_tag(pb, list2);
    }

    if (!url_is_streamed(pb)) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = start_tag(pb, "JUNK");
        put_tag(pb, "odml");
        put_tag(pb, "dmlh");
        put_le32(pb, 248);
        for (i = 0; i < 248; i+= 4)
            put_le32(pb, 0);
        end_tag(pb, avi->odml_list);
    }

    end_tag(pb, list1);

    list2 = start_tag(pb, "LIST");
    put_tag(pb, "INFO");
    avi_write_info_tag(pb, "INAM", s->title);
    avi_write_info_tag(pb, "IART", s->author);
    avi_write_info_tag(pb, "ICOP", s->copyright);
    avi_write_info_tag(pb, "ICMT", s->comment);
    avi_write_info_tag(pb, "IPRD", s->album);
    avi_write_info_tag(pb, "IGNR", s->genre);
    if (s->track) {
        char str_track[4];
        snprintf(str_track, 4, "%d", s->track);
        avi_write_info_tag(pb, "IPRT", str_track);
    }
    if(!(s->streams[0]->codec->flags & CODEC_FLAG_BITEXACT))
        avi_write_info_tag(pb, "ISFT", LIBAVFORMAT_IDENT);
    end_tag(pb, list2);

    /* some padding for easier tag editing */
    list2 = start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        put_le32(pb, 0);
    end_tag(pb, list2);

    avi->movi_list = start_tag(pb, "LIST");
    put_tag(pb, "movi");

    put_flush_packet(pb);

    return 0;
}

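/* Write one OpenDML "ix??" leaf index chunk per stream for the current RIFF
 * segment and patch the corresponding slot of the master index laid out in
 * avi_write_header() (turning its JUNK placeholder into a real "indx" chunk). */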
static int avi_write_ix(AVFormatContext *s)
{
    ByteIOContext *pb = &s->pb;
    AVIContext *avi = s->priv_data;
    char tag[5];
    char ix_tag[] = "ix00";
    int i, j;

    assert(!url_is_streamed(pb));

    if (avi->riff_id > AVI_MASTER_INDEX_SIZE)
        return -1;

    for (i=0;i<s->nb_streams;i++) {
        offset_t ix, pos;

        avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type);
        ix_tag[3] = '0' + i;

        /* Writing AVI OpenDML leaf index chunk */
        ix = url_ftell(pb);
        put_tag(pb, &ix_tag[0]);     /* ix?? */
        put_le32(pb, avi->indexes[i].entry * 8 + 24);
                                     /* chunk size */
        put_le16(pb, 2);             /* wLongsPerEntry */
        put_byte(pb, 0);             /* bIndexSubType (0 == frame index) */
        put_byte(pb, 1);             /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
        put_le32(pb, avi->indexes[i].entry);
                                     /* nEntriesInUse */
        put_tag(pb, &tag[0]);        /* dwChunkId */
        put_le64(pb, avi->movi_list);/* qwBaseOffset */
        put_le32(pb, 0);             /* dwReserved_3 (must be 0) */

        for (j=0; j<avi->indexes[i].entry; j++) {
            AVIIentry* ie = avi_get_ientry(&avi->indexes[i], j);
            put_le32(pb, ie->pos + 8);
            put_le32(pb, ((uint32_t)ie->len & ~0x80000000) |
                         (ie->flags & 0x10 ? 0 : 0x80000000));
        }
        put_flush_packet(pb);
        pos = url_ftell(pb);

        /* Updating one entry in the AVI OpenDML master index */
        url_fseek(pb, avi->indexes[i].indx_start - 8, SEEK_SET);
        put_tag(pb, "indx");                 /* enabling this entry */
        url_fskip(pb, 8);
        put_le32(pb, avi->riff_id);          /* nEntriesInUse */
        url_fskip(pb, 16*avi->riff_id);
        put_le64(pb, ix);                    /* qwOffset */
        put_le32(pb, pos - ix);              /* dwSize */
        put_le32(pb, avi->indexes[i].entry); /* dwDuration */

        url_fseek(pb, pos, SEEK_SET);
    }
    return 0;
}

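/* Write the legacy "idx1" index chunk by merging the per-stream in-memory
 * index entries in file-offset order, then patch the frame counters in the
 * headers. Only done when the output is seekable. */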
static int avi_write_idx1(AVFormatContext *s)
{
    ByteIOContext *pb = &s->pb;
    AVIContext *avi = s->priv_data;
    offset_t idx_chunk;
    int i;
    char tag[5];

    if (!url_is_streamed(pb)) {
        AVIIentry* ie = 0, *tie;
        int entry[MAX_STREAMS];
        int empty, stream_id = -1;

        idx_chunk = start_tag(pb, "idx1");
        memset(&entry[0], 0, sizeof(entry));
        do {
            empty = 1;
            for (i=0; i<s->nb_streams; i++) {
                if (avi->indexes[i].entry <= entry[i])
                    continue;

                tie = avi_get_ientry(&avi->indexes[i], entry[i]);
                if (empty || tie->pos < ie->pos) {
                    ie = tie;
                    stream_id = i;
                }
                empty = 0;
            }
            if (!empty) {
                avi_stream2fourcc(&tag[0], stream_id,
                                  s->streams[stream_id]->codec->codec_type);
                put_tag(pb, &tag[0]);
                put_le32(pb, ie->flags);
                put_le32(pb, ie->pos);
                put_le32(pb, ie->len);
                entry[stream_id]++;
            }
        } while (!empty);
        end_tag(pb, idx_chunk);

        avi_write_counters(s, avi->riff_id);
    }
    return 0;
}

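/* Write one data chunk. Empty chunks are inserted first while dts is ahead of
 * the packet count (keeps streams without a fixed block size in sync), a new
 * "AVIX" RIFF is opened when the current one reaches the OpenDML size limit,
 * and an index entry is recorded for seekable output. */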
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = &s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

//    av_log(s, AV_LOG_DEBUG, "%lld %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index);
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avi->packet_count[stream_index]){
        AVPacket empty_packet;

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
//        av_log(s, AV_LOG_DEBUG, "dup %lld %d\n", pkt->dts, avi->packet_count[stream_index]);
    }
    avi->packet_count[stream_index]++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (!url_is_streamed(pb) &&
        (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(avi, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
    if(pkt->flags&PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        avi->audio_strm_length[stream_index] += size;
    }

    if (!url_is_streamed(&s->pb)) {
        AVIIndex* idx = &avi->indexes[stream_index];
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
            if (!idx->cluster)
                return -1;
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return -1;
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    put_buffer(pb, tag, 4);
    put_le32(pb, size);
    put_buffer(pb, pkt->data, size);
    if (size & 1)
        put_byte(pb, 0);

    put_flush_packet(pb);
    return 0;
}

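/* Finish the file: close the movi LIST and the final RIFF. For a plain AVI
 * (single RIFF) this writes the idx1 index; for an OpenDML file it writes the
 * last leaf indexes, converts the odml JUNK placeholder into a real LIST with
 * the total frame count, and updates the counters. Finally the in-memory
 * index clusters are freed. */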
static int avi_write_trailer(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = &s->pb;
    int res = 0;
    int i, j, n, nb_frames;
    offset_t file_size;

    if (!url_is_streamed(pb)) {
        if (avi->riff_id == 1) {
            end_tag(pb, avi->movi_list);
            res = avi_write_idx1(s);
            end_tag(pb, avi->riff_start);
        } else {
            avi_write_ix(s);
            end_tag(pb, avi->movi_list);
            end_tag(pb, avi->riff_start);

            file_size = url_ftell(pb);
            url_fseek(pb, avi->odml_list - 8, SEEK_SET);
            put_tag(pb, "LIST"); /* Making this AVI OpenDML one */
            url_fskip(pb, 16);

            for (n=nb_frames=0;n<s->nb_streams;n++) {
                AVCodecContext *stream = s->streams[n]->codec;
                if (stream->codec_type == CODEC_TYPE_VIDEO) {
                    if (nb_frames < avi->packet_count[n])
                        nb_frames = avi->packet_count[n];
                } else {
                    if (stream->codec_id == CODEC_ID_MP2 || stream->codec_id == CODEC_ID_MP3) {
                        nb_frames += avi->packet_count[n];
                    }
                }
            }
            put_le32(pb, nb_frames);

            url_fseek(pb, file_size, SEEK_SET);

            avi_write_counters(s, avi->riff_id);
        }
    }
    put_flush_packet(pb);

    for (i=0; i<MAX_STREAMS; i++) {
        for (j=0; j<avi->indexes[i].ents_allocated/AVI_INDEX_CLUSTER_SIZE; j++)
            av_free(avi->indexes[i].cluster[j]);
        av_free(avi->indexes[i].cluster);
        avi->indexes[i].cluster = NULL;
        avi->indexes[i].ents_allocated = avi->indexes[i].entry = 0;
    }

    return res;
}

AVOutputFormat avi_muxer = {
    "avi",
    "avi format",
    "video/x-msvideo",
    "avi",
    sizeof(AVIContext),
    CODEC_ID_MP2,
    CODEC_ID_MPEG4,
    avi_write_header,
    avi_write_packet,
    avi_write_trailer,
};
#endif //CONFIG_AVI_MUXER