/*
 * AVI muxer
 * Copyright (c) 2000 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avformat.h"
#include "avi.h"
#include "riff.h"
#include "libavutil/intreadwrite.h"

/*
 * TODO:
 *  - fill all fields if non streamed (nb_frames for example)
 */
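
/* In-memory index: one AVIIentry per written chunk, kept in fixed-size
 * clusters so the index can grow without reallocating the entries. */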
typedef struct AVIIentry {
    unsigned int flags, pos, len;
} AVIIentry;

#define AVI_INDEX_CLUSTER_SIZE 16384

typedef struct AVIIndex {
    int64_t     indx_start;
    int         entry;
    int         ents_allocated;
    AVIIentry** cluster;
} AVIIndex;

typedef struct {
    int64_t riff_start, movi_list, odml_list;
    int64_t frames_hdr_all;
    int riff_id;
} AVIContext;

typedef struct {
    int64_t frames_hdr_strm;
    int audio_strm_length;
    int packet_count;
    int entry;
    AVIIndex indexes;
} AVIStream;
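
/* Return a pointer to index entry ent_id, looked up through the clusters. */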
static inline AVIIentry* avi_get_ientry(AVIIndex* idx, int ent_id)
{
    int cl = ent_id / AVI_INDEX_CLUSTER_SIZE;
    int id = ent_id % AVI_INDEX_CLUSTER_SIZE;
    return &idx->cluster[cl][id];
}
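
/* Reset the per-stream index entry counters and open a new RIFF chunk
 * (the initial "AVI " RIFF or an OpenDML "AVIX" extension) followed by a
 * LIST chunk; returns the offset of the LIST so it can be closed later. */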
static int64_t avi_start_new_riff(AVFormatContext *s, ByteIOContext *pb,
                                  const char* riff_tag, const char* list_tag)
{
    AVIContext *avi= s->priv_data;
    int64_t loff;
    int i;

    avi->riff_id++;
    for (i=0; i<s->nb_streams; i++){
        AVIStream *avist= s->streams[i]->priv_data;
        avist->indexes.entry = 0;
    }

    avi->riff_start = ff_start_tag(pb, "RIFF");
    put_tag(pb, riff_tag);
    loff = ff_start_tag(pb, "LIST");
    put_tag(pb, list_tag);
    return loff;
}
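
/* Build the chunk FourCC for a stream: a two-character stream number
 * followed by "dc" (video), "wb" (audio) or "sb" (XSUB subtitles). */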
static char* avi_stream2fourcc(char* tag, int index, enum CodecType type)
{
    tag[0] = '0';
    tag[1] = '0' + index;
    if (type == CODEC_TYPE_VIDEO) {
        tag[2] = 'd';
        tag[3] = 'c';
    } else if (type == CODEC_TYPE_SUBTITLE) {
        // note: this is not an official code
        tag[2] = 's';
        tag[3] = 'b';
    } else {
        tag[2] = 'w';
        tag[3] = 'b';
    }
    tag[4] = '\0';
    return tag;
}
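
/* Write one INFO entry: tag, little-endian length (including the
 * terminating NUL) and the zero-terminated string, padded to even size. */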
static void avi_write_info_tag(ByteIOContext *pb, const char *tag, const char *str)
{
    int len = strlen(str);
    if (len > 0) {
        len++;
        put_tag(pb, tag);
        put_le32(pb, len);
        put_strz(pb, str);
        if (len & 1)
            put_byte(pb, 0);
    }
}
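
/* Seek back into the headers written earlier and patch the per-stream
 * length fields and, for the first RIFF chunk, the global frame count. */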
static int avi_write_counters(AVFormatContext* s, int riff_id)
{
    ByteIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    int n, au_byterate, au_ssize, au_scale, nb_frames = 0;
    int64_t file_size;
    AVCodecContext* stream;

    file_size = url_ftell(pb);
    for(n = 0; n < s->nb_streams; n++) {
        AVIStream *avist= s->streams[n]->priv_data;

        assert(avist->frames_hdr_strm);
        stream = s->streams[n]->codec;
        url_fseek(pb, avist->frames_hdr_strm, SEEK_SET);
        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);
        if(au_ssize == 0) {
            put_le32(pb, avist->packet_count);
        } else {
            put_le32(pb, avist->audio_strm_length / au_ssize);
        }
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            nb_frames = FFMAX(nb_frames, avist->packet_count);
    }
    if(riff_id == 1) {
        assert(avi->frames_hdr_all);
        url_fseek(pb, avi->frames_hdr_all, SEEK_SET);
        put_le32(pb, nb_frames);
    }
    url_fseek(pb, file_size, SEEK_SET);

    return 0;
}
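
/* Write the file header: the avih chunk, one strl LIST per stream (strh,
 * strf, a JUNK placeholder for the OpenDML master index and an optional
 * vprp chunk), the odml/dmlh extension, the INFO list and JUNK padding,
 * then open the movi LIST that will hold the data chunks. */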
static int avi_write_header(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int bitrate, n, i, nb_frames, au_byterate, au_ssize, au_scale;
    AVCodecContext *stream, *video_enc;
    int64_t list1, list2, strh, strf;
    AVMetadataTag *t = NULL;

    for(n=0;n<s->nb_streams;n++) {
        s->streams[n]->priv_data= av_mallocz(sizeof(AVIStream));
        if(!s->streams[n]->priv_data)
            return AVERROR(ENOMEM);
    }

    /* header list */
    avi->riff_id = 0;
    list1 = avi_start_new_riff(s, pb, "AVI ", "hdrl");

    /* avi header */
    put_tag(pb, "avih");
    put_le32(pb, 14 * 4);
    bitrate = 0;

    video_enc = NULL;
    for(n=0;n<s->nb_streams;n++) {
        stream = s->streams[n]->codec;
        bitrate += stream->bit_rate;
        if (stream->codec_type == CODEC_TYPE_VIDEO)
            video_enc = stream;
    }

    nb_frames = 0;

    if(video_enc){
        put_le32(pb, (uint32_t)(INT64_C(1000000) * video_enc->time_base.num / video_enc->time_base.den));
    } else {
        put_le32(pb, 0);
    }
    put_le32(pb, bitrate / 8); /* XXX: not quite exact */
    put_le32(pb, 0); /* padding */
    if (url_is_streamed(pb))
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_ISINTERLEAVED); /* flags */
    else
        put_le32(pb, AVIF_TRUSTCKTYPE | AVIF_HASINDEX | AVIF_ISINTERLEAVED); /* flags */
    avi->frames_hdr_all = url_ftell(pb); /* remember this offset to fill later */
    put_le32(pb, nb_frames); /* nb frames, filled later */
    put_le32(pb, 0); /* initial frame */
    put_le32(pb, s->nb_streams); /* nb streams */
    put_le32(pb, 1024 * 1024); /* suggested buffer size */
    if(video_enc){
        put_le32(pb, video_enc->width);
        put_le32(pb, video_enc->height);
    } else {
        put_le32(pb, 0);
        put_le32(pb, 0);
    }
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */
    put_le32(pb, 0); /* reserved */

    /* stream list */
    for(i=0;i<n;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        list2 = ff_start_tag(pb, "LIST");
        put_tag(pb, "strl");

        stream = s->streams[i]->codec;

        /* stream generic header */
        strh = ff_start_tag(pb, "strh");
        switch(stream->codec_type) {
        case CODEC_TYPE_SUBTITLE:
            // XSUB subtitles behave like video tracks, other subtitles
            // are not (yet) supported.
            if (stream->codec_id != CODEC_ID_XSUB) break;
        case CODEC_TYPE_VIDEO: put_tag(pb, "vids"); break;
        case CODEC_TYPE_AUDIO: put_tag(pb, "auds"); break;
//      case CODEC_TYPE_TEXT : put_tag(pb, "txts"); break;
        case CODEC_TYPE_DATA : put_tag(pb, "dats"); break;
        }
        if(stream->codec_type == CODEC_TYPE_VIDEO ||
           stream->codec_id == CODEC_ID_XSUB)
            put_le32(pb, stream->codec_tag);
        else
            put_le32(pb, 1);
        put_le32(pb, 0); /* flags */
        put_le16(pb, 0); /* priority */
        put_le16(pb, 0); /* language */
        put_le32(pb, 0); /* initial frame */

        ff_parse_specific_params(stream, &au_byterate, &au_ssize, &au_scale);

        put_le32(pb, au_scale); /* scale */
        put_le32(pb, au_byterate); /* rate */
        av_set_pts_info(s->streams[i], 64, au_scale, au_byterate);

        put_le32(pb, 0); /* start */
        avist->frames_hdr_strm = url_ftell(pb); /* remember this offset to fill later */
        if (url_is_streamed(pb))
            put_le32(pb, AVI_MAX_RIFF_SIZE); /* FIXME: this may be broken, but who cares */
        else
            put_le32(pb, 0); /* length, XXX: filled later */

        /* suggested buffer size */ //FIXME set at the end to largest chunk
        if(stream->codec_type == CODEC_TYPE_VIDEO)
            put_le32(pb, 1024 * 1024);
        else if(stream->codec_type == CODEC_TYPE_AUDIO)
            put_le32(pb, 12 * 1024);
        else
            put_le32(pb, 0);
        put_le32(pb, -1); /* quality */
        put_le32(pb, au_ssize); /* sample size */
        put_le32(pb, 0);
        put_le16(pb, stream->width);
        put_le16(pb, stream->height);
        ff_end_tag(pb, strh);

        if(stream->codec_type != CODEC_TYPE_DATA){
            strf = ff_start_tag(pb, "strf");
            switch(stream->codec_type) {
            case CODEC_TYPE_SUBTITLE:
                // XSUB subtitles behave like video tracks, other subtitles
                // are not (yet) supported.
                if (stream->codec_id != CODEC_ID_XSUB) break;
            case CODEC_TYPE_VIDEO:
                ff_put_bmp_header(pb, stream, ff_codec_bmp_tags, 0);
                break;
            case CODEC_TYPE_AUDIO:
                if (ff_put_wav_header(pb, stream) < 0) {
                    return -1;
                }
                break;
            default:
                return -1;
            }
            ff_end_tag(pb, strf);
            if ((t = av_metadata_get(s->streams[i]->metadata, "strn", NULL, 0))) {
                avi_write_info_tag(s->pb, t->key, t->value);
                t = NULL;
            }
            //FIXME a limitation of metadata conversion system
            else if ((t = av_metadata_get(s->streams[i]->metadata, "INAM", NULL, 0))) {
                avi_write_info_tag(s->pb, "strn", t->value);
                t = NULL;
            }
        }

        if (!url_is_streamed(pb)) {
            unsigned char tag[5];
            int j;

            /* Starting to lay out AVI OpenDML master index.
             * We want to make it JUNK entry for now, since we'd
             * like to get away without making AVI an OpenDML one
             * for compatibility reasons.
             */
            avist->indexes.entry = avist->indexes.ents_allocated = 0;
            avist->indexes.indx_start = ff_start_tag(pb, "JUNK");
            put_le16(pb, 4);        /* wLongsPerEntry */
            put_byte(pb, 0);        /* bIndexSubType (0 == frame index) */
            put_byte(pb, 0);        /* bIndexType (0 == AVI_INDEX_OF_INDEXES) */
            put_le32(pb, 0);        /* nEntriesInUse (will fill out later on) */
            put_tag(pb, avi_stream2fourcc(&tag[0], i, stream->codec_type));
                                    /* dwChunkId */
            put_le64(pb, 0);        /* dwReserved[3]
            put_le32(pb, 0);           Must be 0.    */
            for (j=0; j < AVI_MASTER_INDEX_SIZE * 2; j++)
                 put_le64(pb, 0);
            ff_end_tag(pb, avist->indexes.indx_start);
        }

        if(   stream->codec_type == CODEC_TYPE_VIDEO
           && s->streams[i]->sample_aspect_ratio.num>0
           && s->streams[i]->sample_aspect_ratio.den>0){
            int vprp= ff_start_tag(pb, "vprp");
            AVRational dar = av_mul_q(s->streams[i]->sample_aspect_ratio,
                                      (AVRational){stream->width, stream->height});
            int num, den;
            av_reduce(&num, &den, dar.num, dar.den, 0xFFFF);

            put_le32(pb, 0); //video format   = unknown
            put_le32(pb, 0); //video standard = unknown
            put_le32(pb, lrintf(1.0/av_q2d(stream->time_base)));
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le16(pb, den);
            put_le16(pb, num);
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le32(pb, 1); //progressive FIXME

            put_le32(pb, stream->height);
            put_le32(pb, stream->width );
            put_le32(pb, stream->height);
            put_le32(pb, stream->width );
            put_le32(pb, 0);
            put_le32(pb, 0);

            put_le32(pb, 0);
            put_le32(pb, 0);
            ff_end_tag(pb, vprp);
        }

        ff_end_tag(pb, list2);
    }

    if (!url_is_streamed(pb)) {
        /* AVI could become an OpenDML one, if it grows beyond 2Gb range */
        avi->odml_list = ff_start_tag(pb, "JUNK");
        put_tag(pb, "odml");
        put_tag(pb, "dmlh");
        put_le32(pb, 248);
        for (i = 0; i < 248; i+= 4)
             put_le32(pb, 0);
        ff_end_tag(pb, avi->odml_list);
    }

    ff_end_tag(pb, list1);

    list2 = ff_start_tag(pb, "LIST");
    put_tag(pb, "INFO");
    for (i = 0; *ff_avi_tags[i]; i++) {
        if ((t = av_metadata_get(s->metadata, ff_avi_tags[i], NULL, AV_METADATA_MATCH_CASE)))
            avi_write_info_tag(s->pb, t->key, t->value);
    }
    ff_end_tag(pb, list2);

    /* some padding for easier tag editing */
    list2 = ff_start_tag(pb, "JUNK");
    for (i = 0; i < 1016; i += 4)
        put_le32(pb, 0);
    ff_end_tag(pb, list2);

    avi->movi_list = ff_start_tag(pb, "LIST");
    put_tag(pb, "movi");

    put_flush_packet(pb);

    return 0;
}
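
/* Write one OpenDML "ix??" leaf index chunk per stream for the current RIFF
 * chunk and patch the corresponding entry in that stream's master index
 * (turning the JUNK placeholder into a real "indx" chunk). */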
static int avi_write_ix(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    char tag[5];
    char ix_tag[] = "ix00";
    int i, j;

    assert(!url_is_streamed(pb));

    if (avi->riff_id > AVI_MASTER_INDEX_SIZE)
        return -1;

    for (i=0;i<s->nb_streams;i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        int64_t ix, pos;

        avi_stream2fourcc(&tag[0], i, s->streams[i]->codec->codec_type);
        ix_tag[3] = '0' + i;

        /* Writing AVI OpenDML leaf index chunk */
        ix = url_ftell(pb);
        put_tag(pb, &ix_tag[0]);     /* ix?? */
        put_le32(pb, avist->indexes.entry * 8 + 24);
                                     /* chunk size */
        put_le16(pb, 2);             /* wLongsPerEntry */
        put_byte(pb, 0);             /* bIndexSubType (0 == frame index) */
        put_byte(pb, 1);             /* bIndexType (1 == AVI_INDEX_OF_CHUNKS) */
        put_le32(pb, avist->indexes.entry);
                                     /* nEntriesInUse */
        put_tag(pb, &tag[0]);        /* dwChunkId */
        put_le64(pb, avi->movi_list);/* qwBaseOffset */
        put_le32(pb, 0);             /* dwReserved_3 (must be 0) */

        for (j=0; j<avist->indexes.entry; j++) {
            AVIIentry* ie = avi_get_ientry(&avist->indexes, j);
            put_le32(pb, ie->pos + 8);
            put_le32(pb, ((uint32_t)ie->len & ~0x80000000) |
                         (ie->flags & 0x10 ? 0 : 0x80000000));
        }
        put_flush_packet(pb);
        pos = url_ftell(pb);

        /* Updating one entry in the AVI OpenDML master index */
        url_fseek(pb, avist->indexes.indx_start - 8, SEEK_SET);
        put_tag(pb, "indx");                 /* enabling this entry */
        url_fskip(pb, 8);
        put_le32(pb, avi->riff_id);          /* nEntriesInUse */
        url_fskip(pb, 16*avi->riff_id);
        put_le64(pb, ix);                    /* qwOffset */
        put_le32(pb, pos - ix);              /* dwSize */
        put_le32(pb, avist->indexes.entry);  /* dwDuration */

        url_fseek(pb, pos, SEEK_SET);
    }
    return 0;
}
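
/* Write the legacy idx1 index for the first RIFF chunk, merging the
 * per-stream index entries in ascending file-position order, then update
 * the header counters. */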
static int avi_write_idx1(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    AVIContext *avi = s->priv_data;
    int64_t idx_chunk;
    int i;
    char tag[5];

    if (!url_is_streamed(pb)) {
        AVIStream *avist;
        AVIIentry* ie = 0, *tie;
        int empty, stream_id = -1;

        idx_chunk = ff_start_tag(pb, "idx1");
        for(i=0; i<s->nb_streams; i++){
            avist= s->streams[i]->priv_data;
            avist->entry=0;
        }
        do {
            empty = 1;
            for (i=0; i<s->nb_streams; i++) {
                avist= s->streams[i]->priv_data;
                if (avist->indexes.entry <= avist->entry)
                    continue;

                tie = avi_get_ientry(&avist->indexes, avist->entry);
                if (empty || tie->pos < ie->pos) {
                    ie = tie;
                    stream_id = i;
                }
                empty = 0;
            }
            if (!empty) {
                avist= s->streams[stream_id]->priv_data;
                avi_stream2fourcc(&tag[0], stream_id,
                                  s->streams[stream_id]->codec->codec_type);
                put_tag(pb, &tag[0]);
                put_le32(pb, ie->flags);
                put_le32(pb, ie->pos);
                put_le32(pb, ie->len);
                avist->entry++;
            }
        } while (!empty);
        ff_end_tag(pb, idx_chunk);

        avi_write_counters(s, avi->riff_id);
    }
    return 0;
}
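
/* Write one data chunk. For streams without block_align, empty chunks are
 * emitted until the chunk count catches up with the packet's dts; when the
 * current RIFF chunk exceeds AVI_MAX_RIFF_SIZE a new "AVIX" RIFF chunk is
 * started, and for seekable output an index entry is recorded. */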
static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned char tag[5];
    unsigned int flags=0;
    const int stream_index= pkt->stream_index;
    AVIStream *avist= s->streams[stream_index]->priv_data;
    AVCodecContext *enc= s->streams[stream_index]->codec;
    int size= pkt->size;

//    av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %d\n", pkt->dts, avi->packet_count[stream_index], stream_index);
    while(enc->block_align==0 && pkt->dts != AV_NOPTS_VALUE && pkt->dts > avist->packet_count){
        AVPacket empty_packet;

        av_init_packet(&empty_packet);
        empty_packet.size= 0;
        empty_packet.data= NULL;
        empty_packet.stream_index= stream_index;
        avi_write_packet(s, &empty_packet);
//        av_log(s, AV_LOG_DEBUG, "dup %"PRId64" %d\n", pkt->dts, avi->packet_count[stream_index]);
    }
    avist->packet_count++;

    // Make sure to put an OpenDML chunk when the file size exceeds the limits
    if (!url_is_streamed(pb) &&
        (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE)) {

        avi_write_ix(s);
        ff_end_tag(pb, avi->movi_list);

        if (avi->riff_id == 1)
            avi_write_idx1(s);

        ff_end_tag(pb, avi->riff_start);
        avi->movi_list = avi_start_new_riff(s, pb, "AVIX", "movi");
    }

    avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
    if(pkt->flags&PKT_FLAG_KEY)
        flags = 0x10;
    if (enc->codec_type == CODEC_TYPE_AUDIO) {
        avist->audio_strm_length += size;
    }

    if (!url_is_streamed(s->pb)) {
        AVIIndex* idx = &avist->indexes;
        int cl = idx->entry / AVI_INDEX_CLUSTER_SIZE;
        int id = idx->entry % AVI_INDEX_CLUSTER_SIZE;
        if (idx->ents_allocated <= idx->entry) {
            idx->cluster = av_realloc(idx->cluster, (cl+1)*sizeof(void*));
            if (!idx->cluster)
                return -1;
            idx->cluster[cl] = av_malloc(AVI_INDEX_CLUSTER_SIZE*sizeof(AVIIentry));
            if (!idx->cluster[cl])
                return -1;
            idx->ents_allocated += AVI_INDEX_CLUSTER_SIZE;
        }

        idx->cluster[cl][id].flags = flags;
        idx->cluster[cl][id].pos = url_ftell(pb) - avi->movi_list;
        idx->cluster[cl][id].len = size;
        idx->entry++;
    }

    put_buffer(pb, tag, 4);
    put_le32(pb, size);
    put_buffer(pb, pkt->data, size);
    if (size & 1)
        put_byte(pb, 0);

    put_flush_packet(pb);
    return 0;
}
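
/* Finish the file: close the movi LIST, write the idx1 index (single-RIFF
 * file) or the last leaf indexes and the dmlh total frame count (OpenDML
 * file), update the counters, and free the in-memory index clusters. */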
static int avi_write_trailer(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int res = 0;
    int i, j, n, nb_frames;
    int64_t file_size;

    if (!url_is_streamed(pb)){
        if (avi->riff_id == 1) {
            ff_end_tag(pb, avi->movi_list);
            res = avi_write_idx1(s);
            ff_end_tag(pb, avi->riff_start);
        } else {
            avi_write_ix(s);
            ff_end_tag(pb, avi->movi_list);
            ff_end_tag(pb, avi->riff_start);

            file_size = url_ftell(pb);
            url_fseek(pb, avi->odml_list - 8, SEEK_SET);
            put_tag(pb, "LIST"); /* Making this AVI OpenDML one */
            url_fskip(pb, 16);

            for (n=nb_frames=0;n<s->nb_streams;n++) {
                AVCodecContext *stream = s->streams[n]->codec;
                AVIStream *avist= s->streams[n]->priv_data;

                if (stream->codec_type == CODEC_TYPE_VIDEO) {
                    if (nb_frames < avist->packet_count)
                        nb_frames = avist->packet_count;
                } else {
                    if (stream->codec_id == CODEC_ID_MP2 || stream->codec_id == CODEC_ID_MP3) {
                        nb_frames += avist->packet_count;
                    }
                }
            }
            put_le32(pb, nb_frames);
            url_fseek(pb, file_size, SEEK_SET);

            avi_write_counters(s, avi->riff_id);
        }
    }
    put_flush_packet(pb);

    for (i=0; i<s->nb_streams; i++) {
        AVIStream *avist= s->streams[i]->priv_data;
        for (j=0; j<avist->indexes.ents_allocated/AVI_INDEX_CLUSTER_SIZE; j++)
            av_free(avist->indexes.cluster[j]);
        av_freep(&avist->indexes.cluster);
        avist->indexes.ents_allocated = avist->indexes.entry = 0;
    }

    return res;
}
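
/* Muxer definition: short name, long name, MIME type, extension, context
 * size, default audio (MP2) and video (MPEG-4) codecs, and the three write
 * callbacks above; the remaining fields use designated initializers. */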
AVOutputFormat avi_muxer = {
    "avi",
    NULL_IF_CONFIG_SMALL("AVI format"),
    "video/x-msvideo",
    "avi",
    sizeof(AVIContext),
    CODEC_ID_MP2,
    CODEC_ID_MPEG4,
    avi_write_header,
    avi_write_packet,
    avi_write_trailer,
    .codec_tag= (const AVCodecTag* const []){ff_codec_bmp_tags, ff_codec_wav_tags, 0},
    .flags= AVFMT_VARIABLE_FPS,
    .metadata_conv = ff_avi_metadata_conv,
};