/*
 * AVI demuxer
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//#define DEBUG
//#define DEBUG_SEEK

#include <strings.h>
#include "libavutil/intreadwrite.h"
#include "libavutil/bswap.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "avi.h"
#include "dv.h"
#include "riff.h"

#undef NDEBUG
#include <assert.h>
typedef struct AVIStream {
    int64_t frame_offset; /* current frame (video) or byte (audio) counter
                             (used to compute the pts) */
    int remaining;
    int packet_size;

    int scale;
    int rate;
    int sample_size; /* size of one sample (or packet) (in the rate/scale sense) in bytes */

    int64_t cum_len; /* temporary storage (used during seek) */

    int prefix;      ///< normally 'd'<<8 + 'c' or 'w'<<8 + 'b'
    int prefix_count;
    uint32_t pal[256];
    int has_pal;
    int dshow_block_align; ///< block align variable used to emulate bugs in the MS dshow demuxer

    AVFormatContext *sub_ctx;
    AVPacket sub_pkt;
    uint8_t *sub_buffer;
} AVIStream;
typedef struct {
    int64_t riff_end;
    int64_t movi_end;
    int64_t fsize;
    int64_t movi_list;
    int64_t last_pkt_pos;
    int index_loaded;
    int is_odml;
    int non_interleaved;
    int stream_index;
    DVDemuxContext* dv_demux;
    int odml_depth;
#define MAX_ODML_DEPTH 1000
} AVIContext;
static const char avi_headers[][8] = {
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', ' ' },
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', 'X' },
    { 'R', 'I', 'F', 'F', 'A', 'V', 'I', 0x19},
    { 'O', 'N', '2', ' ', 'O', 'N', '2', 'f' },
    { 'R', 'I', 'F', 'F', 'A', 'M', 'V', ' ' },
    { 0 }
};

static int avi_load_index(AVFormatContext *s);
static int guess_ni_flag(AVFormatContext *s);
#ifdef DEBUG
static void print_tag(const char *str, unsigned int tag, int size)
{
    dprintf(NULL, "%s: tag=%c%c%c%c size=0x%x\n",
            str, tag & 0xff,
            (tag >> 8) & 0xff,
            (tag >> 16) & 0xff,
            (tag >> 24) & 0xff,
            size);
}
#endif
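
/* Return the duration of one chunk in stream time units: for streams with a
 * fixed sample_size the byte length is the duration, otherwise one chunk is
 * one frame (rounded up to whole blocks when a dshow block_align is set). */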
static inline int get_duration(AVIStream *ast, int len){
    if(ast->sample_size){
        return len;
    }else if (ast->dshow_block_align){
        return (len + ast->dshow_block_align - 1)/ast->dshow_block_align;
    }else
        return 1;
}
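
/* Validate the RIFF/AVI file header and record where the RIFF chunk ends. */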
static int get_riff(AVFormatContext *s, ByteIOContext *pb)
{
    AVIContext *avi = s->priv_data;
    char header[8];
    int i;

    /* check RIFF header */
    get_buffer(pb, header, 4);
    avi->riff_end  = get_le32(pb);  /* RIFF chunk size */
    avi->riff_end += url_ftell(pb); /* RIFF chunk end */
    get_buffer(pb, header+4, 4);

    for(i=0; avi_headers[i][0]; i++)
        if(!memcmp(header, avi_headers[i], 8))
            break;
    if(!avi_headers[i][0])
        return -1;

    if(header[7] == 0x19)
        av_log(s, AV_LOG_INFO, "This file has been generated by a totally broken muxer.\n");

    return 0;
}
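
/* Parse an OpenDML ('indx') index chunk. Standard indexes (index_type 1) are
 * converted into index entries directly; super indexes (index_type 0) hold
 * file offsets of further index chunks, which are read recursively up to
 * MAX_ODML_DEPTH levels deep. */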
static int read_braindead_odml_indx(AVFormatContext *s, int frame_num){
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int longs_pre_entry= get_le16(pb);
    int index_sub_type = get_byte(pb);
    int index_type     = get_byte(pb);
    int entries_in_use = get_le32(pb);
    int chunk_id       = get_le32(pb);
    int64_t base       = get_le64(pb);
    int stream_id= 10*((chunk_id&0xFF) - '0') + (((chunk_id>>8)&0xFF) - '0');
    AVStream *st;
    AVIStream *ast;
    int i;
    int64_t last_pos= -1;
    int64_t filesize= url_fsize(s->pb);

#ifdef DEBUG_SEEK
    av_log(s, AV_LOG_ERROR, "longs_pre_entry:%d index_type:%d entries_in_use:%d chunk_id:%X base:%16"PRIX64"\n",
           longs_pre_entry,index_type, entries_in_use, chunk_id, base);
#endif

    if(stream_id >= s->nb_streams || stream_id < 0)
        return -1;
    st= s->streams[stream_id];
    ast = st->priv_data;

    if(index_sub_type)
        return -1;

    get_le32(pb);

    if(index_type && longs_pre_entry != 2)
        return -1;
    if(index_type>1)
        return -1;

    if(filesize > 0 && base >= filesize){
        av_log(s, AV_LOG_ERROR, "ODML index invalid\n");
        if(base>>32 == (base & 0xFFFFFFFF) && (base & 0xFFFFFFFF) < filesize && filesize <= 0xFFFFFFFF)
            base &= 0xFFFFFFFF;
        else
            return -1;
    }

    for(i=0; i<entries_in_use; i++){
        if(index_type){
            int64_t pos= get_le32(pb) + base - 8;
            int len    = get_le32(pb);
            int key= len >= 0;
            len &= 0x7FFFFFFF;

#ifdef DEBUG_SEEK
            av_log(s, AV_LOG_ERROR, "pos:%"PRId64", len:%X\n", pos, len);
#endif
            if(url_feof(pb))
                return -1;

            if(last_pos == pos || pos == base - 8)
                avi->non_interleaved= 1;
            if(last_pos != pos && (len || !ast->sample_size))
                av_add_index_entry(st, pos, ast->cum_len, len, 0, key ? AVINDEX_KEYFRAME : 0);

            ast->cum_len += get_duration(ast, len);
            last_pos= pos;
        }else{
            int64_t offset, pos;
            int duration;
            offset = get_le64(pb);
            get_le32(pb);           /* size */
            duration = get_le32(pb);

            if(url_feof(pb))
                return -1;

            pos = url_ftell(pb);

            if(avi->odml_depth > MAX_ODML_DEPTH){
                av_log(s, AV_LOG_ERROR, "Too deeply nested ODML indexes\n");
                return -1;
            }

            url_fseek(pb, offset+8, SEEK_SET);
            avi->odml_depth++;
            read_braindead_odml_indx(s, frame_num);
            avi->odml_depth--;
            frame_num += duration;

            url_fseek(pb, pos, SEEK_SET);
        }
    }
    avi->index_loaded=1;
    return 0;
}
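
/* For non-interleaved files whose index has a single huge entry per stream,
 * split that entry into roughly 1KB pieces so seeking inside it is possible. */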
static void clean_index(AVFormatContext *s){
    int i;
    int64_t j;

    for(i=0; i<s->nb_streams; i++){
        AVStream *st = s->streams[i];
        AVIStream *ast = st->priv_data;
        int n= st->nb_index_entries;
        int max= ast->sample_size;
        int64_t pos, size, ts;

        if(n != 1 || ast->sample_size==0)
            continue;

        while(max < 1024) max+=max;

        pos= st->index_entries[0].pos;
        size= st->index_entries[0].size;
        ts= st->index_entries[0].timestamp;

        for(j=0; j<size; j+=max){
            av_add_index_entry(st, pos+j, ts+j, FFMIN(max, size-j), 0, AVINDEX_KEYFRAME);
        }
    }
}
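
/* Read a metadata value of 'size' bytes and store it under the chunk tag,
 * either in the stream's metadata (if st is set) or in the file's. */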
static int avi_read_tag(AVFormatContext *s, AVStream *st, uint32_t tag, uint32_t size)
{
    ByteIOContext *pb = s->pb;
    char key[5] = {0}, *value;

    size += (size & 1);

    if (size == UINT_MAX)
        return -1;
    value = av_malloc(size+1);
    if (!value)
        return -1;
    get_buffer(pb, value, size);
    value[size]=0;

    AV_WL32(key, tag);

    if(st)
        return av_metadata_set2(&st->metadata, key, value,
                                AV_METADATA_DONT_STRDUP_VAL);
    else
        return av_metadata_set2(&s->metadata, key, value,
                                AV_METADATA_DONT_STRDUP_VAL);
}
static void avi_read_info(AVFormatContext *s, uint64_t end)
{
    while (url_ftell(s->pb) < end) {
        uint32_t tag  = get_le32(s->pb);
        uint32_t size = get_le32(s->pb);
        avi_read_tag(s, NULL, tag, size);
    }
}
static const char months[12][4] = { "Jan", "Feb", "Mar", "Apr", "May", "Jun",
                                    "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" };

static void avi_metadata_creation_time(AVMetadata **metadata, char *date)
{
    char month[4], time[9], buffer[64];
    int i, day, year;
    /* parse standard AVI date format (ie. "Mon Mar 10 15:04:43 2003") */
    if (sscanf(date, "%*3s%*[ ]%3s%*[ ]%2d%*[ ]%8s%*[ ]%4d",
               month, &day, time, &year) == 4)
        for (i=0; i<12; i++)
            if (!strcasecmp(month, months[i])) {
                snprintf(buffer, sizeof(buffer), "%.4d-%.2d-%.2d %s",
                         year, i+1, day, time);
                av_metadata_set2(metadata, "creation_time", buffer, 0);
            }
}
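
/* Parse the AVI header chunks (avih, strh, strf, indx, vprp, INFO, ...) up to
 * the start of the 'movi' list, creating one AVStream per stream header. */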
static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    unsigned int tag, tag1, handler;
    int codec_type, stream_index, frame_period, bit_rate;
    unsigned int size;
    int i;
    AVStream *st;
    AVIStream *ast = NULL;
    int avih_width=0, avih_height=0;
    int amv_file_format=0;
    uint64_t list_end = 0;

    avi->stream_index= -1;

    if (get_riff(s, pb) < 0)
        return -1;

    avi->fsize = url_fsize(pb);
    if(avi->fsize<=0)
        avi->fsize= avi->riff_end == 8 ? INT64_MAX : avi->riff_end;

    /* first list tag */
    stream_index = -1;
    codec_type = -1;
    frame_period = 0;
    for(;;) {
        if (url_feof(pb))
            goto fail;
        tag = get_le32(pb);
        size = get_le32(pb);
#ifdef DEBUG
        print_tag("tag", tag, size);
#endif

        switch(tag) {
        case MKTAG('L', 'I', 'S', 'T'):
            list_end = url_ftell(pb) + size;
            /* Ignored, except at start of video packets. */
            tag1 = get_le32(pb);
#ifdef DEBUG
            print_tag("list", tag1, 0);
#endif
            if (tag1 == MKTAG('m', 'o', 'v', 'i')) {
                avi->movi_list = url_ftell(pb) - 4;
                if(size) avi->movi_end = avi->movi_list + size + (size & 1);
                else     avi->movi_end = url_fsize(pb);
                dprintf(NULL, "movi end=%"PRIx64"\n", avi->movi_end);
                goto end_of_header;
            }
            else if (tag1 == MKTAG('I', 'N', 'F', 'O'))
                avi_read_info(s, list_end);

            break;
        case MKTAG('I', 'D', 'I', 'T'): {
            unsigned char date[64] = {0};
            size += (size & 1);
            size -= get_buffer(pb, date, FFMIN(size, sizeof(date)-1));
            url_fskip(pb, size);
            avi_metadata_creation_time(&s->metadata, date);
            break;
        }
        case MKTAG('d', 'm', 'l', 'h'):
            avi->is_odml = 1;
            url_fskip(pb, size + (size & 1));
            break;
        case MKTAG('a', 'm', 'v', 'h'):
            amv_file_format=1;
        case MKTAG('a', 'v', 'i', 'h'):
            /* AVI header */
            /* using frame_period is bad idea */
            frame_period = get_le32(pb);
            bit_rate = get_le32(pb) * 8;
            get_le32(pb);
            avi->non_interleaved |= get_le32(pb) & AVIF_MUSTUSEINDEX;

            url_fskip(pb, 2 * 4);
            get_le32(pb);
            get_le32(pb);
            avih_width=get_le32(pb);
            avih_height=get_le32(pb);

            url_fskip(pb, size - 10 * 4);
            break;
        case MKTAG('s', 't', 'r', 'h'):
            /* stream header */

            tag1 = get_le32(pb);
            handler = get_le32(pb); /* codec tag */

            if(tag1 == MKTAG('p', 'a', 'd', 's')){
                url_fskip(pb, size - 8);
                break;
            }else{
                stream_index++;
                st = av_new_stream(s, stream_index);
                if (!st)
                    goto fail;

                ast = av_mallocz(sizeof(AVIStream));
                if (!ast)
                    goto fail;
                st->priv_data = ast;
            }
            if(amv_file_format)
                tag1 = stream_index ? MKTAG('a','u','d','s') : MKTAG('v','i','d','s');

#ifdef DEBUG
            print_tag("strh", tag1, -1);
#endif
            if(tag1 == MKTAG('i', 'a', 'v', 's') || tag1 == MKTAG('i', 'v', 'a', 's')){
                int64_t dv_dur;

                /*
                 * After some consideration -- I don't think we
                 * have to support anything but DV in type1 AVIs.
                 */
                if (s->nb_streams != 1)
                    goto fail;

                if (handler != MKTAG('d', 'v', 's', 'd') &&
                    handler != MKTAG('d', 'v', 'h', 'd') &&
                    handler != MKTAG('d', 'v', 's', 'l'))
                    goto fail;

                ast = s->streams[0]->priv_data;
                av_freep(&s->streams[0]->codec->extradata);
                av_freep(&s->streams[0]->codec);
                av_freep(&s->streams[0]);
                s->nb_streams = 0;
                if (CONFIG_DV_DEMUXER) {
                    avi->dv_demux = dv_init_demux(s);
                    if (!avi->dv_demux)
                        goto fail;
                }
                s->streams[0]->priv_data = ast;
                url_fskip(pb, 3 * 4);
                ast->scale = get_le32(pb);
                ast->rate = get_le32(pb);
                url_fskip(pb, 4);  /* start time */

                dv_dur = get_le32(pb);
                if (ast->scale > 0 && ast->rate > 0 && dv_dur > 0) {
                    dv_dur *= AV_TIME_BASE;
                    s->duration = av_rescale(dv_dur, ast->scale, ast->rate);
                }
                /*
                 * else, leave duration alone; timing estimation in utils.c
                 * will make a guess based on bitrate.
                 */

                stream_index = s->nb_streams - 1;
                url_fskip(pb, size - 9*4);
                break;
            }

            assert(stream_index < s->nb_streams);
            st->codec->stream_codec_tag= handler;

            get_le32(pb); /* flags */
            get_le16(pb); /* priority */
            get_le16(pb); /* language */
            get_le32(pb); /* initial frame */
            ast->scale = get_le32(pb);
            ast->rate = get_le32(pb);
            if(!(ast->scale && ast->rate)){
                av_log(s, AV_LOG_WARNING, "scale/rate is %u/%u which is invalid. (This file has been generated by broken software.)\n", ast->scale, ast->rate);
                if(frame_period){
                    ast->rate = 1000000;
                    ast->scale = frame_period;
                }else{
                    ast->rate = 25;
                    ast->scale = 1;
                }
            }
            av_set_pts_info(st, 64, ast->scale, ast->rate);

            ast->cum_len=get_le32(pb); /* start */
            st->nb_frames = get_le32(pb);

            st->start_time = 0;
            get_le32(pb); /* buffer size */
            get_le32(pb); /* quality */
            ast->sample_size = get_le32(pb); /* sample size */
            ast->cum_len *= FFMAX(1, ast->sample_size);
//            av_log(s, AV_LOG_DEBUG, "%d %d %d %d\n", ast->rate, ast->scale, ast->start, ast->sample_size);

            switch(tag1) {
            case MKTAG('v', 'i', 'd', 's'):
                codec_type = AVMEDIA_TYPE_VIDEO;

                ast->sample_size = 0;
                break;
            case MKTAG('a', 'u', 'd', 's'):
                codec_type = AVMEDIA_TYPE_AUDIO;
                break;
            case MKTAG('t', 'x', 't', 's'):
                codec_type = AVMEDIA_TYPE_SUBTITLE;
                break;
            case MKTAG('d', 'a', 't', 's'):
                codec_type = AVMEDIA_TYPE_DATA;
                break;
            default:
                av_log(s, AV_LOG_ERROR, "unknown stream type %X\n", tag1);
                goto fail;
            }
            if(ast->sample_size == 0)
                st->duration = st->nb_frames;
            ast->frame_offset= ast->cum_len;
            url_fskip(pb, size - 12 * 4);
            break;
        case MKTAG('s', 't', 'r', 'f'):
            /* stream header */
            if (stream_index >= (unsigned)s->nb_streams || avi->dv_demux) {
                url_fskip(pb, size);
            } else {
                uint64_t cur_pos = url_ftell(pb);
                if (cur_pos < list_end)
                    size = FFMIN(size, list_end - cur_pos);
                st = s->streams[stream_index];
                switch(codec_type) {
                case AVMEDIA_TYPE_VIDEO:
                    if(amv_file_format){
                        st->codec->width=avih_width;
                        st->codec->height=avih_height;
                        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                        st->codec->codec_id = CODEC_ID_AMV;
                        url_fskip(pb, size);
                        break;
                    }
                    get_le32(pb); /* size */
                    st->codec->width = get_le32(pb);
                    st->codec->height = (int32_t)get_le32(pb);
                    get_le16(pb); /* planes */
                    st->codec->bits_per_coded_sample= get_le16(pb); /* depth */
                    tag1 = get_le32(pb);
                    get_le32(pb); /* ImageSize */
                    get_le32(pb); /* XPelsPerMeter */
                    get_le32(pb); /* YPelsPerMeter */
                    get_le32(pb); /* ClrUsed */
                    get_le32(pb); /* ClrImportant */

                    if (tag1 == MKTAG('D', 'X', 'S', 'B') || tag1 == MKTAG('D','X','S','A')) {
                        st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
                        st->codec->codec_tag = tag1;
                        st->codec->codec_id = CODEC_ID_XSUB;
                        break;
                    }

                    if(size > 10*4 && size<(1<<30)){
                        st->codec->extradata_size= size - 10*4;
                        st->codec->extradata= av_malloc(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                        if (!st->codec->extradata) {
                            st->codec->extradata_size= 0;
                            return AVERROR(ENOMEM);
                        }
                        get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
                    }

                    if(st->codec->extradata_size & 1) //FIXME check if the encoder really did this correctly
                        get_byte(pb);

                    /* Extract palette from extradata if bpp <= 8. */
                    /* This code assumes that extradata contains only palette. */
                    /* This is true for all paletted codecs implemented in FFmpeg. */
                    if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
                        st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
#if HAVE_BIGENDIAN
                        for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
                            st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
#else
                        memcpy(st->codec->palctrl->palette, st->codec->extradata,
                               FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
#endif
                        st->codec->palctrl->palette_changed = 1;
                    }

#ifdef DEBUG
                    print_tag("video", tag1, 0);
#endif
                    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
                    st->codec->codec_tag = tag1;
                    st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, tag1);
                    st->need_parsing = AVSTREAM_PARSE_HEADERS; // This is needed to get the pict type which is necessary for generating correct pts.

                    // Support "Resolution 1:1" for Avid AVI Codec
                    if(tag1 == MKTAG('A', 'V', 'R', 'n') &&
                       st->codec->extradata_size >= 31 &&
                       !memcmp(&st->codec->extradata[28], "1:1", 3))
                        st->codec->codec_id = CODEC_ID_RAWVIDEO;

                    if(st->codec->codec_tag==0 && st->codec->height > 0 && st->codec->extradata_size < 1U<<30){
                        st->codec->extradata_size+= 9;
                        st->codec->extradata= av_realloc(st->codec->extradata, st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                        if(st->codec->extradata)
                            memcpy(st->codec->extradata + st->codec->extradata_size - 9, "BottomUp", 9);
                    }
                    st->codec->height= FFABS(st->codec->height);

//                    url_fskip(pb, size - 5 * 4);
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    ff_get_wav_header(pb, st->codec, size);
                    ast->dshow_block_align= st->codec->block_align;
                    if(ast->sample_size && st->codec->block_align && ast->sample_size != st->codec->block_align){
                        av_log(s, AV_LOG_WARNING, "sample size (%d) != block align (%d)\n", ast->sample_size, st->codec->block_align);
                        ast->sample_size= st->codec->block_align;
                    }
                    if (size&1) /* 2-aligned (fix for Stargate SG-1 - 3x18 - Shades of Grey.avi) */
                        url_fskip(pb, 1);
                    /* Force parsing as several audio frames can be in
                     * one packet and timestamps refer to packet start. */
                    st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
                    /* ADTS header is in extradata, AAC without header must be
                     * stored as exact frames. Parser not needed and it will
                     * fail. */
                    if (st->codec->codec_id == CODEC_ID_AAC && st->codec->extradata_size)
                        st->need_parsing = AVSTREAM_PARSE_NONE;
                    /* AVI files with Xan DPCM audio (wrongly) declare PCM
                     * audio in the header but have Axan as stream_codec_tag. */
                    if (st->codec->stream_codec_tag == AV_RL32("Axan")){
                        st->codec->codec_id  = CODEC_ID_XAN_DPCM;
                        st->codec->codec_tag = 0;
                    }
                    if (amv_file_format){
                        st->codec->codec_id  = CODEC_ID_ADPCM_IMA_AMV;
                        ast->dshow_block_align = 0;
                    }
                    break;
                case AVMEDIA_TYPE_SUBTITLE:
                    st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
                    st->codec->codec_id   = CODEC_ID_PROBE;
                    break;
                default:
                    st->codec->codec_type = AVMEDIA_TYPE_DATA;
                    st->codec->codec_id= CODEC_ID_NONE;
                    st->codec->codec_tag= 0;
                    url_fskip(pb, size);
                    break;
                }
            }
            break;
        case MKTAG('i', 'n', 'd', 'x'):
            i= url_ftell(pb);
            if(!url_is_streamed(pb) && !(s->flags & AVFMT_FLAG_IGNIDX)){
                read_braindead_odml_indx(s, 0);
            }
            url_fseek(pb, i+size, SEEK_SET);
            break;
        case MKTAG('v', 'p', 'r', 'p'):
            if(stream_index < (unsigned)s->nb_streams && size > 9*4){
                AVRational active, active_aspect;

                st = s->streams[stream_index];
                get_le32(pb);
                get_le32(pb);
                get_le32(pb);
                get_le32(pb);
                get_le32(pb);

                active_aspect.den= get_le16(pb);
                active_aspect.num= get_le16(pb);
                active.num       = get_le32(pb);
                active.den       = get_le32(pb);
                get_le32(pb); //nbFieldsPerFrame

                if(active_aspect.num && active_aspect.den && active.num && active.den){
                    st->sample_aspect_ratio= av_div_q(active_aspect, active);
//av_log(s, AV_LOG_ERROR, "vprp %d/%d %d/%d\n", active_aspect.num, active_aspect.den, active.num, active.den);
                }
                size -= 9*4;
            }
            url_fseek(pb, size, SEEK_CUR);
            break;
        case MKTAG('s', 't', 'r', 'n'):
            if(s->nb_streams){
                avi_read_tag(s, s->streams[s->nb_streams-1], tag, size);
                break;
            }
        default:
            if(size > 1000000){
                av_log(s, AV_LOG_ERROR, "Something went wrong during header parsing, "
                                        "I will ignore it and try to continue anyway.\n");
                avi->movi_list = url_ftell(pb) - 4;
                avi->movi_end  = url_fsize(pb);
                goto end_of_header;
            }
            /* skip tag */
            size += (size & 1);
            url_fskip(pb, size);
            break;
        }
    }
 end_of_header:
    /* check stream number */
    if (stream_index != s->nb_streams - 1) {
 fail:
        return -1;
    }

    if(!avi->index_loaded && !url_is_streamed(pb))
        avi_load_index(s);
    avi->index_loaded = 1;
    avi->non_interleaved |= guess_ni_flag(s);
    for(i=0; i<s->nb_streams; i++){
        AVStream *st = s->streams[i];
        if(st->nb_index_entries)
            break;
    }
    if(i==s->nb_streams && avi->non_interleaved) {
        av_log(s, AV_LOG_WARNING, "non-interleaved AVI without index, switching to interleaved\n");
        avi->non_interleaved=0;
    }

    if(avi->non_interleaved) {
        av_log(s, AV_LOG_INFO, "non-interleaved AVI\n");
        clean_index(s);
    }

    ff_metadata_conv_ctx(s, NULL, ff_avi_metadata_conv);

    return 0;
}
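
/* Detect a GAB2 subtitle packet, extract the embedded subtitle file and open
 * it with a sub-demuxer so it can be read like a regular subtitle stream. */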
static int read_gab2_sub(AVStream *st, AVPacket *pkt) {
    if (!strcmp(pkt->data, "GAB2") && AV_RL16(pkt->data+5) == 2) {
        uint8_t desc[256], *d = desc;
        uint8_t *end, *ptr = pkt->data+7;
        unsigned int size, desc_len = bytestream_get_le32(&ptr);
        int score = AVPROBE_SCORE_MAX / 2;
        AVIStream *ast = st->priv_data;
        AVInputFormat *sub_demuxer;
        AVRational time_base;
        ByteIOContext *pb;
        AVProbeData pd;

        if (desc_len > FFMAX(pkt->size-17, 0))
            return 0;

        end = ptr + desc_len;
        while (ptr < end-1) {
            uint8_t tmp;
            uint32_t ch;
            GET_UTF16(ch, ptr < end-1 ? bytestream_get_le16(&ptr) : 0, break;);
            PUT_UTF8(ch, tmp, if(d-desc < sizeof(desc)-1) *d++ = tmp;);
        }
        *d = 0;
        if (*desc)
            av_metadata_set2(&st->metadata, "title", desc, 0);

        ptr = end + 2;
        size = bytestream_get_le32(&ptr);
        size = FFMIN(size, pkt->size+pkt->data-ptr);

        pd = (AVProbeData) { .buf = ptr, .buf_size = size };
        if (!(sub_demuxer = av_probe_input_format2(&pd, 1, &score)))
            return 0;

        pb = av_alloc_put_byte(ptr, size, 0, NULL, NULL, NULL, NULL);
        if (!av_open_input_stream(&ast->sub_ctx, pb, "", sub_demuxer, NULL)) {
            av_read_packet(ast->sub_ctx, &ast->sub_pkt);
            *st->codec = *ast->sub_ctx->streams[0]->codec;
            ast->sub_ctx->streams[0]->codec->extradata = NULL;
            time_base = ast->sub_ctx->streams[0]->time_base;
            av_set_pts_info(st, 64, time_base.num, time_base.den);
        }
        ast->sub_buffer = pkt->data;
        memset(pkt, 0, sizeof(*pkt));
        return 1;
    }
    return 0;
}
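
/* Check whether any embedded (GAB2) subtitle packet is due before the next
 * regular packet of next_st; if so, return its stream and fill pkt with it,
 * otherwise return NULL. */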
static AVStream *get_subtitle_pkt(AVFormatContext *s, AVStream *next_st,
                                  AVPacket *pkt)
{
    AVIStream *ast, *next_ast = next_st->priv_data;
    int64_t ts, next_ts, ts_min = INT64_MAX;
    AVStream *st, *sub_st = NULL;
    int i;

    next_ts = av_rescale_q(next_ast->frame_offset, next_st->time_base,
                           AV_TIME_BASE_Q);

    for (i=0; i<s->nb_streams; i++) {
        st = s->streams[i];
        ast = st->priv_data;
        if (st->discard < AVDISCARD_ALL && ast && ast->sub_pkt.data) {
            ts = av_rescale_q(ast->sub_pkt.dts, st->time_base, AV_TIME_BASE_Q);
            if (ts <= next_ts && ts < ts_min) {
                ts_min = ts;
                sub_st = st;
            }
        }
    }

    if (sub_st) {
        ast = sub_st->priv_data;
        *pkt = ast->sub_pkt;
        pkt->stream_index = sub_st->index;

        if (av_read_packet(ast->sub_ctx, &ast->sub_pkt) < 0)
            ast->sub_pkt.data = NULL;
    }
    return sub_st;
}
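
/* Decode the two ASCII digits of a chunk id ('##dc', '##wb', ...) into a
 * stream index; returns 100 if they are not digits. */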
static int get_stream_idx(int *d){
    if(    d[0] >= '0' && d[0] <= '9'
        && d[1] >= '0' && d[1] <= '9'){
        return (d[0] - '0') * 10 + (d[1] - '0');
    }else{
        return 100; //invalid stream ID
    }
}
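
/* Read the next packet: for non-interleaved files pick the stream with the
 * lowest timestamp from the index, otherwise scan the 'movi' data for the
 * next ##dc/##wb chunk, resynchronizing on damaged files. */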
static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int n, d[8];
    unsigned int size;
    int64_t i, sync;
    void* dstr;

    if (CONFIG_DV_DEMUXER && avi->dv_demux) {
        int size = dv_get_packet(avi->dv_demux, pkt);
        if (size >= 0)
            return size;
    }

    if(avi->non_interleaved){
        int best_stream_index = 0;
        AVStream *best_st= NULL;
        AVIStream *best_ast;
        int64_t best_ts= INT64_MAX;
        int i;

        for(i=0; i<s->nb_streams; i++){
            AVStream *st = s->streams[i];
            AVIStream *ast = st->priv_data;
            int64_t ts= ast->frame_offset;
            int64_t last_ts;

            if(!st->nb_index_entries)
                continue;

            last_ts = st->index_entries[st->nb_index_entries - 1].timestamp;
            if(!ast->remaining && ts > last_ts)
                continue;

            ts = av_rescale_q(ts, st->time_base, (AVRational){FFMAX(1, ast->sample_size), AV_TIME_BASE});

//            av_log(s, AV_LOG_DEBUG, "%"PRId64" %d/%d %"PRId64"\n", ts, st->time_base.num, st->time_base.den, ast->frame_offset);
            if(ts < best_ts){
                best_ts= ts;
                best_st= st;
                best_stream_index= i;
            }
        }
        if(!best_st)
            return -1;

        best_ast = best_st->priv_data;
        best_ts = av_rescale_q(best_ts, (AVRational){FFMAX(1, best_ast->sample_size), AV_TIME_BASE}, best_st->time_base);
        if(best_ast->remaining)
            i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY | AVSEEK_FLAG_BACKWARD);
        else{
            i= av_index_search_timestamp(best_st, best_ts, AVSEEK_FLAG_ANY);
            if(i>=0)
                best_ast->frame_offset= best_st->index_entries[i].timestamp;
        }

//        av_log(s, AV_LOG_DEBUG, "%d\n", i);
        if(i>=0){
            int64_t pos= best_st->index_entries[i].pos;
            pos += best_ast->packet_size - best_ast->remaining;
            url_fseek(s->pb, pos + 8, SEEK_SET);
//        av_log(s, AV_LOG_DEBUG, "pos=%"PRId64"\n", pos);

            assert(best_ast->remaining <= best_ast->packet_size);

            avi->stream_index= best_stream_index;
            if(!best_ast->remaining)
                best_ast->packet_size=
                best_ast->remaining= best_st->index_entries[i].size;
        }
    }
resync:
    if(avi->stream_index >= 0){
        AVStream *st= s->streams[ avi->stream_index ];
        AVIStream *ast= st->priv_data;
        int size, err;

        if(get_subtitle_pkt(s, st, pkt))
            return 0;

        if(ast->sample_size <= 1) // minorityreport.AVI block_align=1024 sample_size=1 IMA-ADPCM
            size= INT_MAX;
        else if(ast->sample_size < 32)
            // arbitrary multiplier to avoid tiny packets for raw PCM data
            size= 1024*ast->sample_size;
        else
            size= ast->sample_size;

        if(size > ast->remaining)
            size= ast->remaining;
        avi->last_pkt_pos= url_ftell(pb);
        err= av_get_packet(pb, pkt, size);
        if(err<0)
            return err;

        if(ast->has_pal && pkt->data && pkt->size<(unsigned)INT_MAX/2){
            void *ptr= av_realloc(pkt->data, pkt->size + 4*256 + FF_INPUT_BUFFER_PADDING_SIZE);
            if(ptr){
                ast->has_pal=0;
                pkt->size += 4*256;
                pkt->data= ptr;
                memcpy(pkt->data + pkt->size - 4*256, ast->pal, 4*256);
            }else
                av_log(s, AV_LOG_ERROR, "Failed to append palette\n");
        }

        if (CONFIG_DV_DEMUXER && avi->dv_demux) {
            dstr = pkt->destruct;
            size = dv_produce_packet(avi->dv_demux, pkt,
                                     pkt->data, pkt->size);
            pkt->destruct = dstr;
            pkt->flags |= AV_PKT_FLAG_KEY;
            if (size < 0)
                av_free_packet(pkt);
        } else if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
                   && !st->codec->codec_tag && read_gab2_sub(st, pkt)) {
            ast->frame_offset++;
            avi->stream_index = -1;
            ast->remaining = 0;
            goto resync;
        } else {
            /* XXX: How to handle B-frames in AVI? */
            pkt->dts = ast->frame_offset;
//            pkt->dts += ast->start;
            if(ast->sample_size)
                pkt->dts /= ast->sample_size;
//av_log(s, AV_LOG_DEBUG, "dts:%"PRId64" offset:%"PRId64" %d/%d smpl_siz:%d base:%d st:%d size:%d\n", pkt->dts, ast->frame_offset, ast->scale, ast->rate, ast->sample_size, AV_TIME_BASE, avi->stream_index, size);
            pkt->stream_index = avi->stream_index;

            if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                AVIndexEntry *e;
                int index;
                assert(st->index_entries);

                index= av_index_search_timestamp(st, ast->frame_offset, 0);
                /* only dereference the entry if the search succeeded */
                if(index >= 0){
                    e= &st->index_entries[index];
                    if(e->timestamp == ast->frame_offset && (e->flags & AVINDEX_KEYFRAME))
                        pkt->flags |= AV_PKT_FLAG_KEY;
                }
            } else {
                pkt->flags |= AV_PKT_FLAG_KEY;
            }
            ast->frame_offset += get_duration(ast, pkt->size);
        }
        ast->remaining -= size;
        if(!ast->remaining){
            avi->stream_index= -1;
            ast->packet_size= 0;
        }

        return size;
    }
    memset(d, -1, sizeof(int)*8);
    for(i=sync=url_ftell(pb); !url_feof(pb); i++) {
        int j;

        for(j=0; j<7; j++)
            d[j]= d[j+1];
        d[7]= get_byte(pb);

        size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24);

        n= get_stream_idx(d+2);
//av_log(s, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
        if(i + (uint64_t)size > avi->fsize || d[0]<0)
            continue;

        //parse ix##
        if(  (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams)
        //parse JUNK
           ||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K')
           ||(d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1')){
            url_fskip(pb, size);
//av_log(s, AV_LOG_DEBUG, "SKIP\n");
            goto resync;
        }

        //parse stray LIST
        if(d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T'){
            url_fskip(pb, 4);
            goto resync;
        }

        n= get_stream_idx(d);

        if(!((i-avi->last_pkt_pos)&1) && get_stream_idx(d+1) < s->nb_streams)
            continue;

        //detect ##ix chunk and skip
        if(d[2] == 'i' && d[3] == 'x' && n < s->nb_streams){
            url_fskip(pb, size);
            goto resync;
        }

        //parse ##dc/##wb
        if(n < s->nb_streams){
            AVStream *st;
            AVIStream *ast;
            st = s->streams[n];
            ast = st->priv_data;

            if(s->nb_streams>=2){
                AVStream *st1 = s->streams[1];
                AVIStream *ast1= st1->priv_data;
                //workaround for broken small-file-bug402.avi
                if(   d[2] == 'w' && d[3] == 'b'
                   && n==0
                   && st ->codec->codec_type == AVMEDIA_TYPE_VIDEO
                   && st1->codec->codec_type == AVMEDIA_TYPE_AUDIO
                   && ast->prefix == 'd'*256+'c'
                   && (d[2]*256+d[3] == ast1->prefix || !ast1->prefix_count)
                  ){
                    n=1;
                    st = st1;
                    ast = ast1;
                    av_log(s, AV_LOG_WARNING, "Invalid stream + prefix combination, assuming audio.\n");
                }
            }

            if(   (st->discard >= AVDISCARD_DEFAULT && size==0)
               /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & AV_PKT_FLAG_KEY))*/ //FIXME needs a little reordering
               || st->discard >= AVDISCARD_ALL){
                ast->frame_offset += get_duration(ast, size);
                url_fskip(pb, size);
                goto resync;
            }

            if (d[2] == 'p' && d[3] == 'c' && size<=4*256+4) {
                int k = get_byte(pb);
                int last = (k + get_byte(pb) - 1) & 0xFF;

                get_le16(pb); //flags

                for (; k <= last; k++)
                    ast->pal[k] = get_be32(pb)>>8;// b + (g << 8) + (r << 16)
                ast->has_pal= 1;
                goto resync;
            } else if(   ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
                         d[2]*256+d[3] == ast->prefix /*||
                         (d[2] == 'd' && d[3] == 'c') ||
                         (d[2] == 'w' && d[3] == 'b')*/) {

//av_log(s, AV_LOG_DEBUG, "OK\n");
                if(d[2]*256+d[3] == ast->prefix)
                    ast->prefix_count++;
                else{
                    ast->prefix= d[2]*256+d[3];
                    ast->prefix_count= 0;
                }

                avi->stream_index= n;
                ast->packet_size= size + 8;
                ast->remaining= size;

                if(size || !ast->sample_size){
                    uint64_t pos= url_ftell(pb) - 8;
                    if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){
                        av_add_index_entry(st, pos, ast->frame_offset, size, 0, AVINDEX_KEYFRAME);
                    }
                }
                goto resync;
            }
        }
    }

    return AVERROR_EOF;
}
/* XXX: We make the implicit supposition that the positions are sorted
   for each stream. */
static int avi_read_idx1(AVFormatContext *s, int size)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    int nb_index_entries, i;
    AVStream *st;
    AVIStream *ast;
    unsigned int index, tag, flags, pos, len;
    unsigned last_pos= -1;

    nb_index_entries = size / 16;
    if (nb_index_entries <= 0)
        return -1;

    /* Read the entries and sort them in each stream component. */
    for(i = 0; i < nb_index_entries; i++) {
        tag = get_le32(pb);
        flags = get_le32(pb);
        pos = get_le32(pb);
        len = get_le32(pb);
#if defined(DEBUG_SEEK)
        av_log(s, AV_LOG_DEBUG, "%d: tag=0x%x flags=0x%x pos=0x%x len=%d/",
               i, tag, flags, pos, len);
#endif
        if(i==0 && pos > avi->movi_list)
            avi->movi_list= 0; //FIXME better check
        pos += avi->movi_list;

        index = ((tag & 0xff) - '0') * 10;
        index += ((tag >> 8) & 0xff) - '0';
        if (index >= s->nb_streams)
            continue;
        st = s->streams[index];
        ast = st->priv_data;

#if defined(DEBUG_SEEK)
        av_log(s, AV_LOG_DEBUG, "%d cum_len=%"PRId64"\n", len, ast->cum_len);
#endif
        if(url_feof(pb))
            return -1;

        if(last_pos == pos)
            avi->non_interleaved= 1;
        else if(len || !ast->sample_size)
            av_add_index_entry(st, pos, ast->cum_len, len, 0, (flags&AVIIF_INDEX) ? AVINDEX_KEYFRAME : 0);
        ast->cum_len += get_duration(ast, len);
        last_pos= pos;
    }
    return 0;
}
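
/* Heuristic: consider the file non-interleaved if some stream's data starts
 * only after another stream's data has already ended. */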
static int guess_ni_flag(AVFormatContext *s){
    int i;
    int64_t last_start=0;
    int64_t first_end= INT64_MAX;
    int64_t oldpos= url_ftell(s->pb);

    for(i=0; i<s->nb_streams; i++){
        AVStream *st = s->streams[i];
        int n= st->nb_index_entries;
        unsigned int size;

        if(n <= 0)
            continue;

        if(n >= 2){
            int64_t pos= st->index_entries[0].pos;
            url_fseek(s->pb, pos + 4, SEEK_SET);
            size= get_le32(s->pb);
            if(pos + size > st->index_entries[1].pos)
                last_start= INT64_MAX;
        }

        if(st->index_entries[0].pos > last_start)
            last_start= st->index_entries[0].pos;
        if(st->index_entries[n-1].pos < first_end)
            first_end= st->index_entries[n-1].pos;
    }
    url_fseek(s->pb, oldpos, SEEK_SET);
    return last_start > first_end;
}
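
/* Seek past the end of the 'movi' list and look for an 'idx1' chunk; if one
 * is found, build the seek index from it, then restore the file position. */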
static int avi_load_index(AVFormatContext *s)
{
    AVIContext *avi = s->priv_data;
    ByteIOContext *pb = s->pb;
    uint32_t tag, size;
    int64_t pos= url_ftell(pb);
    int ret = -1;

    if (url_fseek(pb, avi->movi_end, SEEK_SET) < 0)
        goto the_end; // maybe truncated file
#ifdef DEBUG_SEEK
    printf("movi_end=0x%"PRIx64"\n", avi->movi_end);
#endif
    for(;;) {
        if (url_feof(pb))
            break;
        tag = get_le32(pb);
        size = get_le32(pb);
#ifdef DEBUG_SEEK
        printf("tag=%c%c%c%c size=0x%x\n",
               tag & 0xff,
               (tag >> 8) & 0xff,
               (tag >> 16) & 0xff,
               (tag >> 24) & 0xff,
               size);
#endif
        switch(tag) {
        case MKTAG('i', 'd', 'x', '1'):
            if (avi_read_idx1(s, size) < 0)
                goto skip;
            ret = 0;
            goto the_end;
            break;
        default:
        skip:
            size += (size & 1);
            if (url_fseek(pb, size, SEEK_CUR) < 0)
                goto the_end; // something is wrong here
            break;
        }
    }
 the_end:
    url_fseek(pb, pos, SEEK_SET);
    return ret;
}
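
/* Seek the embedded subtitle demuxer of st2 to the target timestamp and
 * preload its next packet. */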
static void seek_subtitle(AVStream *st, AVStream *st2, int64_t timestamp)
{
    AVIStream *ast2 = st2->priv_data;
    int64_t ts2 = av_rescale_q(timestamp, st->time_base, st2->time_base);
    av_free_packet(&ast2->sub_pkt);
    if (avformat_seek_file(ast2->sub_ctx, 0, INT64_MIN, ts2, ts2, 0) >= 0 ||
        avformat_seek_file(ast2->sub_ctx, 0, ts2, ts2, INT64_MAX, 0) >= 0)
        av_read_packet(ast2->sub_ctx, &ast2->sub_pkt);
}
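
/* Seek all streams to the index entry nearest the requested timestamp; for
 * DV the timestamp is handed to the DV demuxer instead. */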
static int avi_read_seek(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
{
    AVIContext *avi = s->priv_data;
    AVStream *st;
    int i, index;
    int64_t pos;
    AVIStream *ast;

    if (!avi->index_loaded) {
        /* we only load the index on demand */
        avi_load_index(s);
        avi->index_loaded = 1;
    }
    assert(stream_index>= 0);

    st = s->streams[stream_index];
    ast= st->priv_data;
    index= av_index_search_timestamp(st, timestamp * FFMAX(ast->sample_size, 1), flags);
    if(index<0)
        return -1;

    /* find the position */
    pos = st->index_entries[index].pos;
    timestamp = st->index_entries[index].timestamp / FFMAX(ast->sample_size, 1);

//    av_log(s, AV_LOG_DEBUG, "XX %"PRId64" %d %"PRId64"\n", timestamp, index, st->index_entries[index].timestamp);

    if (CONFIG_DV_DEMUXER && avi->dv_demux) {
        /* One and only one real stream for DV in AVI, and it has video  */
        /* offsets. Calling with other stream indexes should have failed */
        /* the av_index_search_timestamp call above.                     */
        assert(stream_index == 0);

        /* Feed the DV video stream version of the timestamp to the */
        /* DV demux so it can synthesize correct timestamps.        */
        dv_offset_reset(avi->dv_demux, timestamp);

        url_fseek(s->pb, pos, SEEK_SET);
        avi->stream_index= -1;
        return 0;
    }

    for(i = 0; i < s->nb_streams; i++) {
        AVStream *st2 = s->streams[i];
        AVIStream *ast2 = st2->priv_data;

        ast2->packet_size=
        ast2->remaining= 0;

        if (ast2->sub_ctx) {
            seek_subtitle(st, st2, timestamp);
            continue;
        }

        if (st2->nb_index_entries <= 0)
            continue;

//        assert(st2->codec->block_align);
        assert((int64_t)st2->time_base.num*ast2->rate == (int64_t)st2->time_base.den*ast2->scale);
        index = av_index_search_timestamp(
                st2,
                av_rescale_q(timestamp, st->time_base, st2->time_base) * FFMAX(ast2->sample_size, 1),
                flags | AVSEEK_FLAG_BACKWARD);
        if(index<0)
            index=0;

        if(!avi->non_interleaved){
            while(index>0 && st2->index_entries[index].pos > pos)
                index--;
            while(index+1 < st2->nb_index_entries && st2->index_entries[index].pos < pos)
                index++;
        }

//        av_log(s, AV_LOG_DEBUG, "%"PRId64" %d %"PRId64"\n", timestamp, index, st2->index_entries[index].timestamp);
        /* extract the current frame number */
        ast2->frame_offset = st2->index_entries[index].timestamp;
    }

    /* do the seek */
    url_fseek(s->pb, pos, SEEK_SET);
    avi->stream_index= -1;
    return 0;
}
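
/* Free per-stream state (palette control, embedded subtitle demuxers and
 * buffers) and the DV demuxer context. */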
static int avi_read_close(AVFormatContext *s)
{
    int i;
    AVIContext *avi = s->priv_data;

    for(i=0;i<s->nb_streams;i++) {
        AVStream *st = s->streams[i];
        AVIStream *ast = st->priv_data;
        av_free(st->codec->palctrl);
        if (ast) {
            if (ast->sub_ctx) {
                av_freep(&ast->sub_ctx->pb);
                av_close_input_stream(ast->sub_ctx);
            }
            av_free(ast->sub_buffer);
            av_free_packet(&ast->sub_pkt);
        }
    }

    if (avi->dv_demux)
        av_free(avi->dv_demux);

    return 0;
}
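
/* Probe: compare the first and third dwords of the buffer against the known
 * RIFF/AVI header variants. */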
static int avi_probe(AVProbeData *p)
{
    int i;

    /* check file header */
    for(i=0; avi_headers[i][0]; i++)
        if(!memcmp(p->buf  , avi_headers[i]  , 4) &&
           !memcmp(p->buf+8, avi_headers[i]+4, 4))
            return AVPROBE_SCORE_MAX;

    return 0;
}
AVInputFormat avi_demuxer = {
    "avi",
    NULL_IF_CONFIG_SMALL("AVI format"),
    sizeof(AVIContext),
    avi_probe,
    avi_read_header,
    avi_read_packet,
    avi_read_close,
    avi_read_seek,
};