You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2354 lines
78KB

  1. /*
  2. * MOV demuxer
  3. * Copyright (c) 2001 Fabrice Bellard
  4. * Copyright (c) 2009 Baptiste Coudurier <baptiste dot coudurier at gmail dot com>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. #include <limits.h>
  23. //#define DEBUG
  24. //#define DEBUG_METADATA
  25. //#define MOV_EXPORT_ALL_METADATA
  26. #include "libavutil/intreadwrite.h"
  27. #include "libavutil/avstring.h"
  28. #include "avformat.h"
  29. #include "riff.h"
  30. #include "isom.h"
  31. #include "libavcodec/mpeg4audio.h"
  32. #include "libavcodec/mpegaudiodata.h"
  33. #include "libavcodec/get_bits.h"
  34. #if CONFIG_ZLIB
  35. #include <zlib.h>
  36. #endif
  37. /*
  38. * First version by Francois Revol revol@free.fr
  39. * Seek function by Gael Chardon gael.dev@4now.net
  40. *
  41. * Features and limitations:
  42. * - reads most of the QT files I have (at least the structure),
  43. * Sample QuickTime files with mp3 audio can be found at: http://www.3ivx.com/showcase.html
  44. * - the code is quite ugly... maybe I won't do it recursive next time :-)
  45. *
  46. * Funny I didn't know about http://sourceforge.net/projects/qt-ffmpeg/
  47. * when coding this :) (it's a writer anyway)
  48. *
  49. * Reference documents:
  50. * http://www.geocities.com/xhelmboyx/quicktime/formats/qtm-layout.txt
  51. * Apple:
  52. * http://developer.apple.com/documentation/QuickTime/QTFF/
  53. * http://developer.apple.com/documentation/QuickTime/QTFF/qtff.pdf
  54. * QuickTime is a trademark of Apple (AFAIK :))
  55. */
  56. #include "qtpalette.h"
  57. #undef NDEBUG
  58. #include <assert.h>
  59. /* XXX: it's the first time I make a recursive parser I think... sorry if it's ugly :P */
  60. /* those functions parse an atom */
  61. /* return code:
  62. 0: continue to parse next atom
  63. <0: error occurred, exit
  64. */
  65. /* links atom IDs to parse functions */
  66. typedef struct MOVParseTableEntry {
  67. uint32_t type;
  68. int (*parse)(MOVContext *ctx, ByteIOContext *pb, MOVAtom atom);
  69. } MOVParseTableEntry;
  70. static const MOVParseTableEntry mov_default_parse_table[];
  71. static int mov_metadata_trkn(MOVContext *c, ByteIOContext *pb, unsigned len)
  72. {
  73. char buf[16];
  74. get_be16(pb); // unknown
  75. snprintf(buf, sizeof(buf), "%d", get_be16(pb));
  76. av_metadata_set(&c->fc->metadata, "track", buf);
  77. get_be16(pb); // total tracks
  78. return 0;
  79. }
  80. static int mov_read_udta_string(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  81. {
  82. #ifdef MOV_EXPORT_ALL_METADATA
  83. char tmp_key[5];
  84. #endif
  85. char str[1024], key2[16], language[4] = {0};
  86. const char *key = NULL;
  87. uint16_t str_size;
  88. int (*parse)(MOVContext*, ByteIOContext*, unsigned) = NULL;
  89. switch (atom.type) {
  90. case MKTAG(0xa9,'n','a','m'): key = "title"; break;
  91. case MKTAG(0xa9,'a','u','t'):
  92. case MKTAG(0xa9,'A','R','T'): key = "author"; break;
  93. case MKTAG(0xa9,'w','r','t'): key = "composer"; break;
  94. case MKTAG( 'c','p','r','t'):
  95. case MKTAG(0xa9,'c','p','y'): key = "copyright"; break;
  96. case MKTAG(0xa9,'c','m','t'):
  97. case MKTAG(0xa9,'i','n','f'): key = "comment"; break;
  98. case MKTAG(0xa9,'a','l','b'): key = "album"; break;
  99. case MKTAG(0xa9,'d','a','y'): key = "year"; break;
  100. case MKTAG(0xa9,'g','e','n'): key = "genre"; break;
  101. case MKTAG(0xa9,'t','o','o'):
  102. case MKTAG(0xa9,'e','n','c'): key = "encoder"; break;
  103. case MKTAG( 'd','e','s','c'): key = "description";break;
  104. case MKTAG( 'l','d','e','s'): key = "synopsis"; break;
  105. case MKTAG( 't','v','s','h'): key = "show"; break;
  106. case MKTAG( 't','v','e','n'): key = "episode_id";break;
  107. case MKTAG( 't','v','n','n'): key = "network"; break;
  108. case MKTAG( 't','r','k','n'): key = "track";
  109. parse = mov_metadata_trkn; break;
  110. }
  111. if (c->itunes_metadata && atom.size > 8) {
  112. int data_size = get_be32(pb);
  113. int tag = get_le32(pb);
  114. if (tag == MKTAG('d','a','t','a')) {
  115. get_be32(pb); // type
  116. get_be32(pb); // unknown
  117. str_size = data_size - 16;
  118. atom.size -= 16;
  119. } else return 0;
  120. } else if (atom.size > 4 && key && !c->itunes_metadata) {
  121. str_size = get_be16(pb); // string length
  122. ff_mov_lang_to_iso639(get_be16(pb), language);
  123. atom.size -= 4;
  124. } else
  125. str_size = atom.size;
  126. #ifdef MOV_EXPORT_ALL_METADATA
  127. if (!key) {
  128. snprintf(tmp_key, 5, "%.4s", (char*)&atom.type);
  129. key = tmp_key;
  130. }
  131. #endif
  132. if (!key)
  133. return 0;
  134. if (atom.size < 0)
  135. return -1;
  136. str_size = FFMIN3(sizeof(str)-1, str_size, atom.size);
  137. if (parse)
  138. parse(c, pb, str_size);
  139. else {
  140. get_buffer(pb, str, str_size);
  141. str[str_size] = 0;
  142. av_metadata_set(&c->fc->metadata, key, str);
  143. if (*language && strcmp(language, "und")) {
  144. snprintf(key2, sizeof(key2), "%s-%s", key, language);
  145. av_metadata_set(&c->fc->metadata, key2, str);
  146. }
  147. }
  148. #ifdef DEBUG_METADATA
  149. av_log(c->fc, AV_LOG_DEBUG, "lang \"%3s\" ", language);
  150. av_log(c->fc, AV_LOG_DEBUG, "tag \"%s\" value \"%s\" atom \"%.4s\" %d %lld\n",
  151. key, str, (char*)&atom.type, str_size, atom.size);
  152. #endif
  153. return 0;
  154. }
  155. static int mov_read_default(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  156. {
  157. int64_t total_size = 0;
  158. MOVAtom a;
  159. int i;
  160. if (atom.size < 0)
  161. atom.size = INT64_MAX;
  162. while (total_size + 8 < atom.size && !url_feof(pb)) {
  163. int (*parse)(MOVContext*, ByteIOContext*, MOVAtom) = NULL;
  164. a.size = atom.size;
  165. a.type=0;
  166. if(atom.size >= 8) {
  167. a.size = get_be32(pb);
  168. a.type = get_le32(pb);
  169. }
  170. total_size += 8;
  171. dprintf(c->fc, "type: %08x %.4s sz: %"PRIx64" %"PRIx64" %"PRIx64"\n",
  172. a.type, (char*)&a.type, a.size, atom.size, total_size);
  173. if (a.size == 1) { /* 64 bit extended size */
  174. a.size = get_be64(pb) - 8;
  175. total_size += 8;
  176. }
  177. if (a.size == 0) {
  178. a.size = atom.size - total_size;
  179. if (a.size <= 8)
  180. break;
  181. }
  182. a.size -= 8;
  183. if(a.size < 0)
  184. break;
  185. a.size = FFMIN(a.size, atom.size - total_size);
  186. for (i = 0; mov_default_parse_table[i].type; i++)
  187. if (mov_default_parse_table[i].type == a.type) {
  188. parse = mov_default_parse_table[i].parse;
  189. break;
  190. }
  191. // container is user data
  192. if (!parse && (atom.type == MKTAG('u','d','t','a') ||
  193. atom.type == MKTAG('i','l','s','t')))
  194. parse = mov_read_udta_string;
  195. if (!parse) { /* skip leaf atoms data */
  196. url_fskip(pb, a.size);
  197. } else {
  198. int64_t start_pos = url_ftell(pb);
  199. int64_t left;
  200. int err = parse(c, pb, a);
  201. if (err < 0)
  202. return err;
  203. if (url_is_streamed(pb) && c->found_moov && c->found_mdat)
  204. break;
  205. left = a.size - url_ftell(pb) + start_pos;
  206. if (left > 0) /* skip garbage at atom end */
  207. url_fskip(pb, left);
  208. }
  209. total_size += a.size;
  210. }
  211. if (total_size < atom.size && atom.size < 0x7ffff)
  212. url_fskip(pb, atom.size - total_size);
  213. return 0;
  214. }
  215. static int mov_read_dref(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  216. {
  217. AVStream *st;
  218. MOVStreamContext *sc;
  219. int entries, i, j;
  220. if (c->fc->nb_streams < 1)
  221. return 0;
  222. st = c->fc->streams[c->fc->nb_streams-1];
  223. sc = st->priv_data;
  224. get_be32(pb); // version + flags
  225. entries = get_be32(pb);
  226. if (entries >= UINT_MAX / sizeof(*sc->drefs))
  227. return -1;
  228. sc->drefs = av_mallocz(entries * sizeof(*sc->drefs));
  229. if (!sc->drefs)
  230. return AVERROR(ENOMEM);
  231. sc->drefs_count = entries;
  232. for (i = 0; i < sc->drefs_count; i++) {
  233. MOVDref *dref = &sc->drefs[i];
  234. uint32_t size = get_be32(pb);
  235. int64_t next = url_ftell(pb) + size - 4;
  236. dref->type = get_le32(pb);
  237. get_be32(pb); // version + flags
  238. dprintf(c->fc, "type %.4s size %d\n", (char*)&dref->type, size);
  239. if (dref->type == MKTAG('a','l','i','s') && size > 150) {
  240. /* macintosh alias record */
  241. uint16_t volume_len, len;
  242. int16_t type;
  243. url_fskip(pb, 10);
  244. volume_len = get_byte(pb);
  245. volume_len = FFMIN(volume_len, 27);
  246. get_buffer(pb, dref->volume, 27);
  247. dref->volume[volume_len] = 0;
  248. av_log(c->fc, AV_LOG_DEBUG, "volume %s, len %d\n", dref->volume, volume_len);
  249. url_fskip(pb, 12);
  250. len = get_byte(pb);
  251. len = FFMIN(len, 63);
  252. get_buffer(pb, dref->filename, 63);
  253. dref->filename[len] = 0;
  254. av_log(c->fc, AV_LOG_DEBUG, "filename %s, len %d\n", dref->filename, len);
  255. url_fskip(pb, 16);
  256. /* read next level up_from_alias/down_to_target */
  257. dref->nlvl_from = get_be16(pb);
  258. dref->nlvl_to = get_be16(pb);
  259. av_log(c->fc, AV_LOG_DEBUG, "nlvl from %d, nlvl to %d\n",
  260. dref->nlvl_from, dref->nlvl_to);
  261. url_fskip(pb, 16);
  262. for (type = 0; type != -1 && url_ftell(pb) < next; ) {
  263. type = get_be16(pb);
  264. len = get_be16(pb);
  265. av_log(c->fc, AV_LOG_DEBUG, "type %d, len %d\n", type, len);
  266. if (len&1)
  267. len += 1;
  268. if (type == 2) { // absolute path
  269. av_free(dref->path);
  270. dref->path = av_mallocz(len+1);
  271. if (!dref->path)
  272. return AVERROR(ENOMEM);
  273. get_buffer(pb, dref->path, len);
  274. if (len > volume_len && !strncmp(dref->path, dref->volume, volume_len)) {
  275. len -= volume_len;
  276. memmove(dref->path, dref->path+volume_len, len);
  277. dref->path[len] = 0;
  278. }
  279. for (j = 0; j < len; j++)
  280. if (dref->path[j] == ':')
  281. dref->path[j] = '/';
  282. av_log(c->fc, AV_LOG_DEBUG, "path %s\n", dref->path);
  283. } else if (type == 0) { // directory name
  284. av_free(dref->dir);
  285. dref->dir = av_malloc(len+1);
  286. if (!dref->dir)
  287. return AVERROR(ENOMEM);
  288. get_buffer(pb, dref->dir, len);
  289. dref->dir[len] = 0;
  290. for (j = 0; j < len; j++)
  291. if (dref->dir[j] == ':')
  292. dref->dir[j] = '/';
  293. av_log(c->fc, AV_LOG_DEBUG, "dir %s\n", dref->dir);
  294. } else
  295. url_fskip(pb, len);
  296. }
  297. }
  298. url_fseek(pb, next, SEEK_SET);
  299. }
  300. return 0;
  301. }
  302. static int mov_read_hdlr(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  303. {
  304. AVStream *st;
  305. uint32_t type;
  306. uint32_t ctype;
  307. if (c->fc->nb_streams < 1) // meta before first trak
  308. return 0;
  309. st = c->fc->streams[c->fc->nb_streams-1];
  310. get_byte(pb); /* version */
  311. get_be24(pb); /* flags */
  312. /* component type */
  313. ctype = get_le32(pb);
  314. type = get_le32(pb); /* component subtype */
  315. dprintf(c->fc, "ctype= %.4s (0x%08x)\n", (char*)&ctype, ctype);
  316. dprintf(c->fc, "stype= %.4s\n", (char*)&type);
  317. if (type == MKTAG('v','i','d','e'))
  318. st->codec->codec_type = CODEC_TYPE_VIDEO;
  319. else if(type == MKTAG('s','o','u','n'))
  320. st->codec->codec_type = CODEC_TYPE_AUDIO;
  321. else if(type == MKTAG('m','1','a',' '))
  322. st->codec->codec_id = CODEC_ID_MP2;
  323. else if(type == MKTAG('s','u','b','p'))
  324. st->codec->codec_type = CODEC_TYPE_SUBTITLE;
  325. get_be32(pb); /* component manufacture */
  326. get_be32(pb); /* component flags */
  327. get_be32(pb); /* component flags mask */
  328. return 0;
  329. }
  330. int ff_mp4_read_descr_len(ByteIOContext *pb)
  331. {
  332. int len = 0;
  333. int count = 4;
  334. while (count--) {
  335. int c = get_byte(pb);
  336. len = (len << 7) | (c & 0x7f);
  337. if (!(c & 0x80))
  338. break;
  339. }
  340. return len;
  341. }
  342. int mp4_read_descr(AVFormatContext *fc, ByteIOContext *pb, int *tag)
  343. {
  344. int len;
  345. *tag = get_byte(pb);
  346. len = ff_mp4_read_descr_len(pb);
  347. dprintf(fc, "MPEG4 description: tag=0x%02x len=%d\n", *tag, len);
  348. return len;
  349. }
  350. #define MP4ESDescrTag 0x03
  351. #define MP4DecConfigDescrTag 0x04
  352. #define MP4DecSpecificDescrTag 0x05
  353. static const AVCodecTag mp4_audio_types[] = {
  354. { CODEC_ID_MP3ON4, AOT_PS }, /* old mp3on4 draft */
  355. { CODEC_ID_MP3ON4, AOT_L1 }, /* layer 1 */
  356. { CODEC_ID_MP3ON4, AOT_L2 }, /* layer 2 */
  357. { CODEC_ID_MP3ON4, AOT_L3 }, /* layer 3 */
  358. { CODEC_ID_MP4ALS, AOT_ALS }, /* MPEG-4 ALS */
  359. { CODEC_ID_NONE, AOT_NULL },
  360. };
  361. int ff_mov_read_esds(AVFormatContext *fc, ByteIOContext *pb, MOVAtom atom)
  362. {
  363. AVStream *st;
  364. int tag, len;
  365. if (fc->nb_streams < 1)
  366. return 0;
  367. st = fc->streams[fc->nb_streams-1];
  368. get_be32(pb); /* version + flags */
  369. len = mp4_read_descr(fc, pb, &tag);
  370. if (tag == MP4ESDescrTag) {
  371. get_be16(pb); /* ID */
  372. get_byte(pb); /* priority */
  373. } else
  374. get_be16(pb); /* ID */
  375. len = mp4_read_descr(fc, pb, &tag);
  376. if (tag == MP4DecConfigDescrTag) {
  377. int object_type_id = get_byte(pb);
  378. get_byte(pb); /* stream type */
  379. get_be24(pb); /* buffer size db */
  380. get_be32(pb); /* max bitrate */
  381. get_be32(pb); /* avg bitrate */
  382. st->codec->codec_id= ff_codec_get_id(ff_mp4_obj_type, object_type_id);
  383. dprintf(fc, "esds object type id 0x%02x\n", object_type_id);
  384. len = mp4_read_descr(fc, pb, &tag);
  385. if (tag == MP4DecSpecificDescrTag) {
  386. dprintf(fc, "Specific MPEG4 header len=%d\n", len);
  387. if((uint64_t)len > (1<<30))
  388. return -1;
  389. st->codec->extradata = av_mallocz(len + FF_INPUT_BUFFER_PADDING_SIZE);
  390. if (!st->codec->extradata)
  391. return AVERROR(ENOMEM);
  392. get_buffer(pb, st->codec->extradata, len);
  393. st->codec->extradata_size = len;
  394. if (st->codec->codec_id == CODEC_ID_AAC) {
  395. MPEG4AudioConfig cfg;
  396. ff_mpeg4audio_get_config(&cfg, st->codec->extradata,
  397. st->codec->extradata_size);
  398. st->codec->channels = cfg.channels;
  399. if (cfg.object_type == 29 && cfg.sampling_index < 3) // old mp3on4
  400. st->codec->sample_rate = ff_mpa_freq_tab[cfg.sampling_index];
  401. else
  402. st->codec->sample_rate = cfg.sample_rate; // ext sample rate ?
  403. dprintf(fc, "mp4a config channels %d obj %d ext obj %d "
  404. "sample rate %d ext sample rate %d\n", st->codec->channels,
  405. cfg.object_type, cfg.ext_object_type,
  406. cfg.sample_rate, cfg.ext_sample_rate);
  407. if (!(st->codec->codec_id = ff_codec_get_id(mp4_audio_types,
  408. cfg.object_type)))
  409. st->codec->codec_id = CODEC_ID_AAC;
  410. }
  411. }
  412. }
  413. return 0;
  414. }
  415. static int mov_read_esds(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  416. {
  417. return ff_mov_read_esds(c->fc, pb, atom);
  418. }
  419. static int mov_read_pasp(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  420. {
  421. const int num = get_be32(pb);
  422. const int den = get_be32(pb);
  423. AVStream *st;
  424. if (c->fc->nb_streams < 1)
  425. return 0;
  426. st = c->fc->streams[c->fc->nb_streams-1];
  427. if (den != 0) {
  428. if ((st->sample_aspect_ratio.den != 1 || st->sample_aspect_ratio.num) && // default
  429. (den != st->sample_aspect_ratio.den || num != st->sample_aspect_ratio.num))
  430. av_log(c->fc, AV_LOG_WARNING,
  431. "sample aspect ratio already set to %d:%d, overriding by 'pasp' atom\n",
  432. st->sample_aspect_ratio.num, st->sample_aspect_ratio.den);
  433. st->sample_aspect_ratio.num = num;
  434. st->sample_aspect_ratio.den = den;
  435. }
  436. return 0;
  437. }
  438. /* this atom contains actual media data */
  439. static int mov_read_mdat(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  440. {
  441. if(atom.size == 0) /* wrong one (MP4) */
  442. return 0;
  443. c->found_mdat=1;
  444. return 0; /* now go for moov */
  445. }
  446. /* read major brand, minor version and compatible brands and store them as metadata */
  447. static int mov_read_ftyp(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  448. {
  449. uint32_t minor_ver;
  450. int comp_brand_size;
  451. char minor_ver_str[11]; /* 32 bit integer -> 10 digits + null */
  452. char* comp_brands_str;
  453. uint8_t type[5] = {0};
  454. get_buffer(pb, type, 4);
  455. if (strcmp(type, "qt "))
  456. c->isom = 1;
  457. av_log(c->fc, AV_LOG_DEBUG, "ISO: File Type Major Brand: %.4s\n",(char *)&type);
  458. av_metadata_set(&c->fc->metadata, "major_brand", type);
  459. minor_ver = get_be32(pb); /* minor version */
  460. snprintf(minor_ver_str, sizeof(minor_ver_str), "%d", minor_ver);
  461. av_metadata_set(&c->fc->metadata, "minor_version", minor_ver_str);
  462. comp_brand_size = atom.size - 8;
  463. if (comp_brand_size < 0)
  464. return -1;
  465. comp_brands_str = av_malloc(comp_brand_size + 1); /* Add null terminator */
  466. if (!comp_brands_str)
  467. return AVERROR(ENOMEM);
  468. get_buffer(pb, comp_brands_str, comp_brand_size);
  469. comp_brands_str[comp_brand_size] = 0;
  470. av_metadata_set(&c->fc->metadata, "compatible_brands", comp_brands_str);
  471. av_freep(&comp_brands_str);
  472. return 0;
  473. }
  474. /* this atom should contain all header atoms */
  475. static int mov_read_moov(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  476. {
  477. if (mov_read_default(c, pb, atom) < 0)
  478. return -1;
  479. /* we parsed the 'moov' atom, we can terminate the parsing as soon as we find the 'mdat' */
  480. /* so we don't parse the whole file if over a network */
  481. c->found_moov=1;
  482. return 0; /* now go for mdat */
  483. }
  484. static int mov_read_moof(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  485. {
  486. c->fragment.moof_offset = url_ftell(pb) - 8;
  487. dprintf(c->fc, "moof offset %llx\n", c->fragment.moof_offset);
  488. return mov_read_default(c, pb, atom);
  489. }
  490. static int mov_read_mdhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  491. {
  492. AVStream *st;
  493. MOVStreamContext *sc;
  494. int version;
  495. char language[4] = {0};
  496. unsigned lang;
  497. if (c->fc->nb_streams < 1)
  498. return 0;
  499. st = c->fc->streams[c->fc->nb_streams-1];
  500. sc = st->priv_data;
  501. version = get_byte(pb);
  502. if (version > 1)
  503. return -1; /* unsupported */
  504. get_be24(pb); /* flags */
  505. if (version == 1) {
  506. get_be64(pb);
  507. get_be64(pb);
  508. } else {
  509. get_be32(pb); /* creation time */
  510. get_be32(pb); /* modification time */
  511. }
  512. sc->time_scale = get_be32(pb);
  513. st->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */
  514. lang = get_be16(pb); /* language */
  515. if (ff_mov_lang_to_iso639(lang, language))
  516. av_metadata_set(&st->metadata, "language", language);
  517. get_be16(pb); /* quality */
  518. return 0;
  519. }
  520. static int mov_read_mvhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  521. {
  522. int version = get_byte(pb); /* version */
  523. get_be24(pb); /* flags */
  524. if (version == 1) {
  525. get_be64(pb);
  526. get_be64(pb);
  527. } else {
  528. get_be32(pb); /* creation time */
  529. get_be32(pb); /* modification time */
  530. }
  531. c->time_scale = get_be32(pb); /* time scale */
  532. dprintf(c->fc, "time scale = %i\n", c->time_scale);
  533. c->duration = (version == 1) ? get_be64(pb) : get_be32(pb); /* duration */
  534. get_be32(pb); /* preferred scale */
  535. get_be16(pb); /* preferred volume */
  536. url_fskip(pb, 10); /* reserved */
  537. url_fskip(pb, 36); /* display matrix */
  538. get_be32(pb); /* preview time */
  539. get_be32(pb); /* preview duration */
  540. get_be32(pb); /* poster time */
  541. get_be32(pb); /* selection time */
  542. get_be32(pb); /* selection duration */
  543. get_be32(pb); /* current time */
  544. get_be32(pb); /* next track ID */
  545. return 0;
  546. }
  547. static int mov_read_smi(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  548. {
  549. AVStream *st;
  550. if (c->fc->nb_streams < 1)
  551. return 0;
  552. st = c->fc->streams[c->fc->nb_streams-1];
  553. if((uint64_t)atom.size > (1<<30))
  554. return -1;
  555. // currently SVQ3 decoder expect full STSD header - so let's fake it
  556. // this should be fixed and just SMI header should be passed
  557. av_free(st->codec->extradata);
  558. st->codec->extradata = av_mallocz(atom.size + 0x5a + FF_INPUT_BUFFER_PADDING_SIZE);
  559. if (!st->codec->extradata)
  560. return AVERROR(ENOMEM);
  561. st->codec->extradata_size = 0x5a + atom.size;
  562. memcpy(st->codec->extradata, "SVQ3", 4); // fake
  563. get_buffer(pb, st->codec->extradata + 0x5a, atom.size);
  564. dprintf(c->fc, "Reading SMI %"PRId64" %s\n", atom.size, st->codec->extradata + 0x5a);
  565. return 0;
  566. }
  567. static int mov_read_enda(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  568. {
  569. AVStream *st;
  570. int little_endian;
  571. if (c->fc->nb_streams < 1)
  572. return 0;
  573. st = c->fc->streams[c->fc->nb_streams-1];
  574. little_endian = get_be16(pb);
  575. dprintf(c->fc, "enda %d\n", little_endian);
  576. if (little_endian == 1) {
  577. switch (st->codec->codec_id) {
  578. case CODEC_ID_PCM_S24BE:
  579. st->codec->codec_id = CODEC_ID_PCM_S24LE;
  580. break;
  581. case CODEC_ID_PCM_S32BE:
  582. st->codec->codec_id = CODEC_ID_PCM_S32LE;
  583. break;
  584. case CODEC_ID_PCM_F32BE:
  585. st->codec->codec_id = CODEC_ID_PCM_F32LE;
  586. break;
  587. case CODEC_ID_PCM_F64BE:
  588. st->codec->codec_id = CODEC_ID_PCM_F64LE;
  589. break;
  590. default:
  591. break;
  592. }
  593. }
  594. return 0;
  595. }
  596. /* FIXME modify qdm2/svq3/h264 decoders to take full atom as extradata */
  597. static int mov_read_extradata(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  598. {
  599. AVStream *st;
  600. uint64_t size;
  601. uint8_t *buf;
  602. if (c->fc->nb_streams < 1) // will happen with jp2 files
  603. return 0;
  604. st= c->fc->streams[c->fc->nb_streams-1];
  605. size= (uint64_t)st->codec->extradata_size + atom.size + 8 + FF_INPUT_BUFFER_PADDING_SIZE;
  606. if(size > INT_MAX || (uint64_t)atom.size > INT_MAX)
  607. return -1;
  608. buf= av_realloc(st->codec->extradata, size);
  609. if(!buf)
  610. return -1;
  611. st->codec->extradata= buf;
  612. buf+= st->codec->extradata_size;
  613. st->codec->extradata_size= size - FF_INPUT_BUFFER_PADDING_SIZE;
  614. AV_WB32( buf , atom.size + 8);
  615. AV_WL32( buf + 4, atom.type);
  616. get_buffer(pb, buf + 8, atom.size);
  617. return 0;
  618. }
  619. static int mov_read_wave(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  620. {
  621. AVStream *st;
  622. if (c->fc->nb_streams < 1)
  623. return 0;
  624. st = c->fc->streams[c->fc->nb_streams-1];
  625. if((uint64_t)atom.size > (1<<30))
  626. return -1;
  627. if (st->codec->codec_id == CODEC_ID_QDM2) {
  628. // pass all frma atom to codec, needed at least for QDM2
  629. av_free(st->codec->extradata);
  630. st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE);
  631. if (!st->codec->extradata)
  632. return AVERROR(ENOMEM);
  633. st->codec->extradata_size = atom.size;
  634. get_buffer(pb, st->codec->extradata, atom.size);
  635. } else if (atom.size > 8) { /* to read frma, esds atoms */
  636. if (mov_read_default(c, pb, atom) < 0)
  637. return -1;
  638. } else
  639. url_fskip(pb, atom.size);
  640. return 0;
  641. }
  642. /**
  643. * This function reads atom content and puts data in extradata without tag
  644. * nor size unlike mov_read_extradata.
  645. */
  646. static int mov_read_glbl(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  647. {
  648. AVStream *st;
  649. if (c->fc->nb_streams < 1)
  650. return 0;
  651. st = c->fc->streams[c->fc->nb_streams-1];
  652. if((uint64_t)atom.size > (1<<30))
  653. return -1;
  654. av_free(st->codec->extradata);
  655. st->codec->extradata = av_mallocz(atom.size + FF_INPUT_BUFFER_PADDING_SIZE);
  656. if (!st->codec->extradata)
  657. return AVERROR(ENOMEM);
  658. st->codec->extradata_size = atom.size;
  659. get_buffer(pb, st->codec->extradata, atom.size);
  660. return 0;
  661. }
  662. static int mov_read_stco(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  663. {
  664. AVStream *st;
  665. MOVStreamContext *sc;
  666. unsigned int i, entries;
  667. if (c->fc->nb_streams < 1)
  668. return 0;
  669. st = c->fc->streams[c->fc->nb_streams-1];
  670. sc = st->priv_data;
  671. get_byte(pb); /* version */
  672. get_be24(pb); /* flags */
  673. entries = get_be32(pb);
  674. if(entries >= UINT_MAX/sizeof(int64_t))
  675. return -1;
  676. sc->chunk_offsets = av_malloc(entries * sizeof(int64_t));
  677. if (!sc->chunk_offsets)
  678. return AVERROR(ENOMEM);
  679. sc->chunk_count = entries;
  680. if (atom.type == MKTAG('s','t','c','o'))
  681. for(i=0; i<entries; i++)
  682. sc->chunk_offsets[i] = get_be32(pb);
  683. else if (atom.type == MKTAG('c','o','6','4'))
  684. for(i=0; i<entries; i++)
  685. sc->chunk_offsets[i] = get_be64(pb);
  686. else
  687. return -1;
  688. return 0;
  689. }
  690. /**
  691. * Compute codec id for 'lpcm' tag.
  692. * See CoreAudioTypes and AudioStreamBasicDescription at Apple.
  693. */
  694. enum CodecID ff_mov_get_lpcm_codec_id(int bps, int flags)
  695. {
  696. if (flags & 1) { // floating point
  697. if (flags & 2) { // big endian
  698. if (bps == 32) return CODEC_ID_PCM_F32BE;
  699. else if (bps == 64) return CODEC_ID_PCM_F64BE;
  700. } else {
  701. if (bps == 32) return CODEC_ID_PCM_F32LE;
  702. else if (bps == 64) return CODEC_ID_PCM_F64LE;
  703. }
  704. } else {
  705. if (flags & 2) {
  706. if (bps == 8)
  707. // signed integer
  708. if (flags & 4) return CODEC_ID_PCM_S8;
  709. else return CODEC_ID_PCM_U8;
  710. else if (bps == 16) return CODEC_ID_PCM_S16BE;
  711. else if (bps == 24) return CODEC_ID_PCM_S24BE;
  712. else if (bps == 32) return CODEC_ID_PCM_S32BE;
  713. } else {
  714. if (bps == 8)
  715. if (flags & 4) return CODEC_ID_PCM_S8;
  716. else return CODEC_ID_PCM_U8;
  717. else if (bps == 16) return CODEC_ID_PCM_S16LE;
  718. else if (bps == 24) return CODEC_ID_PCM_S24LE;
  719. else if (bps == 32) return CODEC_ID_PCM_S32LE;
  720. }
  721. }
  722. return CODEC_ID_NONE;
  723. }
  724. static int mov_read_stsd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  725. {
  726. AVStream *st;
  727. MOVStreamContext *sc;
  728. int j, entries, pseudo_stream_id;
  729. if (c->fc->nb_streams < 1)
  730. return 0;
  731. st = c->fc->streams[c->fc->nb_streams-1];
  732. sc = st->priv_data;
  733. get_byte(pb); /* version */
  734. get_be24(pb); /* flags */
  735. entries = get_be32(pb);
  736. for(pseudo_stream_id=0; pseudo_stream_id<entries; pseudo_stream_id++) {
  737. //Parsing Sample description table
  738. enum CodecID id;
  739. int dref_id = 1;
  740. MOVAtom a = { 0 };
  741. int64_t start_pos = url_ftell(pb);
  742. int size = get_be32(pb); /* size */
  743. uint32_t format = get_le32(pb); /* data format */
  744. if (size >= 16) {
  745. get_be32(pb); /* reserved */
  746. get_be16(pb); /* reserved */
  747. dref_id = get_be16(pb);
  748. }
  749. if (st->codec->codec_tag &&
  750. st->codec->codec_tag != format &&
  751. (c->fc->video_codec_id ? ff_codec_get_id(codec_movvideo_tags, format) != c->fc->video_codec_id
  752. : st->codec->codec_tag != MKTAG('j','p','e','g'))
  753. ){
  754. /* Multiple fourcc, we skip JPEG. This is not correct, we should
  755. * export it as a separate AVStream but this needs a few changes
  756. * in the MOV demuxer, patch welcome. */
  757. av_log(c->fc, AV_LOG_WARNING, "multiple fourcc not supported\n");
  758. url_fskip(pb, size - (url_ftell(pb) - start_pos));
  759. continue;
  760. }
  761. sc->pseudo_stream_id = st->codec->codec_tag ? -1 : pseudo_stream_id;
  762. sc->dref_id= dref_id;
  763. st->codec->codec_tag = format;
  764. id = ff_codec_get_id(codec_movaudio_tags, format);
  765. if (id<=0 && ((format&0xFFFF) == 'm'+('s'<<8) || (format&0xFFFF) == 'T'+('S'<<8)))
  766. id = ff_codec_get_id(ff_codec_wav_tags, bswap_32(format)&0xFFFF);
  767. if (st->codec->codec_type != CODEC_TYPE_VIDEO && id > 0) {
  768. st->codec->codec_type = CODEC_TYPE_AUDIO;
  769. } else if (st->codec->codec_type != CODEC_TYPE_AUDIO && /* do not overwrite codec type */
  770. format && format != MKTAG('m','p','4','s')) { /* skip old asf mpeg4 tag */
  771. id = ff_codec_get_id(codec_movvideo_tags, format);
  772. if (id <= 0)
  773. id = ff_codec_get_id(ff_codec_bmp_tags, format);
  774. if (id > 0)
  775. st->codec->codec_type = CODEC_TYPE_VIDEO;
  776. else if(st->codec->codec_type == CODEC_TYPE_DATA){
  777. id = ff_codec_get_id(ff_codec_movsubtitle_tags, format);
  778. if(id > 0)
  779. st->codec->codec_type = CODEC_TYPE_SUBTITLE;
  780. }
  781. }
  782. dprintf(c->fc, "size=%d 4CC= %c%c%c%c codec_type=%d\n", size,
  783. (format >> 0) & 0xff, (format >> 8) & 0xff, (format >> 16) & 0xff,
  784. (format >> 24) & 0xff, st->codec->codec_type);
  785. if(st->codec->codec_type==CODEC_TYPE_VIDEO) {
  786. uint8_t codec_name[32];
  787. unsigned int color_depth;
  788. int color_greyscale;
  789. st->codec->codec_id = id;
  790. get_be16(pb); /* version */
  791. get_be16(pb); /* revision level */
  792. get_be32(pb); /* vendor */
  793. get_be32(pb); /* temporal quality */
  794. get_be32(pb); /* spatial quality */
  795. st->codec->width = get_be16(pb); /* width */
  796. st->codec->height = get_be16(pb); /* height */
  797. get_be32(pb); /* horiz resolution */
  798. get_be32(pb); /* vert resolution */
  799. get_be32(pb); /* data size, always 0 */
  800. get_be16(pb); /* frames per samples */
  801. get_buffer(pb, codec_name, 32); /* codec name, pascal string */
  802. if (codec_name[0] <= 31) {
  803. int i;
  804. int pos = 0;
  805. for (i = 0; i < codec_name[0] && pos < sizeof(st->codec->codec_name) - 3; i++) {
  806. uint8_t tmp;
  807. PUT_UTF8(codec_name[i+1], tmp, st->codec->codec_name[pos++] = tmp;)
  808. }
  809. st->codec->codec_name[pos] = 0;
  810. }
  811. st->codec->bits_per_coded_sample = get_be16(pb); /* depth */
  812. st->codec->color_table_id = get_be16(pb); /* colortable id */
  813. dprintf(c->fc, "depth %d, ctab id %d\n",
  814. st->codec->bits_per_coded_sample, st->codec->color_table_id);
  815. /* figure out the palette situation */
  816. color_depth = st->codec->bits_per_coded_sample & 0x1F;
  817. color_greyscale = st->codec->bits_per_coded_sample & 0x20;
  818. /* if the depth is 2, 4, or 8 bpp, file is palettized */
  819. if ((color_depth == 2) || (color_depth == 4) ||
  820. (color_depth == 8)) {
  821. /* for palette traversal */
  822. unsigned int color_start, color_count, color_end;
  823. unsigned char r, g, b;
  824. st->codec->palctrl = av_malloc(sizeof(*st->codec->palctrl));
  825. if (color_greyscale) {
  826. int color_index, color_dec;
  827. /* compute the greyscale palette */
  828. st->codec->bits_per_coded_sample = color_depth;
  829. color_count = 1 << color_depth;
  830. color_index = 255;
  831. color_dec = 256 / (color_count - 1);
  832. for (j = 0; j < color_count; j++) {
  833. r = g = b = color_index;
  834. st->codec->palctrl->palette[j] =
  835. (r << 16) | (g << 8) | (b);
  836. color_index -= color_dec;
  837. if (color_index < 0)
  838. color_index = 0;
  839. }
  840. } else if (st->codec->color_table_id) {
  841. const uint8_t *color_table;
  842. /* if flag bit 3 is set, use the default palette */
  843. color_count = 1 << color_depth;
  844. if (color_depth == 2)
  845. color_table = ff_qt_default_palette_4;
  846. else if (color_depth == 4)
  847. color_table = ff_qt_default_palette_16;
  848. else
  849. color_table = ff_qt_default_palette_256;
  850. for (j = 0; j < color_count; j++) {
  851. r = color_table[j * 3 + 0];
  852. g = color_table[j * 3 + 1];
  853. b = color_table[j * 3 + 2];
  854. st->codec->palctrl->palette[j] =
  855. (r << 16) | (g << 8) | (b);
  856. }
  857. } else {
  858. /* load the palette from the file */
  859. color_start = get_be32(pb);
  860. color_count = get_be16(pb);
  861. color_end = get_be16(pb);
  862. if ((color_start <= 255) &&
  863. (color_end <= 255)) {
  864. for (j = color_start; j <= color_end; j++) {
  865. /* each R, G, or B component is 16 bits;
  866. * only use the top 8 bits; skip alpha bytes
  867. * up front */
  868. get_byte(pb);
  869. get_byte(pb);
  870. r = get_byte(pb);
  871. get_byte(pb);
  872. g = get_byte(pb);
  873. get_byte(pb);
  874. b = get_byte(pb);
  875. get_byte(pb);
  876. st->codec->palctrl->palette[j] =
  877. (r << 16) | (g << 8) | (b);
  878. }
  879. }
  880. }
  881. st->codec->palctrl->palette_changed = 1;
  882. }
  883. } else if(st->codec->codec_type==CODEC_TYPE_AUDIO) {
  884. int bits_per_sample, flags;
  885. uint16_t version = get_be16(pb);
  886. st->codec->codec_id = id;
  887. get_be16(pb); /* revision level */
  888. get_be32(pb); /* vendor */
  889. st->codec->channels = get_be16(pb); /* channel count */
  890. dprintf(c->fc, "audio channels %d\n", st->codec->channels);
  891. st->codec->bits_per_coded_sample = get_be16(pb); /* sample size */
  892. sc->audio_cid = get_be16(pb);
  893. get_be16(pb); /* packet size = 0 */
  894. st->codec->sample_rate = ((get_be32(pb) >> 16));
  895. //Read QT version 1 fields. In version 0 these do not exist.
  896. dprintf(c->fc, "version =%d, isom =%d\n",version,c->isom);
  897. if(!c->isom) {
  898. if(version==1) {
  899. sc->samples_per_frame = get_be32(pb);
  900. get_be32(pb); /* bytes per packet */
  901. sc->bytes_per_frame = get_be32(pb);
  902. get_be32(pb); /* bytes per sample */
  903. } else if(version==2) {
  904. get_be32(pb); /* sizeof struct only */
  905. st->codec->sample_rate = av_int2dbl(get_be64(pb)); /* float 64 */
  906. st->codec->channels = get_be32(pb);
  907. get_be32(pb); /* always 0x7F000000 */
  908. st->codec->bits_per_coded_sample = get_be32(pb); /* bits per channel if sound is uncompressed */
  909. flags = get_be32(pb); /* lpcm format specific flag */
  910. sc->bytes_per_frame = get_be32(pb); /* bytes per audio packet if constant */
  911. sc->samples_per_frame = get_be32(pb); /* lpcm frames per audio packet if constant */
  912. if (format == MKTAG('l','p','c','m'))
  913. st->codec->codec_id = ff_mov_get_lpcm_codec_id(st->codec->bits_per_coded_sample, flags);
  914. }
  915. }
  916. switch (st->codec->codec_id) {
  917. case CODEC_ID_PCM_S8:
  918. case CODEC_ID_PCM_U8:
  919. if (st->codec->bits_per_coded_sample == 16)
  920. st->codec->codec_id = CODEC_ID_PCM_S16BE;
  921. break;
  922. case CODEC_ID_PCM_S16LE:
  923. case CODEC_ID_PCM_S16BE:
  924. if (st->codec->bits_per_coded_sample == 8)
  925. st->codec->codec_id = CODEC_ID_PCM_S8;
  926. else if (st->codec->bits_per_coded_sample == 24)
  927. st->codec->codec_id =
  928. st->codec->codec_id == CODEC_ID_PCM_S16BE ?
  929. CODEC_ID_PCM_S24BE : CODEC_ID_PCM_S24LE;
  930. break;
  931. /* set values for old format before stsd version 1 appeared */
  932. case CODEC_ID_MACE3:
  933. sc->samples_per_frame = 6;
  934. sc->bytes_per_frame = 2*st->codec->channels;
  935. break;
  936. case CODEC_ID_MACE6:
  937. sc->samples_per_frame = 6;
  938. sc->bytes_per_frame = 1*st->codec->channels;
  939. break;
  940. case CODEC_ID_ADPCM_IMA_QT:
  941. sc->samples_per_frame = 64;
  942. sc->bytes_per_frame = 34*st->codec->channels;
  943. break;
  944. case CODEC_ID_GSM:
  945. sc->samples_per_frame = 160;
  946. sc->bytes_per_frame = 33;
  947. break;
  948. default:
  949. break;
  950. }
  951. bits_per_sample = av_get_bits_per_sample(st->codec->codec_id);
  952. if (bits_per_sample) {
  953. st->codec->bits_per_coded_sample = bits_per_sample;
  954. sc->sample_size = (bits_per_sample >> 3) * st->codec->channels;
  955. }
  956. } else if(st->codec->codec_type==CODEC_TYPE_SUBTITLE){
  957. // ttxt stsd contains display flags, justification, background
  958. // color, fonts, and default styles, so fake an atom to read it
  959. MOVAtom fake_atom = { .size = size - (url_ftell(pb) - start_pos) };
  960. if (format != AV_RL32("mp4s")) // mp4s contains a regular esds atom
  961. mov_read_glbl(c, pb, fake_atom);
  962. st->codec->codec_id= id;
  963. st->codec->width = sc->width;
  964. st->codec->height = sc->height;
  965. } else {
  966. /* other codec type, just skip (rtp, mp4s, tmcd ...) */
  967. url_fskip(pb, size - (url_ftell(pb) - start_pos));
  968. }
  969. /* this will read extra atoms at the end (wave, alac, damr, avcC, SMI ...) */
  970. a.size = size - (url_ftell(pb) - start_pos);
  971. if (a.size > 8) {
  972. if (mov_read_default(c, pb, a) < 0)
  973. return -1;
  974. } else if (a.size > 0)
  975. url_fskip(pb, a.size);
  976. }
  977. if(st->codec->codec_type==CODEC_TYPE_AUDIO && st->codec->sample_rate==0 && sc->time_scale>1)
  978. st->codec->sample_rate= sc->time_scale;
  979. /* special codec parameters handling */
  980. switch (st->codec->codec_id) {
  981. #if CONFIG_DV_DEMUXER
  982. case CODEC_ID_DVAUDIO:
  983. c->dv_fctx = avformat_alloc_context();
  984. c->dv_demux = dv_init_demux(c->dv_fctx);
  985. if (!c->dv_demux) {
  986. av_log(c->fc, AV_LOG_ERROR, "dv demux context init error\n");
  987. return -1;
  988. }
  989. sc->dv_audio_container = 1;
  990. st->codec->codec_id = CODEC_ID_PCM_S16LE;
  991. break;
  992. #endif
  993. /* no ifdef since parameters are always those */
  994. case CODEC_ID_QCELP:
  995. // force sample rate for qcelp when not stored in mov
  996. if (st->codec->codec_tag != MKTAG('Q','c','l','p'))
  997. st->codec->sample_rate = 8000;
  998. st->codec->frame_size= 160;
  999. st->codec->channels= 1; /* really needed */
  1000. break;
  1001. case CODEC_ID_AMR_NB:
  1002. case CODEC_ID_AMR_WB:
  1003. st->codec->frame_size= sc->samples_per_frame;
  1004. st->codec->channels= 1; /* really needed */
  1005. /* force sample rate for amr, stsd in 3gp does not store sample rate */
  1006. if (st->codec->codec_id == CODEC_ID_AMR_NB)
  1007. st->codec->sample_rate = 8000;
  1008. else if (st->codec->codec_id == CODEC_ID_AMR_WB)
  1009. st->codec->sample_rate = 16000;
  1010. break;
  1011. case CODEC_ID_MP2:
  1012. case CODEC_ID_MP3:
  1013. st->codec->codec_type = CODEC_TYPE_AUDIO; /* force type after stsd for m1a hdlr */
  1014. st->need_parsing = AVSTREAM_PARSE_FULL;
  1015. break;
  1016. case CODEC_ID_GSM:
  1017. case CODEC_ID_ADPCM_MS:
  1018. case CODEC_ID_ADPCM_IMA_WAV:
  1019. st->codec->block_align = sc->bytes_per_frame;
  1020. break;
  1021. case CODEC_ID_ALAC:
  1022. if (st->codec->extradata_size == 36) {
  1023. st->codec->frame_size = AV_RB32(st->codec->extradata+12);
  1024. st->codec->channels = AV_RB8 (st->codec->extradata+21);
  1025. }
  1026. break;
  1027. default:
  1028. break;
  1029. }
  1030. return 0;
  1031. }
  1032. static int mov_read_stsc(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1033. {
  1034. AVStream *st;
  1035. MOVStreamContext *sc;
  1036. unsigned int i, entries;
  1037. if (c->fc->nb_streams < 1)
  1038. return 0;
  1039. st = c->fc->streams[c->fc->nb_streams-1];
  1040. sc = st->priv_data;
  1041. get_byte(pb); /* version */
  1042. get_be24(pb); /* flags */
  1043. entries = get_be32(pb);
  1044. dprintf(c->fc, "track[%i].stsc.entries = %i\n", c->fc->nb_streams-1, entries);
  1045. if(entries >= UINT_MAX / sizeof(*sc->stsc_data))
  1046. return -1;
  1047. sc->stsc_data = av_malloc(entries * sizeof(*sc->stsc_data));
  1048. if (!sc->stsc_data)
  1049. return AVERROR(ENOMEM);
  1050. sc->stsc_count = entries;
  1051. for(i=0; i<entries; i++) {
  1052. sc->stsc_data[i].first = get_be32(pb);
  1053. sc->stsc_data[i].count = get_be32(pb);
  1054. sc->stsc_data[i].id = get_be32(pb);
  1055. }
  1056. return 0;
  1057. }
  1058. static int mov_read_stps(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1059. {
  1060. AVStream *st;
  1061. MOVStreamContext *sc;
  1062. unsigned i, entries;
  1063. if (c->fc->nb_streams < 1)
  1064. return 0;
  1065. st = c->fc->streams[c->fc->nb_streams-1];
  1066. sc = st->priv_data;
  1067. get_be32(pb); // version + flags
  1068. entries = get_be32(pb);
  1069. if (entries >= UINT_MAX / sizeof(*sc->stps_data))
  1070. return -1;
  1071. sc->stps_data = av_malloc(entries * sizeof(*sc->stps_data));
  1072. if (!sc->stps_data)
  1073. return AVERROR(ENOMEM);
  1074. sc->stps_count = entries;
  1075. for (i = 0; i < entries; i++) {
  1076. sc->stps_data[i] = get_be32(pb);
  1077. //dprintf(c->fc, "stps %d\n", sc->stps_data[i]);
  1078. }
  1079. return 0;
  1080. }
  1081. static int mov_read_stss(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1082. {
  1083. AVStream *st;
  1084. MOVStreamContext *sc;
  1085. unsigned int i, entries;
  1086. if (c->fc->nb_streams < 1)
  1087. return 0;
  1088. st = c->fc->streams[c->fc->nb_streams-1];
  1089. sc = st->priv_data;
  1090. get_byte(pb); /* version */
  1091. get_be24(pb); /* flags */
  1092. entries = get_be32(pb);
  1093. dprintf(c->fc, "keyframe_count = %d\n", entries);
  1094. if(entries >= UINT_MAX / sizeof(int))
  1095. return -1;
  1096. sc->keyframes = av_malloc(entries * sizeof(int));
  1097. if (!sc->keyframes)
  1098. return AVERROR(ENOMEM);
  1099. sc->keyframe_count = entries;
  1100. for(i=0; i<entries; i++) {
  1101. sc->keyframes[i] = get_be32(pb);
  1102. //dprintf(c->fc, "keyframes[]=%d\n", sc->keyframes[i]);
  1103. }
  1104. return 0;
  1105. }
  1106. static int mov_read_stsz(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1107. {
  1108. AVStream *st;
  1109. MOVStreamContext *sc;
  1110. unsigned int i, entries, sample_size, field_size, num_bytes;
  1111. GetBitContext gb;
  1112. unsigned char* buf;
  1113. if (c->fc->nb_streams < 1)
  1114. return 0;
  1115. st = c->fc->streams[c->fc->nb_streams-1];
  1116. sc = st->priv_data;
  1117. get_byte(pb); /* version */
  1118. get_be24(pb); /* flags */
  1119. if (atom.type == MKTAG('s','t','s','z')) {
  1120. sample_size = get_be32(pb);
  1121. if (!sc->sample_size) /* do not overwrite value computed in stsd */
  1122. sc->sample_size = sample_size;
  1123. field_size = 32;
  1124. } else {
  1125. sample_size = 0;
  1126. get_be24(pb); /* reserved */
  1127. field_size = get_byte(pb);
  1128. }
  1129. entries = get_be32(pb);
  1130. dprintf(c->fc, "sample_size = %d sample_count = %d\n", sc->sample_size, entries);
  1131. sc->sample_count = entries;
  1132. if (sample_size)
  1133. return 0;
  1134. if (field_size != 4 && field_size != 8 && field_size != 16 && field_size != 32) {
  1135. av_log(c->fc, AV_LOG_ERROR, "Invalid sample field size %d\n", field_size);
  1136. return -1;
  1137. }
  1138. if (entries >= UINT_MAX / sizeof(int) || entries >= (UINT_MAX - 4) / field_size)
  1139. return -1;
  1140. sc->sample_sizes = av_malloc(entries * sizeof(int));
  1141. if (!sc->sample_sizes)
  1142. return AVERROR(ENOMEM);
  1143. num_bytes = (entries*field_size+4)>>3;
  1144. buf = av_malloc(num_bytes+FF_INPUT_BUFFER_PADDING_SIZE);
  1145. if (!buf) {
  1146. av_freep(&sc->sample_sizes);
  1147. return AVERROR(ENOMEM);
  1148. }
  1149. if (get_buffer(pb, buf, num_bytes) < num_bytes) {
  1150. av_freep(&sc->sample_sizes);
  1151. av_free(buf);
  1152. return -1;
  1153. }
  1154. init_get_bits(&gb, buf, 8*num_bytes);
  1155. for(i=0; i<entries; i++)
  1156. sc->sample_sizes[i] = get_bits_long(&gb, field_size);
  1157. av_free(buf);
  1158. return 0;
  1159. }
  1160. static int mov_read_stts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1161. {
  1162. AVStream *st;
  1163. MOVStreamContext *sc;
  1164. unsigned int i, entries;
  1165. int64_t duration=0;
  1166. int64_t total_sample_count=0;
  1167. if (c->fc->nb_streams < 1)
  1168. return 0;
  1169. st = c->fc->streams[c->fc->nb_streams-1];
  1170. sc = st->priv_data;
  1171. get_byte(pb); /* version */
  1172. get_be24(pb); /* flags */
  1173. entries = get_be32(pb);
  1174. dprintf(c->fc, "track[%i].stts.entries = %i\n", c->fc->nb_streams-1, entries);
  1175. if(entries >= UINT_MAX / sizeof(*sc->stts_data))
  1176. return -1;
  1177. sc->stts_data = av_malloc(entries * sizeof(*sc->stts_data));
  1178. if (!sc->stts_data)
  1179. return AVERROR(ENOMEM);
  1180. sc->stts_count = entries;
  1181. for(i=0; i<entries; i++) {
  1182. int sample_duration;
  1183. int sample_count;
  1184. sample_count=get_be32(pb);
  1185. sample_duration = get_be32(pb);
  1186. sc->stts_data[i].count= sample_count;
  1187. sc->stts_data[i].duration= sample_duration;
  1188. dprintf(c->fc, "sample_count=%d, sample_duration=%d\n",sample_count,sample_duration);
  1189. duration+=(int64_t)sample_duration*sample_count;
  1190. total_sample_count+=sample_count;
  1191. }
  1192. st->nb_frames= total_sample_count;
  1193. if(duration)
  1194. st->duration= duration;
  1195. return 0;
  1196. }
  1197. static int mov_read_ctts(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1198. {
  1199. AVStream *st;
  1200. MOVStreamContext *sc;
  1201. unsigned int i, entries;
  1202. if (c->fc->nb_streams < 1)
  1203. return 0;
  1204. st = c->fc->streams[c->fc->nb_streams-1];
  1205. sc = st->priv_data;
  1206. get_byte(pb); /* version */
  1207. get_be24(pb); /* flags */
  1208. entries = get_be32(pb);
  1209. dprintf(c->fc, "track[%i].ctts.entries = %i\n", c->fc->nb_streams-1, entries);
  1210. if(entries >= UINT_MAX / sizeof(*sc->ctts_data))
  1211. return -1;
  1212. sc->ctts_data = av_malloc(entries * sizeof(*sc->ctts_data));
  1213. if (!sc->ctts_data)
  1214. return AVERROR(ENOMEM);
  1215. sc->ctts_count = entries;
  1216. for(i=0; i<entries; i++) {
  1217. int count =get_be32(pb);
  1218. int duration =get_be32(pb);
  1219. sc->ctts_data[i].count = count;
  1220. sc->ctts_data[i].duration= duration;
  1221. if (duration < 0)
  1222. sc->dts_shift = FFMAX(sc->dts_shift, -duration);
  1223. }
  1224. dprintf(c->fc, "dts shift %d\n", sc->dts_shift);
  1225. return 0;
  1226. }
  1227. static void mov_build_index(MOVContext *mov, AVStream *st)
  1228. {
  1229. MOVStreamContext *sc = st->priv_data;
  1230. int64_t current_offset;
  1231. int64_t current_dts = 0;
  1232. unsigned int stts_index = 0;
  1233. unsigned int stsc_index = 0;
  1234. unsigned int stss_index = 0;
  1235. unsigned int stps_index = 0;
  1236. unsigned int i, j;
  1237. uint64_t stream_size = 0;
  1238. /* adjust first dts according to edit list */
  1239. if (sc->time_offset) {
  1240. int rescaled = sc->time_offset < 0 ? av_rescale(sc->time_offset, sc->time_scale, mov->time_scale) : sc->time_offset;
  1241. current_dts = -rescaled;
  1242. if (sc->ctts_data && sc->ctts_data[0].duration / sc->stts_data[0].duration > 16) {
  1243. /* more than 16 frames delay, dts are likely wrong
  1244. this happens with files created by iMovie */
  1245. sc->wrong_dts = 1;
  1246. st->codec->has_b_frames = 1;
  1247. }
  1248. }
  1249. /* only use old uncompressed audio chunk demuxing when stts specifies it */
  1250. if (!(st->codec->codec_type == CODEC_TYPE_AUDIO &&
  1251. sc->stts_count == 1 && sc->stts_data[0].duration == 1)) {
  1252. unsigned int current_sample = 0;
  1253. unsigned int stts_sample = 0;
  1254. unsigned int sample_size;
  1255. unsigned int distance = 0;
  1256. int key_off = sc->keyframes && sc->keyframes[0] == 1;
  1257. current_dts -= sc->dts_shift;
  1258. for (i = 0; i < sc->chunk_count; i++) {
  1259. current_offset = sc->chunk_offsets[i];
  1260. if (stsc_index + 1 < sc->stsc_count &&
  1261. i + 1 == sc->stsc_data[stsc_index + 1].first)
  1262. stsc_index++;
  1263. for (j = 0; j < sc->stsc_data[stsc_index].count; j++) {
  1264. int keyframe = 0;
  1265. if (current_sample >= sc->sample_count) {
  1266. av_log(mov->fc, AV_LOG_ERROR, "wrong sample count\n");
  1267. return;
  1268. }
  1269. if (!sc->keyframe_count || current_sample+key_off == sc->keyframes[stss_index]) {
  1270. keyframe = 1;
  1271. if (stss_index + 1 < sc->keyframe_count)
  1272. stss_index++;
  1273. } else if (sc->stps_count && current_sample+key_off == sc->stps_data[stps_index]) {
  1274. keyframe = 1;
  1275. if (stps_index + 1 < sc->stps_count)
  1276. stps_index++;
  1277. }
  1278. if (keyframe)
  1279. distance = 0;
  1280. sample_size = sc->sample_size > 0 ? sc->sample_size : sc->sample_sizes[current_sample];
  1281. if(sc->pseudo_stream_id == -1 ||
  1282. sc->stsc_data[stsc_index].id - 1 == sc->pseudo_stream_id) {
  1283. av_add_index_entry(st, current_offset, current_dts, sample_size, distance,
  1284. keyframe ? AVINDEX_KEYFRAME : 0);
  1285. dprintf(mov->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
  1286. "size %d, distance %d, keyframe %d\n", st->index, current_sample,
  1287. current_offset, current_dts, sample_size, distance, keyframe);
  1288. }
  1289. current_offset += sample_size;
  1290. stream_size += sample_size;
  1291. current_dts += sc->stts_data[stts_index].duration;
  1292. distance++;
  1293. stts_sample++;
  1294. current_sample++;
  1295. if (stts_index + 1 < sc->stts_count && stts_sample == sc->stts_data[stts_index].count) {
  1296. stts_sample = 0;
  1297. stts_index++;
  1298. }
  1299. }
  1300. }
  1301. if (st->duration > 0)
  1302. st->codec->bit_rate = stream_size*8*sc->time_scale/st->duration;
  1303. } else {
  1304. for (i = 0; i < sc->chunk_count; i++) {
  1305. unsigned chunk_samples;
  1306. current_offset = sc->chunk_offsets[i];
  1307. if (stsc_index + 1 < sc->stsc_count &&
  1308. i + 1 == sc->stsc_data[stsc_index + 1].first)
  1309. stsc_index++;
  1310. chunk_samples = sc->stsc_data[stsc_index].count;
  1311. if (sc->samples_per_frame && chunk_samples % sc->samples_per_frame) {
  1312. av_log(mov->fc, AV_LOG_ERROR, "error unaligned chunk\n");
  1313. return;
  1314. }
  1315. while (chunk_samples > 0) {
  1316. unsigned size, samples;
  1317. if (sc->samples_per_frame >= 160) { // gsm
  1318. samples = sc->samples_per_frame;
  1319. size = sc->bytes_per_frame;
  1320. } else {
  1321. if (sc->samples_per_frame > 1) {
  1322. samples = FFMIN((1024 / sc->samples_per_frame)*
  1323. sc->samples_per_frame, chunk_samples);
  1324. size = (samples / sc->samples_per_frame) * sc->bytes_per_frame;
  1325. } else {
  1326. samples = FFMIN(1024, chunk_samples);
  1327. size = samples * sc->sample_size;
  1328. }
  1329. }
  1330. av_add_index_entry(st, current_offset, current_dts, size, 0, AVINDEX_KEYFRAME);
  1331. dprintf(mov->fc, "AVIndex stream %d, chunk %d, offset %"PRIx64", dts %"PRId64", "
  1332. "size %d, duration %d\n", st->index, i, current_offset, current_dts,
  1333. size, samples);
  1334. current_offset += size;
  1335. current_dts += samples;
  1336. chunk_samples -= samples;
  1337. }
  1338. }
  1339. }
  1340. }
  1341. static int mov_open_dref(ByteIOContext **pb, char *src, MOVDref *ref)
  1342. {
  1343. /* try absolute path */
  1344. if (!url_fopen(pb, ref->path, URL_RDONLY))
  1345. return 0;
  1346. /* try relative path */
  1347. if (ref->nlvl_to > 0 && ref->nlvl_from > 0) {
  1348. char filename[1024];
  1349. char *src_path;
  1350. int i, l;
  1351. /* find a source dir */
  1352. src_path = strrchr(src, '/');
  1353. if (src_path)
  1354. src_path++;
  1355. else
  1356. src_path = src;
  1357. /* find a next level down to target */
  1358. for (i = 0, l = strlen(ref->path) - 1; l >= 0; l--)
  1359. if (ref->path[l] == '/') {
  1360. if (i == ref->nlvl_to - 1)
  1361. break;
  1362. else
  1363. i++;
  1364. }
  1365. /* compose filename if next level down to target was found */
  1366. if (i == ref->nlvl_to - 1) {
  1367. memcpy(filename, src, src_path - src);
  1368. filename[src_path - src] = 0;
  1369. for (i = 1; i < ref->nlvl_from; i++)
  1370. av_strlcat(filename, "../", 1024);
  1371. av_strlcat(filename, ref->path + l + 1, 1024);
  1372. if (!url_fopen(pb, filename, URL_RDONLY))
  1373. return 0;
  1374. }
  1375. }
  1376. return AVERROR(ENOENT);
  1377. };
  1378. static int mov_read_trak(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1379. {
  1380. AVStream *st;
  1381. MOVStreamContext *sc;
  1382. int ret;
  1383. st = av_new_stream(c->fc, c->fc->nb_streams);
  1384. if (!st) return AVERROR(ENOMEM);
  1385. sc = av_mallocz(sizeof(MOVStreamContext));
  1386. if (!sc) return AVERROR(ENOMEM);
  1387. st->priv_data = sc;
  1388. st->codec->codec_type = CODEC_TYPE_DATA;
  1389. sc->ffindex = st->index;
  1390. if ((ret = mov_read_default(c, pb, atom)) < 0)
  1391. return ret;
  1392. /* sanity checks */
  1393. if (sc->chunk_count && (!sc->stts_count || !sc->stsc_count ||
  1394. (!sc->sample_size && !sc->sample_count))) {
  1395. av_log(c->fc, AV_LOG_ERROR, "stream %d, missing mandatory atoms, broken header\n",
  1396. st->index);
  1397. return 0;
  1398. }
  1399. if (!sc->time_scale) {
  1400. av_log(c->fc, AV_LOG_WARNING, "stream %d, timescale not set\n", st->index);
  1401. sc->time_scale = c->time_scale;
  1402. if (!sc->time_scale)
  1403. sc->time_scale = 1;
  1404. }
  1405. av_set_pts_info(st, 64, 1, sc->time_scale);
  1406. if (st->codec->codec_type == CODEC_TYPE_AUDIO &&
  1407. !st->codec->frame_size && sc->stts_count == 1) {
  1408. st->codec->frame_size = av_rescale(sc->stts_data[0].duration,
  1409. st->codec->sample_rate, sc->time_scale);
  1410. dprintf(c->fc, "frame size %d\n", st->codec->frame_size);
  1411. }
  1412. mov_build_index(c, st);
  1413. if (sc->dref_id-1 < sc->drefs_count && sc->drefs[sc->dref_id-1].path) {
  1414. MOVDref *dref = &sc->drefs[sc->dref_id - 1];
  1415. if (mov_open_dref(&sc->pb, c->fc->filename, dref) < 0)
  1416. av_log(c->fc, AV_LOG_ERROR,
  1417. "stream %d, error opening alias: path='%s', dir='%s', "
  1418. "filename='%s', volume='%s', nlvl_from=%d, nlvl_to=%d\n",
  1419. st->index, dref->path, dref->dir, dref->filename,
  1420. dref->volume, dref->nlvl_from, dref->nlvl_to);
  1421. } else
  1422. sc->pb = c->fc->pb;
  1423. switch (st->codec->codec_id) {
  1424. #if CONFIG_H261_DECODER
  1425. case CODEC_ID_H261:
  1426. #endif
  1427. #if CONFIG_H263_DECODER
  1428. case CODEC_ID_H263:
  1429. #endif
  1430. #if CONFIG_H264_DECODER
  1431. case CODEC_ID_H264:
  1432. #endif
  1433. #if CONFIG_MPEG4_DECODER
  1434. case CODEC_ID_MPEG4:
  1435. #endif
  1436. st->codec->width = 0; /* let decoder init width/height */
  1437. st->codec->height= 0;
  1438. break;
  1439. }
  1440. /* Do not need those anymore. */
  1441. av_freep(&sc->chunk_offsets);
  1442. av_freep(&sc->stsc_data);
  1443. av_freep(&sc->sample_sizes);
  1444. av_freep(&sc->keyframes);
  1445. av_freep(&sc->stts_data);
  1446. av_freep(&sc->stps_data);
  1447. return 0;
  1448. }
  1449. static int mov_read_ilst(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1450. {
  1451. int ret;
  1452. c->itunes_metadata = 1;
  1453. ret = mov_read_default(c, pb, atom);
  1454. c->itunes_metadata = 0;
  1455. return ret;
  1456. }
  1457. static int mov_read_meta(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1458. {
  1459. while (atom.size > 8) {
  1460. uint32_t tag = get_le32(pb);
  1461. atom.size -= 4;
  1462. if (tag == MKTAG('h','d','l','r')) {
  1463. url_fseek(pb, -8, SEEK_CUR);
  1464. atom.size += 8;
  1465. return mov_read_default(c, pb, atom);
  1466. }
  1467. }
  1468. return 0;
  1469. }
  1470. static int mov_read_tkhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1471. {
  1472. int i;
  1473. int width;
  1474. int height;
  1475. int64_t disp_transform[2];
  1476. int display_matrix[3][2];
  1477. AVStream *st;
  1478. MOVStreamContext *sc;
  1479. int version;
  1480. if (c->fc->nb_streams < 1)
  1481. return 0;
  1482. st = c->fc->streams[c->fc->nb_streams-1];
  1483. sc = st->priv_data;
  1484. version = get_byte(pb);
  1485. get_be24(pb); /* flags */
  1486. /*
  1487. MOV_TRACK_ENABLED 0x0001
  1488. MOV_TRACK_IN_MOVIE 0x0002
  1489. MOV_TRACK_IN_PREVIEW 0x0004
  1490. MOV_TRACK_IN_POSTER 0x0008
  1491. */
  1492. if (version == 1) {
  1493. get_be64(pb);
  1494. get_be64(pb);
  1495. } else {
  1496. get_be32(pb); /* creation time */
  1497. get_be32(pb); /* modification time */
  1498. }
1499. st->id = (int)get_be32(pb); /* track id (never 0) */
  1500. get_be32(pb); /* reserved */
1501. /* high-level duration (taking edits into account) in the movie timebase */
  1502. (version == 1) ? get_be64(pb) : get_be32(pb);
  1503. get_be32(pb); /* reserved */
  1504. get_be32(pb); /* reserved */
  1505. get_be16(pb); /* layer */
  1506. get_be16(pb); /* alternate group */
  1507. get_be16(pb); /* volume */
  1508. get_be16(pb); /* reserved */
1509. // read in the display matrix (outlined in ISO 14496-12, Section 6.2.2)
1510. // the values are kept in 16.16 fixed-point format through all calculations
1511. // ignore u, v and z since the scale factor is not needed to compute the aspect ratio
  1512. for (i = 0; i < 3; i++) {
  1513. display_matrix[i][0] = get_be32(pb); // 16.16 fixed point
  1514. display_matrix[i][1] = get_be32(pb); // 16.16 fixed point
  1515. get_be32(pb); // 2.30 fixed point (not used)
  1516. }
  1517. width = get_be32(pb); // 16.16 fixed point track width
  1518. height = get_be32(pb); // 16.16 fixed point track height
  1519. sc->width = width >> 16;
  1520. sc->height = height >> 16;
  1521. // transform the display width/height according to the matrix
  1522. // skip this if the display matrix is the default identity matrix
1523. // or if it is rotating the picture, e.g. iPhone 3GS
  1524. // to keep the same scale, use [width height 1<<16]
  1525. if (width && height &&
  1526. ((display_matrix[0][0] != 65536 ||
  1527. display_matrix[1][1] != 65536) &&
  1528. !display_matrix[0][1] &&
  1529. !display_matrix[1][0] &&
  1530. !display_matrix[2][0] && !display_matrix[2][1])) {
  1531. for (i = 0; i < 2; i++)
  1532. disp_transform[i] =
  1533. (int64_t) width * display_matrix[0][i] +
  1534. (int64_t) height * display_matrix[1][i] +
  1535. ((int64_t) display_matrix[2][i] << 16);
  1536. //sample aspect ratio is new width/height divided by old width/height
  1537. st->sample_aspect_ratio = av_d2q(
  1538. ((double) disp_transform[0] * height) /
  1539. ((double) disp_transform[1] * width), INT_MAX);
  1540. }
  1541. return 0;
  1542. }
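/*
 * 'tfhd' (track fragment header): every flag bit marks an optional field;
 * anything absent is inherited from the matching 'trex' defaults:
 *   0x01 base data offset (otherwise the enclosing 'moof' offset is used)
 *   0x02 sample description index (stsd_id)
 *   0x08 default sample duration
 *   0x10 default sample size
 *   0x20 default sample flags
 */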
  1543. static int mov_read_tfhd(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1544. {
  1545. MOVFragment *frag = &c->fragment;
  1546. MOVTrackExt *trex = NULL;
  1547. int flags, track_id, i;
  1548. get_byte(pb); /* version */
  1549. flags = get_be24(pb);
  1550. track_id = get_be32(pb);
  1551. if (!track_id)
  1552. return -1;
  1553. frag->track_id = track_id;
  1554. for (i = 0; i < c->trex_count; i++)
  1555. if (c->trex_data[i].track_id == frag->track_id) {
  1556. trex = &c->trex_data[i];
  1557. break;
  1558. }
  1559. if (!trex) {
  1560. av_log(c->fc, AV_LOG_ERROR, "could not find corresponding trex\n");
  1561. return -1;
  1562. }
  1563. if (flags & 0x01) frag->base_data_offset = get_be64(pb);
  1564. else frag->base_data_offset = frag->moof_offset;
  1565. if (flags & 0x02) frag->stsd_id = get_be32(pb);
  1566. else frag->stsd_id = trex->stsd_id;
  1567. frag->duration = flags & 0x08 ? get_be32(pb) : trex->duration;
  1568. frag->size = flags & 0x10 ? get_be32(pb) : trex->size;
  1569. frag->flags = flags & 0x20 ? get_be32(pb) : trex->flags;
  1570. dprintf(c->fc, "frag flags 0x%x\n", frag->flags);
  1571. return 0;
  1572. }
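/*
 * 'trex' (track extends): appends one entry of per-track defaults (sample
 * description index, duration, size, flags) used by movie fragments; the
 * table is grown with av_realloc() and released in mov_read_close().
 */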
  1573. static int mov_read_trex(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1574. {
  1575. MOVTrackExt *trex;
  1576. if ((uint64_t)c->trex_count+1 >= UINT_MAX / sizeof(*c->trex_data))
  1577. return -1;
  1578. trex = av_realloc(c->trex_data, (c->trex_count+1)*sizeof(*c->trex_data));
  1579. if (!trex)
  1580. return AVERROR(ENOMEM);
  1581. c->trex_data = trex;
  1582. trex = &c->trex_data[c->trex_count++];
  1583. get_byte(pb); /* version */
  1584. get_be24(pb); /* flags */
  1585. trex->track_id = get_be32(pb);
  1586. trex->stsd_id = get_be32(pb);
  1587. trex->duration = get_be32(pb);
  1588. trex->size = get_be32(pb);
  1589. trex->flags = get_be32(pb);
  1590. return 0;
  1591. }
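/*
 * 'trun' (track fragment run): adds one index entry per sample.  Optional
 * per-run and per-sample fields are again selected by flag bits:
 *   0x001 data offset              0x004 first sample flags
 *   0x100 sample duration          0x200 sample size
 *   0x400 sample flags             0x800 sample composition time offset
 * Composition offsets are appended to the same ctts table the 'ctts' atom
 * fills for non-fragmented files.
 */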
  1592. static int mov_read_trun(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1593. {
  1594. MOVFragment *frag = &c->fragment;
  1595. AVStream *st = NULL;
  1596. MOVStreamContext *sc;
  1597. uint64_t offset;
  1598. int64_t dts;
  1599. int data_offset = 0;
  1600. unsigned entries, first_sample_flags = frag->flags;
  1601. int flags, distance, i;
  1602. for (i = 0; i < c->fc->nb_streams; i++) {
  1603. if (c->fc->streams[i]->id == frag->track_id) {
  1604. st = c->fc->streams[i];
  1605. break;
  1606. }
  1607. }
  1608. if (!st) {
  1609. av_log(c->fc, AV_LOG_ERROR, "could not find corresponding track id %d\n", frag->track_id);
  1610. return -1;
  1611. }
  1612. sc = st->priv_data;
  1613. if (sc->pseudo_stream_id+1 != frag->stsd_id)
  1614. return 0;
  1615. get_byte(pb); /* version */
  1616. flags = get_be24(pb);
  1617. entries = get_be32(pb);
  1618. dprintf(c->fc, "flags 0x%x entries %d\n", flags, entries);
  1619. if (flags & 0x001) data_offset = get_be32(pb);
  1620. if (flags & 0x004) first_sample_flags = get_be32(pb);
  1621. if (flags & 0x800) {
  1622. MOVStts *ctts_data;
  1623. if ((uint64_t)entries+sc->ctts_count >= UINT_MAX/sizeof(*sc->ctts_data))
  1624. return -1;
  1625. ctts_data = av_realloc(sc->ctts_data,
  1626. (entries+sc->ctts_count)*sizeof(*sc->ctts_data));
  1627. if (!ctts_data)
  1628. return AVERROR(ENOMEM);
  1629. sc->ctts_data = ctts_data;
  1630. }
  1631. dts = st->duration;
  1632. offset = frag->base_data_offset + data_offset;
  1633. distance = 0;
  1634. dprintf(c->fc, "first sample flags 0x%x\n", first_sample_flags);
  1635. for (i = 0; i < entries; i++) {
  1636. unsigned sample_size = frag->size;
  1637. int sample_flags = i ? frag->flags : first_sample_flags;
  1638. unsigned sample_duration = frag->duration;
  1639. int keyframe;
  1640. if (flags & 0x100) sample_duration = get_be32(pb);
  1641. if (flags & 0x200) sample_size = get_be32(pb);
  1642. if (flags & 0x400) sample_flags = get_be32(pb);
  1643. if (flags & 0x800) {
  1644. sc->ctts_data[sc->ctts_count].count = 1;
  1645. sc->ctts_data[sc->ctts_count].duration = get_be32(pb);
  1646. sc->ctts_count++;
  1647. }
  1648. if ((keyframe = st->codec->codec_type == CODEC_TYPE_AUDIO ||
  1649. (flags & 0x004 && !i && !sample_flags) || sample_flags & 0x2000000))
  1650. distance = 0;
  1651. av_add_index_entry(st, offset, dts, sample_size, distance,
  1652. keyframe ? AVINDEX_KEYFRAME : 0);
  1653. dprintf(c->fc, "AVIndex stream %d, sample %d, offset %"PRIx64", dts %"PRId64", "
  1654. "size %d, distance %d, keyframe %d\n", st->index, sc->sample_count+i,
  1655. offset, dts, sample_size, distance, keyframe);
  1656. distance++;
  1657. dts += sample_duration;
  1658. offset += sample_size;
  1659. }
  1660. frag->moof_offset = offset;
  1661. st->duration = dts;
  1662. return 0;
  1663. }
1664. /* According to the spec this atom should be empty, but some buggy files put a 'moov' atom inside it, */
1665. /* e.g. files created with Adobe Premiere 5.0; for samples see */
1666. /* http://graphics.tudelft.nl/~wouter/publications/soundtests/ */
  1667. static int mov_read_wide(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1668. {
  1669. int err;
  1670. if (atom.size < 8)
  1671. return 0; /* continue */
  1672. if (get_be32(pb) != 0) { /* 0 sized mdat atom... use the 'wide' atom size */
  1673. url_fskip(pb, atom.size - 4);
  1674. return 0;
  1675. }
  1676. atom.type = get_le32(pb);
  1677. atom.size -= 8;
  1678. if (atom.type != MKTAG('m','d','a','t')) {
  1679. url_fskip(pb, atom.size);
  1680. return 0;
  1681. }
  1682. err = mov_read_mdat(c, pb, atom);
  1683. return err;
  1684. }
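/*
 * 'cmov' (compressed movie): expects a 'dcom' atom declaring zlib
 * compression, then a 'cmvd' atom whose payload is inflated into a memory
 * buffer and re-parsed as a regular 'moov' atom.  Only available when zlib
 * support is compiled in.
 */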
  1685. static int mov_read_cmov(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1686. {
  1687. #if CONFIG_ZLIB
  1688. ByteIOContext ctx;
  1689. uint8_t *cmov_data;
  1690. uint8_t *moov_data; /* uncompressed data */
  1691. long cmov_len, moov_len;
  1692. int ret = -1;
  1693. get_be32(pb); /* dcom atom */
  1694. if (get_le32(pb) != MKTAG('d','c','o','m'))
  1695. return -1;
  1696. if (get_le32(pb) != MKTAG('z','l','i','b')) {
1697. av_log(c->fc, AV_LOG_ERROR, "unknown compression for cmov atom!\n");
  1698. return -1;
  1699. }
  1700. get_be32(pb); /* cmvd atom */
  1701. if (get_le32(pb) != MKTAG('c','m','v','d'))
  1702. return -1;
  1703. moov_len = get_be32(pb); /* uncompressed size */
  1704. cmov_len = atom.size - 6 * 4;
  1705. cmov_data = av_malloc(cmov_len);
  1706. if (!cmov_data)
  1707. return AVERROR(ENOMEM);
  1708. moov_data = av_malloc(moov_len);
  1709. if (!moov_data) {
  1710. av_free(cmov_data);
  1711. return AVERROR(ENOMEM);
  1712. }
  1713. get_buffer(pb, cmov_data, cmov_len);
  1714. if(uncompress (moov_data, (uLongf *) &moov_len, (const Bytef *)cmov_data, cmov_len) != Z_OK)
  1715. goto free_and_return;
  1716. if(init_put_byte(&ctx, moov_data, moov_len, 0, NULL, NULL, NULL, NULL) != 0)
  1717. goto free_and_return;
  1718. atom.type = MKTAG('m','o','o','v');
  1719. atom.size = moov_len;
  1720. #ifdef DEBUG
  1721. // { int fd = open("/tmp/uncompheader.mov", O_WRONLY | O_CREAT); write(fd, moov_data, moov_len); close(fd); }
  1722. #endif
  1723. ret = mov_read_default(c, &ctx, atom);
  1724. free_and_return:
  1725. av_free(moov_data);
  1726. av_free(cmov_data);
  1727. return ret;
  1728. #else
  1729. av_log(c->fc, AV_LOG_ERROR, "this file requires zlib support compiled in\n");
  1730. return -1;
  1731. #endif
  1732. }
  1733. /* edit list atom */
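/*
 * Only the first entry is honoured, to derive the stream's initial time
 * offset; an empty edit (media time -1) becomes a delay of the edit's
 * duration.  Files with several entries only trigger a warning.
 */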
  1734. static int mov_read_elst(MOVContext *c, ByteIOContext *pb, MOVAtom atom)
  1735. {
  1736. MOVStreamContext *sc;
  1737. int i, edit_count;
  1738. if (c->fc->nb_streams < 1)
  1739. return 0;
  1740. sc = c->fc->streams[c->fc->nb_streams-1]->priv_data;
  1741. get_byte(pb); /* version */
  1742. get_be24(pb); /* flags */
  1743. edit_count = get_be32(pb); /* entries */
  1744. if((uint64_t)edit_count*12+8 > atom.size)
  1745. return -1;
  1746. for(i=0; i<edit_count; i++){
  1747. int time;
  1748. int duration = get_be32(pb); /* Track duration */
  1749. time = get_be32(pb); /* Media time */
  1750. get_be32(pb); /* Media rate */
  1751. if (i == 0 && time >= -1) {
  1752. sc->time_offset = time != -1 ? time : -duration;
  1753. }
  1754. }
  1755. if(edit_count > 1)
  1756. av_log(c->fc, AV_LOG_WARNING, "multiple edit list entries, "
  1757. "a/v desync might occur, patch welcome\n");
  1758. dprintf(c->fc, "track[%i].edit_count = %i\n", c->fc->nb_streams-1, edit_count);
  1759. return 0;
  1760. }
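/*
 * Dispatch table mapping atom fourccs to their parse functions, terminated
 * by a { 0, NULL } sentinel.
 */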
  1761. static const MOVParseTableEntry mov_default_parse_table[] = {
  1762. { MKTAG('a','v','s','s'), mov_read_extradata },
  1763. { MKTAG('c','o','6','4'), mov_read_stco },
  1764. { MKTAG('c','t','t','s'), mov_read_ctts }, /* composition time to sample */
  1765. { MKTAG('d','i','n','f'), mov_read_default },
  1766. { MKTAG('d','r','e','f'), mov_read_dref },
  1767. { MKTAG('e','d','t','s'), mov_read_default },
  1768. { MKTAG('e','l','s','t'), mov_read_elst },
  1769. { MKTAG('e','n','d','a'), mov_read_enda },
  1770. { MKTAG('f','i','e','l'), mov_read_extradata },
  1771. { MKTAG('f','t','y','p'), mov_read_ftyp },
  1772. { MKTAG('g','l','b','l'), mov_read_glbl },
  1773. { MKTAG('h','d','l','r'), mov_read_hdlr },
  1774. { MKTAG('i','l','s','t'), mov_read_ilst },
  1775. { MKTAG('j','p','2','h'), mov_read_extradata },
  1776. { MKTAG('m','d','a','t'), mov_read_mdat },
  1777. { MKTAG('m','d','h','d'), mov_read_mdhd },
  1778. { MKTAG('m','d','i','a'), mov_read_default },
  1779. { MKTAG('m','e','t','a'), mov_read_meta },
  1780. { MKTAG('m','i','n','f'), mov_read_default },
  1781. { MKTAG('m','o','o','f'), mov_read_moof },
  1782. { MKTAG('m','o','o','v'), mov_read_moov },
  1783. { MKTAG('m','v','e','x'), mov_read_default },
  1784. { MKTAG('m','v','h','d'), mov_read_mvhd },
  1785. { MKTAG('S','M','I',' '), mov_read_smi }, /* Sorenson extension ??? */
  1786. { MKTAG('a','l','a','c'), mov_read_extradata }, /* alac specific atom */
  1787. { MKTAG('a','v','c','C'), mov_read_glbl },
  1788. { MKTAG('p','a','s','p'), mov_read_pasp },
  1789. { MKTAG('s','t','b','l'), mov_read_default },
  1790. { MKTAG('s','t','c','o'), mov_read_stco },
  1791. { MKTAG('s','t','p','s'), mov_read_stps },
  1792. { MKTAG('s','t','s','c'), mov_read_stsc },
  1793. { MKTAG('s','t','s','d'), mov_read_stsd }, /* sample description */
  1794. { MKTAG('s','t','s','s'), mov_read_stss }, /* sync sample */
  1795. { MKTAG('s','t','s','z'), mov_read_stsz }, /* sample size */
  1796. { MKTAG('s','t','t','s'), mov_read_stts },
  1797. { MKTAG('s','t','z','2'), mov_read_stsz }, /* compact sample size */
  1798. { MKTAG('t','k','h','d'), mov_read_tkhd }, /* track header */
  1799. { MKTAG('t','f','h','d'), mov_read_tfhd }, /* track fragment header */
  1800. { MKTAG('t','r','a','k'), mov_read_trak },
  1801. { MKTAG('t','r','a','f'), mov_read_default },
  1802. { MKTAG('t','r','e','x'), mov_read_trex },
  1803. { MKTAG('t','r','u','n'), mov_read_trun },
  1804. { MKTAG('u','d','t','a'), mov_read_default },
  1805. { MKTAG('w','a','v','e'), mov_read_wave },
  1806. { MKTAG('e','s','d','s'), mov_read_esds },
1807. { MKTAG('w','i','d','e'), mov_read_wide }, /* placeholder */
  1808. { MKTAG('c','m','o','v'), mov_read_cmov },
  1809. { 0, NULL }
  1810. };
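/*
 * Probe: walk the top-level atoms in the probe buffer.  Unambiguous container
 * tags score AVPROBE_SCORE_MAX, common-but-weaker tags slightly less, and
 * skippable atoms ('skip', 'uuid', ...) only set a fallback score while the
 * scan continues at the next atom boundary.
 */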
  1811. static int mov_probe(AVProbeData *p)
  1812. {
  1813. unsigned int offset;
  1814. uint32_t tag;
  1815. int score = 0;
  1816. /* check file header */
  1817. offset = 0;
  1818. for(;;) {
  1819. /* ignore invalid offset */
  1820. if ((offset + 8) > (unsigned int)p->buf_size)
  1821. return score;
  1822. tag = AV_RL32(p->buf + offset + 4);
  1823. switch(tag) {
  1824. /* check for obvious tags */
  1825. case MKTAG('j','P',' ',' '): /* jpeg 2000 signature */
  1826. case MKTAG('m','o','o','v'):
  1827. case MKTAG('m','d','a','t'):
  1828. case MKTAG('p','n','o','t'): /* detect movs with preview pics like ew.mov and april.mov */
1829. case MKTAG('u','d','t','a'): /* Packet Video PVAuthor adds this and a lot of other junk */
  1830. case MKTAG('f','t','y','p'):
  1831. return AVPROBE_SCORE_MAX;
1832. /* these are more common words, so rate them a bit lower */
1833. case MKTAG('e','d','i','w'): /* XDCAM files have the bytes of the first tag reversed */
  1834. case MKTAG('w','i','d','e'):
  1835. case MKTAG('f','r','e','e'):
  1836. case MKTAG('j','u','n','k'):
  1837. case MKTAG('p','i','c','t'):
  1838. return AVPROBE_SCORE_MAX - 5;
  1839. case MKTAG(0x82,0x82,0x7f,0x7d):
  1840. case MKTAG('s','k','i','p'):
  1841. case MKTAG('u','u','i','d'):
  1842. case MKTAG('p','r','f','l'):
  1843. offset = AV_RB32(p->buf+offset) + offset;
1844. /* if we only find these because the probe data is too small, at least rate them */
  1845. score = AVPROBE_SCORE_MAX - 50;
  1846. break;
  1847. default:
  1848. /* unrecognized tag */
  1849. return score;
  1850. }
  1851. }
  1852. return score;
  1853. }
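/*
 * Header: parse atoms up to the file size (or without bound when the input
 * is streamed and its size is unknown); a file without a 'moov' atom is
 * rejected.
 */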
  1854. static int mov_read_header(AVFormatContext *s, AVFormatParameters *ap)
  1855. {
  1856. MOVContext *mov = s->priv_data;
  1857. ByteIOContext *pb = s->pb;
  1858. int err;
  1859. MOVAtom atom = { 0 };
  1860. mov->fc = s;
  1861. /* .mov and .mp4 aren't streamable anyway (only progressive download if moov is before mdat) */
  1862. if(!url_is_streamed(pb))
  1863. atom.size = url_fsize(pb);
  1864. else
  1865. atom.size = INT64_MAX;
  1866. /* check MOV header */
  1867. if ((err = mov_read_default(mov, pb, atom)) < 0) {
  1868. av_log(s, AV_LOG_ERROR, "error reading header: %d\n", err);
  1869. return err;
  1870. }
  1871. if (!mov->found_moov) {
  1872. av_log(s, AV_LOG_ERROR, "moov atom not found\n");
  1873. return -1;
  1874. }
1875. dprintf(mov->fc, "on_parse_exit_offset=%"PRId64"\n", url_ftell(pb));
  1876. return 0;
  1877. }
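/*
 * Pick the next sample to return across all streams: for streamed input the
 * sample with the lowest file position wins; otherwise the lowest DTS (in
 * AV_TIME_BASE units) is chosen, falling back to file order when both
 * candidates live in the main file and their DTS differ by at most a second.
 */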
  1878. static AVIndexEntry *mov_find_next_sample(AVFormatContext *s, AVStream **st)
  1879. {
  1880. AVIndexEntry *sample = NULL;
  1881. int64_t best_dts = INT64_MAX;
  1882. int i;
  1883. for (i = 0; i < s->nb_streams; i++) {
  1884. AVStream *avst = s->streams[i];
  1885. MOVStreamContext *msc = avst->priv_data;
  1886. if (msc->pb && msc->current_sample < avst->nb_index_entries) {
  1887. AVIndexEntry *current_sample = &avst->index_entries[msc->current_sample];
  1888. int64_t dts = av_rescale(current_sample->timestamp, AV_TIME_BASE, msc->time_scale);
  1889. dprintf(s, "stream %d, sample %d, dts %"PRId64"\n", i, msc->current_sample, dts);
  1890. if (!sample || (url_is_streamed(s->pb) && current_sample->pos < sample->pos) ||
  1891. (!url_is_streamed(s->pb) &&
  1892. ((msc->pb != s->pb && dts < best_dts) || (msc->pb == s->pb &&
  1893. ((FFABS(best_dts - dts) <= AV_TIME_BASE && current_sample->pos < sample->pos) ||
  1894. (FFABS(best_dts - dts) > AV_TIME_BASE && dts < best_dts)))))) {
  1895. sample = current_sample;
  1896. best_dts = dts;
  1897. *st = avst;
  1898. }
  1899. }
  1900. }
  1901. return sample;
  1902. }
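/*
 * Read one packet from the index.  When the index runs dry on a streamed
 * (non-seekable) input, further top-level atoms such as movie fragments are
 * parsed before retrying; otherwise EOF is returned.  PTS is DTS plus the
 * composition-time (ctts) offset when present, else PTS equals DTS and the
 * duration is the gap to the next index entry.
 */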
  1903. static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
  1904. {
  1905. MOVContext *mov = s->priv_data;
  1906. MOVStreamContext *sc;
  1907. AVIndexEntry *sample;
  1908. AVStream *st = NULL;
  1909. int ret;
  1910. retry:
  1911. sample = mov_find_next_sample(s, &st);
  1912. if (!sample) {
  1913. mov->found_mdat = 0;
  1914. if (!url_is_streamed(s->pb) ||
  1915. mov_read_default(mov, s->pb, (MOVAtom){ 0, INT64_MAX }) < 0 ||
  1916. url_feof(s->pb))
  1917. return AVERROR_EOF;
1918. dprintf(s, "read fragments, offset 0x%"PRIx64"\n", url_ftell(s->pb));
  1919. goto retry;
  1920. }
  1921. sc = st->priv_data;
1922. /* must be done just before reading to avoid an infinite loop on this sample */
  1923. sc->current_sample++;
  1924. if (st->discard != AVDISCARD_ALL) {
  1925. if (url_fseek(sc->pb, sample->pos, SEEK_SET) != sample->pos) {
  1926. av_log(mov->fc, AV_LOG_ERROR, "stream %d, offset 0x%"PRIx64": partial file\n",
  1927. sc->ffindex, sample->pos);
  1928. return -1;
  1929. }
  1930. ret = av_get_packet(sc->pb, pkt, sample->size);
  1931. if (ret < 0)
  1932. return ret;
  1933. #if CONFIG_DV_DEMUXER
  1934. if (mov->dv_demux && sc->dv_audio_container) {
  1935. dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
  1936. av_free(pkt->data);
  1937. pkt->size = 0;
  1938. ret = dv_get_packet(mov->dv_demux, pkt);
  1939. if (ret < 0)
  1940. return ret;
  1941. }
  1942. #endif
  1943. }
  1944. pkt->stream_index = sc->ffindex;
  1945. pkt->dts = sample->timestamp;
  1946. if (sc->ctts_data) {
  1947. pkt->pts = pkt->dts + sc->dts_shift + sc->ctts_data[sc->ctts_index].duration;
  1948. /* update ctts context */
  1949. sc->ctts_sample++;
  1950. if (sc->ctts_index < sc->ctts_count &&
  1951. sc->ctts_data[sc->ctts_index].count == sc->ctts_sample) {
  1952. sc->ctts_index++;
  1953. sc->ctts_sample = 0;
  1954. }
  1955. if (sc->wrong_dts)
  1956. pkt->dts = AV_NOPTS_VALUE;
  1957. } else {
  1958. int64_t next_dts = (sc->current_sample < st->nb_index_entries) ?
  1959. st->index_entries[sc->current_sample].timestamp : st->duration;
  1960. pkt->duration = next_dts - pkt->dts;
  1961. pkt->pts = pkt->dts;
  1962. }
  1963. if (st->discard == AVDISCARD_ALL)
  1964. goto retry;
  1965. pkt->flags |= sample->flags & AVINDEX_KEYFRAME ? PKT_FLAG_KEY : 0;
  1966. pkt->pos = sample->pos;
  1967. dprintf(s, "stream %d, pts %"PRId64", dts %"PRId64", pos 0x%"PRIx64", duration %d\n",
  1968. pkt->stream_index, pkt->pts, pkt->dts, pkt->pos, pkt->duration);
  1969. return 0;
  1970. }
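/*
 * Seek a single stream: look up the index entry for the requested timestamp
 * and re-synchronize the ctts position so composition offsets stay aligned
 * with the new current sample.
 */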
  1971. static int mov_seek_stream(AVFormatContext *s, AVStream *st, int64_t timestamp, int flags)
  1972. {
  1973. MOVStreamContext *sc = st->priv_data;
  1974. int sample, time_sample;
  1975. int i;
  1976. sample = av_index_search_timestamp(st, timestamp, flags);
  1977. dprintf(s, "stream %d, timestamp %"PRId64", sample %d\n", st->index, timestamp, sample);
1978. if (sample < 0) /* no matching sample found, cannot seek */
  1979. return -1;
  1980. sc->current_sample = sample;
  1981. dprintf(s, "stream %d, found sample %d\n", st->index, sc->current_sample);
  1982. /* adjust ctts index */
  1983. if (sc->ctts_data) {
  1984. time_sample = 0;
  1985. for (i = 0; i < sc->ctts_count; i++) {
  1986. int next = time_sample + sc->ctts_data[i].count;
  1987. if (next > sc->current_sample) {
  1988. sc->ctts_index = i;
  1989. sc->ctts_sample = sc->current_sample - time_sample;
  1990. break;
  1991. }
  1992. time_sample = next;
  1993. }
  1994. }
  1995. return sample;
  1996. }
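/*
 * Seek entry point: seek the requested stream first, then move every other
 * stream to the timestamp actually found, rescaled into its own time base.
 */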
  1997. static int mov_read_seek(AVFormatContext *s, int stream_index, int64_t sample_time, int flags)
  1998. {
  1999. AVStream *st;
  2000. int64_t seek_timestamp, timestamp;
  2001. int sample;
  2002. int i;
  2003. if (stream_index >= s->nb_streams)
  2004. return -1;
  2005. if (sample_time < 0)
  2006. sample_time = 0;
  2007. st = s->streams[stream_index];
  2008. sample = mov_seek_stream(s, st, sample_time, flags);
  2009. if (sample < 0)
  2010. return -1;
  2011. /* adjust seek timestamp to found sample timestamp */
  2012. seek_timestamp = st->index_entries[sample].timestamp;
  2013. for (i = 0; i < s->nb_streams; i++) {
  2014. st = s->streams[i];
  2015. if (stream_index == i)
  2016. continue;
  2017. timestamp = av_rescale_q(seek_timestamp, s->streams[stream_index]->time_base, st->time_base);
  2018. mov_seek_stream(s, st, timestamp, flags);
  2019. }
  2020. return 0;
  2021. }
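/*
 * Free per-stream tables and data reference handles, the optional DV demuxer
 * context and the global 'trex' defaults table.
 */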
  2022. static int mov_read_close(AVFormatContext *s)
  2023. {
  2024. MOVContext *mov = s->priv_data;
  2025. int i, j;
  2026. for (i = 0; i < s->nb_streams; i++) {
  2027. AVStream *st = s->streams[i];
  2028. MOVStreamContext *sc = st->priv_data;
  2029. av_freep(&sc->ctts_data);
  2030. for (j = 0; j < sc->drefs_count; j++) {
  2031. av_freep(&sc->drefs[j].path);
  2032. av_freep(&sc->drefs[j].dir);
  2033. }
  2034. av_freep(&sc->drefs);
  2035. if (sc->pb && sc->pb != s->pb)
  2036. url_fclose(sc->pb);
  2037. av_freep(&st->codec->palctrl);
  2038. }
  2039. if (mov->dv_demux) {
  2040. for(i = 0; i < mov->dv_fctx->nb_streams; i++) {
  2041. av_freep(&mov->dv_fctx->streams[i]->codec);
  2042. av_freep(&mov->dv_fctx->streams[i]);
  2043. }
  2044. av_freep(&mov->dv_fctx);
  2045. av_freep(&mov->dv_demux);
  2046. }
  2047. av_freep(&mov->trex_data);
  2048. return 0;
  2049. }
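/*
 * Demuxer registration; the positional initializers are the format name(s),
 * long name, private context size, and the probe, header, packet, close and
 * seek callbacks defined above.
 */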
  2050. AVInputFormat mov_demuxer = {
  2051. "mov,mp4,m4a,3gp,3g2,mj2",
  2052. NULL_IF_CONFIG_SMALL("QuickTime/MPEG-4/Motion JPEG 2000 format"),
  2053. sizeof(MOVContext),
  2054. mov_probe,
  2055. mov_read_header,
  2056. mov_read_packet,
  2057. mov_read_close,
  2058. mov_read_seek,
  2059. };