/*
 * FLV demuxer
 * Copyright (c) 2003 The FFmpeg Project
 *
 * This demuxer will generate a 1 byte extradata for VP6F content.
 * It is composed of:
 *  - upper 4 bits: difference between encoded width and visible width
 *  - lower 4 bits: difference between encoded height and visible height
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/intfloat.h"
#include "libavutil/mathematics.h"
#include "libavutil/time_internal.h"
#include "libavcodec/bytestream.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "flv.h"

#define VALIDATE_INDEX_TS_THRESH 2500

#define RESYNC_BUFFER_SIZE (1<<20)

#define MAX_DEPTH 16      ///< arbitrary limit to prevent unbounded recursion
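
/* An FLV file begins with a 9-byte file header (signature, version, A/V
 * presence flags, data offset) followed by a sequence of tags, each trailed
 * by a 32-bit PreviousTagSize field, starting with PreviousTagSize0 which is
 * always 0. flv_read_header() consumes the file header; flv_read_packet()
 * walks the tag sequence. */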

typedef struct FLVContext {
    const AVClass *class; ///< Class for private options.
    int trust_metadata;   ///< configure streams according onMetaData
    int trust_datasize;   ///< trust data size of FLVTag
    int dump_full_metadata;   ///< Dump full metadata of the onMetadata
    int wrong_dts;        ///< wrong dts due to negative cts
    uint8_t *new_extradata[FLV_STREAM_TYPE_NB];
    int new_extradata_size[FLV_STREAM_TYPE_NB];
    int last_sample_rate;
    int last_channels;
    struct {
        int64_t dts;
        int64_t pos;
    } validate_index[2];
    int validate_next;
    int validate_count;
    int searched_for_end;

    uint8_t resync_buffer[2*RESYNC_BUFFER_SIZE];

    int broken_sizes;
    int sum_flv_tag_size;

    int last_keyframe_stream_index;
    int keyframe_count;
    int64_t video_bit_rate;
    int64_t audio_bit_rate;
    int64_t *keyframe_times;
    int64_t *keyframe_filepositions;
    int missing_streams;
    AVRational framerate;
    int64_t last_ts;
    int64_t time_offset;
    int64_t time_pos;
} FLVContext;

/* AMF date type */
typedef struct amf_date {
    double milliseconds;
    int16_t timezone;
} amf_date;
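
/* Shared probe: accept input starting with the 'F' 'L' 'V' signature, a
 * version below 5 and a plausible header data offset that points inside the
 * probe buffer. Streams relayed by NGINX's RTMP module carry an "NGINX RTMP"
 * marker shortly after the header; those are claimed by the live_flv demuxer
 * rather than the plain flv one. */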

static int probe(const AVProbeData *p, int live)
{
    const uint8_t *d = p->buf;
    unsigned offset = AV_RB32(d + 5);

    if (d[0] == 'F' &&
        d[1] == 'L' &&
        d[2] == 'V' &&
        d[3] < 5 && d[5] == 0 &&
        offset + 100 < p->buf_size &&
        offset > 8) {
        int is_live = !memcmp(d + offset + 40, "NGINX RTMP", 10);

        if (live == is_live)
            return AVPROBE_SCORE_MAX;
    }
    return 0;
}

static int flv_probe(const AVProbeData *p)
{
    return probe(p, 0);
}

static int live_flv_probe(const AVProbeData *p)
{
    return probe(p, 1);
}

static int kux_probe(const AVProbeData *p)
{
    const uint8_t *d = p->buf;

    if (d[0] == 'K' &&
        d[1] == 'D' &&
        d[2] == 'K' &&
        d[3] == 0 &&
        d[4] == 0) {
        return AVPROBE_SCORE_EXTENSION + 1;
    }
    return 0;
}

static void add_keyframes_index(AVFormatContext *s)
{
    FLVContext *flv   = s->priv_data;
    AVStream *stream  = NULL;
    unsigned int i    = 0;

    if (flv->last_keyframe_stream_index < 0) {
        av_log(s, AV_LOG_DEBUG, "keyframe stream hasn't been created\n");
        return;
    }

    av_assert0(flv->last_keyframe_stream_index <= s->nb_streams);
    stream = s->streams[flv->last_keyframe_stream_index];

    if (stream->internal->nb_index_entries == 0) {
        for (i = 0; i < flv->keyframe_count; i++) {
            av_log(s, AV_LOG_TRACE, "keyframe filepositions = %"PRId64" times = %"PRId64"\n",
                   flv->keyframe_filepositions[i], flv->keyframe_times[i] * 1000);
            av_add_index_entry(stream, flv->keyframe_filepositions[i],
                flv->keyframe_times[i] * 1000, 0, 0, AVINDEX_KEYFRAME);
        }
    } else
        av_log(s, AV_LOG_WARNING, "Skipping duplicate index\n");

    if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        av_freep(&flv->keyframe_times);
        av_freep(&flv->keyframe_filepositions);
        flv->keyframe_count = 0;
    }
}
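
/* Create a new AVStream of the given type. AVFMTCTX_NOHEADER (set in
 * flv_read_header()) is cleared once three streams exist, or once there are
 * two streams neither of which is a subtitle or data stream. The new stream
 * also becomes the target for any keyframe index parsed from the metadata,
 * via last_keyframe_stream_index and add_keyframes_index(). */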

static AVStream *create_stream(AVFormatContext *s, int codec_type)
{
    FLVContext *flv = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return NULL;
    st->codecpar->codec_type = codec_type;
    if (s->nb_streams>=3 ||(   s->nb_streams==2
                            && s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE
                            && s->streams[1]->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE
                            && s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_DATA
                            && s->streams[1]->codecpar->codec_type != AVMEDIA_TYPE_DATA))
        s->ctx_flags &= ~AVFMTCTX_NOHEADER;
    if (codec_type == AVMEDIA_TYPE_AUDIO) {
        st->codecpar->bit_rate = flv->audio_bit_rate;
        flv->missing_streams &= ~FLV_HEADER_FLAG_HASAUDIO;
    }
    if (codec_type == AVMEDIA_TYPE_VIDEO) {
        st->codecpar->bit_rate = flv->video_bit_rate;
        flv->missing_streams &= ~FLV_HEADER_FLAG_HASVIDEO;
        st->avg_frame_rate = flv->framerate;
    }

    avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
    flv->last_keyframe_stream_index = s->nb_streams - 1;
    add_keyframes_index(s);
    return st;
}

static int flv_same_audio_codec(AVCodecParameters *apar, int flags)
{
    int bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
    int flv_codecid           = flags & FLV_AUDIO_CODECID_MASK;
    int codec_id;

    if (!apar->codec_id && !apar->codec_tag)
        return 1;

    if (apar->bits_per_coded_sample != bits_per_coded_sample)
        return 0;

    switch (flv_codecid) {
    // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                   : AV_CODEC_ID_PCM_S16BE;
#else
                   : AV_CODEC_ID_PCM_S16LE;
#endif
        return codec_id == apar->codec_id;
    case FLV_CODECID_PCM_LE:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
                   : AV_CODEC_ID_PCM_S16LE;
        return codec_id == apar->codec_id;
    case FLV_CODECID_AAC:
        return apar->codec_id == AV_CODEC_ID_AAC;
    case FLV_CODECID_ADPCM:
        return apar->codec_id == AV_CODEC_ID_ADPCM_SWF;
    case FLV_CODECID_SPEEX:
        return apar->codec_id == AV_CODEC_ID_SPEEX;
    case FLV_CODECID_MP3:
        return apar->codec_id == AV_CODEC_ID_MP3;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
    case FLV_CODECID_NELLYMOSER:
        return apar->codec_id == AV_CODEC_ID_NELLYMOSER;
    case FLV_CODECID_PCM_MULAW:
        return apar->sample_rate == 8000 &&
               apar->codec_id    == AV_CODEC_ID_PCM_MULAW;
    case FLV_CODECID_PCM_ALAW:
        return apar->sample_rate == 8000 &&
               apar->codec_id    == AV_CODEC_ID_PCM_ALAW;
    default:
        return apar->codec_tag == (flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
    }
}
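
/* The single flags byte of an FLV audio tag packs the codec id
 * (FLV_AUDIO_CODECID_MASK), the sample-rate index (FLV_AUDIO_SAMPLERATE_MASK),
 * the sample size (FLV_AUDIO_SAMPLESIZE_MASK, 8 or 16 bit) and the channel
 * count (FLV_AUDIO_CHANNEL_MASK, mono or stereo). flv_set_audio_codec() maps
 * the codec id to an AVCodecID and overrides the sample rate for codecs whose
 * rate is implied by the codec id rather than by the rate field. */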

static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream,
                                AVCodecParameters *apar, int flv_codecid)
{
    switch (flv_codecid) {
    // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        apar->codec_id = apar->bits_per_coded_sample == 8
                           ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                           : AV_CODEC_ID_PCM_S16BE;
#else
                           : AV_CODEC_ID_PCM_S16LE;
#endif
        break;
    case FLV_CODECID_PCM_LE:
        apar->codec_id = apar->bits_per_coded_sample == 8
                           ? AV_CODEC_ID_PCM_U8
                           : AV_CODEC_ID_PCM_S16LE;
        break;
    case FLV_CODECID_AAC:
        apar->codec_id = AV_CODEC_ID_AAC;
        break;
    case FLV_CODECID_ADPCM:
        apar->codec_id = AV_CODEC_ID_ADPCM_SWF;
        break;
    case FLV_CODECID_SPEEX:
        apar->codec_id    = AV_CODEC_ID_SPEEX;
        apar->sample_rate = 16000;
        break;
    case FLV_CODECID_MP3:
        apar->codec_id        = AV_CODEC_ID_MP3;
        astream->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
        // in case metadata does not otherwise declare samplerate
        apar->sample_rate = 8000;
        apar->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
        apar->sample_rate = 16000;
        apar->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER:
        apar->codec_id = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_PCM_MULAW:
        apar->sample_rate = 8000;
        apar->codec_id    = AV_CODEC_ID_PCM_MULAW;
        break;
    case FLV_CODECID_PCM_ALAW:
        apar->sample_rate = 8000;
        apar->codec_id    = AV_CODEC_ID_PCM_ALAW;
        break;
    default:
        avpriv_request_sample(s, "Audio codec (%x)",
                              flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
        apar->codec_tag = flv_codecid >> FLV_AUDIO_CODECID_OFFSET;
    }
}

static int flv_same_video_codec(AVCodecParameters *vpar, int flags)
{
    int flv_codecid = flags & FLV_VIDEO_CODECID_MASK;

    if (!vpar->codec_id && !vpar->codec_tag)
        return 1;

    switch (flv_codecid) {
    case FLV_CODECID_H263:
        return vpar->codec_id == AV_CODEC_ID_FLV1;
    case FLV_CODECID_SCREEN:
        return vpar->codec_id == AV_CODEC_ID_FLASHSV;
    case FLV_CODECID_SCREEN2:
        return vpar->codec_id == AV_CODEC_ID_FLASHSV2;
    case FLV_CODECID_VP6:
        return vpar->codec_id == AV_CODEC_ID_VP6F;
    case FLV_CODECID_VP6A:
        return vpar->codec_id == AV_CODEC_ID_VP6A;
    case FLV_CODECID_H264:
        return vpar->codec_id == AV_CODEC_ID_H264;
    default:
        return vpar->codec_tag == flv_codecid;
    }
}
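
/* Map the FLV video codec id to an AVCodecID. The return value is the number
 * of codec-header bytes consumed here (the VP6 size-adjustment byte, or the
 * AVC/MPEG-4 packet-type prefix), which flv_read_packet() subtracts from the
 * remaining tag body size. */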

static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream,
                               int flv_codecid, int read)
{
    int ret = 0;
    AVCodecParameters *par = vstream->codecpar;
    enum AVCodecID old_codec_id = vstream->codecpar->codec_id;
    switch (flv_codecid) {
    case FLV_CODECID_H263:
        par->codec_id = AV_CODEC_ID_FLV1;
        break;
    case FLV_CODECID_REALH263:
        par->codec_id = AV_CODEC_ID_H263;
        break; // Really mean it this time
    case FLV_CODECID_SCREEN:
        par->codec_id = AV_CODEC_ID_FLASHSV;
        break;
    case FLV_CODECID_SCREEN2:
        par->codec_id = AV_CODEC_ID_FLASHSV2;
        break;
    case FLV_CODECID_VP6:
        par->codec_id = AV_CODEC_ID_VP6F;
    case FLV_CODECID_VP6A:
        if (flv_codecid == FLV_CODECID_VP6A)
            par->codec_id = AV_CODEC_ID_VP6A;
        if (read) {
            if (par->extradata_size != 1) {
                ff_alloc_extradata(par, 1);
            }
            if (par->extradata)
                par->extradata[0] = avio_r8(s->pb);
            else
                avio_skip(s->pb, 1);
        }
        ret = 1;     // 1 byte body size adjustment for flv_read_packet()
        break;
    case FLV_CODECID_H264:
        par->codec_id = AV_CODEC_ID_H264;
        vstream->need_parsing = AVSTREAM_PARSE_HEADERS;
        ret = 3;     // not 4, reading packet type will consume one byte
        break;
    case FLV_CODECID_MPEG4:
        par->codec_id = AV_CODEC_ID_MPEG4;
        ret = 3;
        break;
    default:
        avpriv_request_sample(s, "Video codec (%x)", flv_codecid);
        par->codec_tag = flv_codecid;
    }

    if (!vstream->internal->need_context_update && par->codec_id != old_codec_id) {
        avpriv_request_sample(s, "Changing the codec id midstream");
        return AVERROR_PATCHWELCOME;
    }

    return ret;
}

static int amf_get_string(AVIOContext *ioc, char *buffer, int buffsize)
{
    int ret;
    int length = avio_rb16(ioc);
    if (length >= buffsize) {
        avio_skip(ioc, length);
        return -1;
    }

    ret = avio_read(ioc, buffer, length);
    if (ret < 0)
        return ret;
    if (ret < length)
        return AVERROR_INVALIDDATA;

    buffer[length] = '\0';

    return length;
}
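
/* Parse the "keyframes" object of onMetaData: two parallel AMF strict arrays
 * named by KEYFRAMES_TIMESTAMP_TAG (timestamps in seconds) and
 * KEYFRAMES_BYTEOFFSET_TAG (the matching byte offsets). The first two entries
 * are also remembered in validate_index[] so flv_read_packet() can later
 * verify the index against the tags actually found in the file. */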

static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc, int64_t max_pos)
{
    FLVContext *flv       = s->priv_data;
    unsigned int timeslen = 0, fileposlen = 0, i;
    char str_val[256];
    int64_t *times         = NULL;
    int64_t *filepositions = NULL;
    int ret                = AVERROR(ENOSYS);
    int64_t initial_pos    = avio_tell(ioc);

    if (flv->keyframe_count > 0) {
        av_log(s, AV_LOG_DEBUG, "keyframes have been parsed\n");
        return 0;
    }
    av_assert0(!flv->keyframe_times);
    av_assert0(!flv->keyframe_filepositions);

    if (s->flags & AVFMT_FLAG_IGNIDX)
        return 0;

    while (avio_tell(ioc) < max_pos - 2 &&
           amf_get_string(ioc, str_val, sizeof(str_val)) > 0) {
        int64_t **current_array;
        unsigned int arraylen;

        // Expect array object in context
        if (avio_r8(ioc) != AMF_DATA_TYPE_ARRAY)
            break;

        arraylen = avio_rb32(ioc);
        if (arraylen>>28)
            break;

        if       (!strcmp(KEYFRAMES_TIMESTAMP_TAG , str_val) && !times) {
            current_array = &times;
            timeslen      = arraylen;
        } else if (!strcmp(KEYFRAMES_BYTEOFFSET_TAG, str_val) &&
                   !filepositions) {
            current_array = &filepositions;
            fileposlen    = arraylen;
        } else
            // unexpected metatag inside keyframes, will not use such
            // metadata for indexing
            break;

        if (!(*current_array = av_mallocz(sizeof(**current_array) * arraylen))) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }

        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++) {
            double d;
            if (avio_r8(ioc) != AMF_DATA_TYPE_NUMBER)
                goto invalid;
            d = av_int2double(avio_rb64(ioc));
            if (isnan(d) || d < INT64_MIN || d > INT64_MAX)
                goto invalid;
            current_array[0][i] = d;
        }
        if (times && filepositions) {
            // All done, exiting at a position allowing amf_parse_object
            // to finish parsing the object
            ret = 0;
            break;
        }
    }

    if (timeslen == fileposlen && fileposlen>1 && max_pos <= filepositions[0]) {
        for (i = 0; i < FFMIN(2,fileposlen); i++) {
            flv->validate_index[i].pos = filepositions[i];
            flv->validate_index[i].dts = times[i] * 1000;
            flv->validate_count        = i + 1;
        }
        flv->keyframe_times = times;
        flv->keyframe_filepositions = filepositions;
        flv->keyframe_count = timeslen;
        times = NULL;
        filepositions = NULL;
    } else {
invalid:
        av_log(s, AV_LOG_WARNING, "Invalid keyframes object, skipping.\n");
    }

finish:
    av_freep(&times);
    av_freep(&filepositions);
    avio_seek(ioc, initial_pos, SEEK_SET);
    return ret;
}
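
/* Recursively parse one AMF value; nesting is capped at MAX_DEPTH. At depth 1
 * of the onMetaData object the well-known keys (duration, width, height,
 * data rates, framerate, codec ids, ...) are applied to the corresponding
 * streams; other values are exported as demuxer metadata, and the standard
 * keys are exported too when the flv_full_metadata option is set. */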

static int amf_parse_object(AVFormatContext *s, AVStream *astream,
                            AVStream *vstream, const char *key,
                            int64_t max_pos, int depth)
{
    AVCodecParameters *apar, *vpar;
    FLVContext *flv = s->priv_data;
    AVIOContext *ioc;
    AMFDataType amf_type;
    char str_val[1024];
    double num_val;
    amf_date date;

    if (depth > MAX_DEPTH)
        return AVERROR_PATCHWELCOME;

    num_val  = 0;
    ioc      = s->pb;
    if (avio_feof(ioc))
        return AVERROR_EOF;
    amf_type = avio_r8(ioc);

    switch (amf_type) {
    case AMF_DATA_TYPE_NUMBER:
        num_val = av_int2double(avio_rb64(ioc));
        break;
    case AMF_DATA_TYPE_BOOL:
        num_val = avio_r8(ioc);
        break;
    case AMF_DATA_TYPE_STRING:
        if (amf_get_string(ioc, str_val, sizeof(str_val)) < 0) {
            av_log(s, AV_LOG_ERROR, "AMF_DATA_TYPE_STRING parsing failed\n");
            return -1;
        }
        break;
    case AMF_DATA_TYPE_OBJECT:
        if (key &&
            (ioc->seekable & AVIO_SEEKABLE_NORMAL) &&
            !strcmp(KEYFRAMES_TAG, key) && depth == 1)
            if (parse_keyframes_index(s, ioc, max_pos) < 0)
                av_log(s, AV_LOG_ERROR, "Keyframe index parsing failed\n");
            else
                add_keyframes_index(s);
        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1;     // if we couldn't skip, bomb out.
        if (avio_r8(ioc) != AMF_END_OF_OBJECT) {
            av_log(s, AV_LOG_ERROR, "Missing AMF_END_OF_OBJECT in AMF_DATA_TYPE_OBJECT\n");
            return -1;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_UNDEFINED:
    case AMF_DATA_TYPE_UNSUPPORTED:
        break;     // these take up no additional space
    case AMF_DATA_TYPE_MIXEDARRAY:
    {
        unsigned v;
        avio_skip(ioc, 4);     // skip 32-bit max array index
        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            // this is the only case in which we would want a nested
            // parse to not skip over the object
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1;

        v = avio_r8(ioc);
        if (v != AMF_END_OF_OBJECT) {
            av_log(s, AV_LOG_ERROR, "Missing AMF_END_OF_OBJECT in AMF_DATA_TYPE_MIXEDARRAY, found %d\n", v);
            return -1;
        }
        break;
    }
    case AMF_DATA_TYPE_ARRAY:
    {
        unsigned int arraylen, i;

        arraylen = avio_rb32(ioc);
        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++)
            if (amf_parse_object(s, NULL, NULL, NULL, max_pos,
                                 depth + 1) < 0)
                return -1;      // if we couldn't skip, bomb out.
    }
    break;
    case AMF_DATA_TYPE_DATE:
        // timestamp (double) and UTC offset (int16)
        date.milliseconds = av_int2double(avio_rb64(ioc));
        date.timezone = avio_rb16(ioc);
        break;
    default:                    // unsupported type, we couldn't skip
        av_log(s, AV_LOG_ERROR, "unsupported amf type %d\n", amf_type);
        return -1;
    }

    if (key) {
        apar = astream ? astream->codecpar : NULL;
        vpar = vstream ? vstream->codecpar : NULL;

        // stream info doesn't live any deeper than the first object
        if (depth == 1) {
            if (amf_type == AMF_DATA_TYPE_NUMBER ||
                amf_type == AMF_DATA_TYPE_BOOL) {
                if (!strcmp(key, "duration"))
                    s->duration = num_val * AV_TIME_BASE;
                else if (!strcmp(key, "videodatarate") &&
                         0 <= (int)(num_val * 1024.0))
                    flv->video_bit_rate = num_val * 1024.0;
                else if (!strcmp(key, "audiodatarate") &&
                         0 <= (int)(num_val * 1024.0))
                    flv->audio_bit_rate = num_val * 1024.0;
                else if (!strcmp(key, "datastream")) {
                    AVStream *st = create_stream(s, AVMEDIA_TYPE_SUBTITLE);
                    if (!st)
                        return AVERROR(ENOMEM);
                    st->codecpar->codec_id = AV_CODEC_ID_TEXT;
                } else if (!strcmp(key, "framerate")) {
                    flv->framerate = av_d2q(num_val, 1000);
                    if (vstream)
                        vstream->avg_frame_rate = flv->framerate;
                } else if (flv->trust_metadata) {
                    if (!strcmp(key, "videocodecid") && vpar) {
                        int ret = flv_set_video_codec(s, vstream, num_val, 0);
                        if (ret < 0)
                            return ret;
                    } else if (!strcmp(key, "audiocodecid") && apar) {
                        int id = ((int)num_val) << FLV_AUDIO_CODECID_OFFSET;
                        flv_set_audio_codec(s, astream, apar, id);
                    } else if (!strcmp(key, "audiosamplerate") && apar) {
                        apar->sample_rate = num_val;
                    } else if (!strcmp(key, "audiosamplesize") && apar) {
                        apar->bits_per_coded_sample = num_val;
                    } else if (!strcmp(key, "stereo") && apar) {
                        apar->channels       = num_val + 1;
                        apar->channel_layout = apar->channels == 2 ?
                                               AV_CH_LAYOUT_STEREO :
                                               AV_CH_LAYOUT_MONO;
                    } else if (!strcmp(key, "width") && vpar) {
                        vpar->width = num_val;
                    } else if (!strcmp(key, "height") && vpar) {
                        vpar->height = num_val;
                    }
                }
            }
            if (amf_type == AMF_DATA_TYPE_STRING) {
                if (!strcmp(key, "encoder")) {
                    int version = -1;
                    if (1 == sscanf(str_val, "Open Broadcaster Software v0.%d", &version)) {
                        if (version > 0 && version <= 655)
                            flv->broken_sizes = 1;
                    }
                } else if (!strcmp(key, "metadatacreator")) {
                    if (   !strcmp (str_val, "MEGA")
                        || !strncmp(str_val, "FlixEngine", 10))
                        flv->broken_sizes = 1;
                }
            }
        }

        if (amf_type == AMF_DATA_TYPE_OBJECT && s->nb_streams == 1 &&
           ((!apar && !strcmp(key, "audiocodecid")) ||
            (!vpar && !strcmp(key, "videocodecid"))))
                s->ctx_flags &= ~AVFMTCTX_NOHEADER; //If there is either audio/video missing, codecid will be an empty object

        if ((!strcmp(key, "duration")        ||
             !strcmp(key, "filesize")        ||
             !strcmp(key, "width")           ||
             !strcmp(key, "height")          ||
             !strcmp(key, "videodatarate")   ||
             !strcmp(key, "framerate")       ||
             !strcmp(key, "videocodecid")    ||
             !strcmp(key, "audiodatarate")   ||
             !strcmp(key, "audiosamplerate") ||
             !strcmp(key, "audiosamplesize") ||
             !strcmp(key, "stereo")          ||
             !strcmp(key, "audiocodecid")    ||
             !strcmp(key, "datastream")) && !flv->dump_full_metadata)
            return 0;

        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;

        if (amf_type == AMF_DATA_TYPE_BOOL) {
            av_strlcpy(str_val, num_val > 0 ? "true" : "false",
                       sizeof(str_val));
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_NUMBER) {
            snprintf(str_val, sizeof(str_val), "%.f", num_val);
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_STRING) {
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_DATE) {
            time_t time;
            struct tm t;
            char datestr[128];
            time = date.milliseconds / 1000; // to seconds
            localtime_r(&time, &t);
            strftime(datestr, sizeof(datestr), "%a, %d %b %Y %H:%M:%S %z", &t);
            av_dict_set(&s->metadata, key, datestr, 0);
        }
    }

    return 0;
}

#define TYPE_ONTEXTDATA 1
#define TYPE_ONCAPTION 2
#define TYPE_ONCAPTIONINFO 3
#define TYPE_UNKNOWN 9
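
/* Read the body of a script/metadata tag. The first AMF value must be a
 * string naming the event ("onMetaData", "onTextData", "onCaption", ...);
 * depending on that name the body is either parsed into stream parameters and
 * metadata via amf_parse_object(), turned into a data packet by the caller,
 * or skipped. */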

static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
{
    FLVContext *flv = s->priv_data;
    AMFDataType type;
    AVStream *stream, *astream, *vstream;
    AVStream av_unused *dstream;
    AVIOContext *ioc;
    int i;
    char buffer[32];

    astream = NULL;
    vstream = NULL;
    dstream = NULL;
    ioc     = s->pb;

    // first object needs to be "onMetaData" string
    type = avio_r8(ioc);
    if (type != AMF_DATA_TYPE_STRING ||
        amf_get_string(ioc, buffer, sizeof(buffer)) < 0)
        return TYPE_UNKNOWN;

    if (!strcmp(buffer, "onTextData"))
        return TYPE_ONTEXTDATA;

    if (!strcmp(buffer, "onCaption"))
        return TYPE_ONCAPTION;

    if (!strcmp(buffer, "onCaptionInfo"))
        return TYPE_ONCAPTIONINFO;

    if (strcmp(buffer, "onMetaData") && strcmp(buffer, "onCuePoint") && strcmp(buffer, "|RtmpSampleAccess")) {
        av_log(s, AV_LOG_DEBUG, "Unknown type %s\n", buffer);
        return TYPE_UNKNOWN;
    }

    // find the streams now so that amf_parse_object doesn't need to do
    // the lookup every time it is called.
    for (i = 0; i < s->nb_streams; i++) {
        stream = s->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            vstream = stream;
            flv->last_keyframe_stream_index = i;
        } else if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
            astream = stream;
            if (flv->last_keyframe_stream_index == -1)
                flv->last_keyframe_stream_index = i;
        } else if (stream->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
            dstream = stream;
    }

    // parse the second object (we want a mixed array)
    if (amf_parse_object(s, astream, vstream, buffer, next_pos, 0) < 0)
        return -1;

    return 0;
}

static int flv_read_header(AVFormatContext *s)
{
    int flags;
    FLVContext *flv = s->priv_data;
    int offset;
    int pre_tag_size = 0;

    /* Actual FLV data at 0xe40000 in KUX file */
    if(!strcmp(s->iformat->name, "kux"))
        avio_skip(s->pb, 0xe40000);

    avio_skip(s->pb, 4);
    flags = avio_r8(s->pb);

    flv->missing_streams = flags & (FLV_HEADER_FLAG_HASVIDEO | FLV_HEADER_FLAG_HASAUDIO);

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    offset = avio_rb32(s->pb);
    avio_seek(s->pb, offset, SEEK_SET);

    /* Annex E. The FLV File Format
     * E.3 The FLV File Body
     * Field               Type    Comment
     * PreviousTagSize0    UI32    Always 0
     */
    pre_tag_size = avio_rb32(s->pb);
    if (pre_tag_size) {
        av_log(s, AV_LOG_WARNING, "Read FLV header error, input file is not a standard flv format, first PreviousTagSize0 always is 0\n");
    }

    s->start_time = 0;
    flv->sum_flv_tag_size = 0;
    flv->last_keyframe_stream_index = -1;

    return 0;
}

static int flv_read_close(AVFormatContext *s)
{
    int i;
    FLVContext *flv = s->priv_data;
    for (i=0; i<FLV_STREAM_TYPE_NB; i++)
        av_freep(&flv->new_extradata[i]);
    av_freep(&flv->keyframe_times);
    av_freep(&flv->keyframe_filepositions);
    return 0;
}

static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size)
{
    int ret;
    if (!size)
        return 0;

    if ((ret = ff_get_extradata(s, st->codecpar, s->pb, size)) < 0)
        return ret;
    st->internal->need_context_update = 1;
    return 0;
}

static int flv_queue_extradata(FLVContext *flv, AVIOContext *pb, int stream,
                               int size)
{
    if (!size)
        return 0;

    av_free(flv->new_extradata[stream]);
    flv->new_extradata[stream] = av_mallocz(size +
                                            AV_INPUT_BUFFER_PADDING_SIZE);
    if (!flv->new_extradata[stream])
        return AVERROR(ENOMEM);
    flv->new_extradata_size[stream] = size;
    avio_read(pb, flv->new_extradata[stream], size);
    return 0;
}

static void clear_index_entries(AVFormatContext *s, int64_t pos)
{
    int i, j, out;
    av_log(s, AV_LOG_WARNING,
           "Found invalid index entries, clearing the index.\n");
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        /* Remove all index entries that point to >= pos */
        out = 0;
        for (j = 0; j < st->internal->nb_index_entries; j++)
            if (st->internal->index_entries[j].pos < pos)
                st->internal->index_entries[out++] = st->internal->index_entries[j];
        st->internal->nb_index_entries = out;
    }
}
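
/* Skip over a single AMF value of the given type without interpreting it,
 * recursing (up to MAX_DEPTH) into objects and arrays. Used by
 * flv_data_packet() to step over fields it is not interested in. */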

static int amf_skip_tag(AVIOContext *pb, AMFDataType type, int depth)
{
    int nb = -1, ret, parse_name = 1;

    if (depth > MAX_DEPTH)
        return AVERROR_PATCHWELCOME;

    if (avio_feof(pb))
        return AVERROR_EOF;

    switch (type) {
    case AMF_DATA_TYPE_NUMBER:
        avio_skip(pb, 8);
        break;
    case AMF_DATA_TYPE_BOOL:
        avio_skip(pb, 1);
        break;
    case AMF_DATA_TYPE_STRING:
        avio_skip(pb, avio_rb16(pb));
        break;
    case AMF_DATA_TYPE_ARRAY:
        parse_name = 0;
    case AMF_DATA_TYPE_MIXEDARRAY:
        nb = avio_rb32(pb);
        if (nb < 0)
            return AVERROR_INVALIDDATA;
    case AMF_DATA_TYPE_OBJECT:
        while(!pb->eof_reached && (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY)) {
            if (parse_name) {
                int size = avio_rb16(pb);
                if (!size) {
                    avio_skip(pb, 1);
                    break;
                }
                avio_skip(pb, size);
            }
            if ((ret = amf_skip_tag(pb, avio_r8(pb), depth + 1)) < 0)
                return ret;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_OBJECT_END:
        break;
    default:
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
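
/* Turn an onTextData/onCaption script tag into a text packet: scan the AMF
 * object (or array) for a string value (the "text" field for objects) and
 * emit it on the subtitle stream, creating that stream on demand. On failure
 * the tag is skipped by seeking to the next tag. */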

static int flv_data_packet(AVFormatContext *s, AVPacket *pkt,
                           int64_t dts, int64_t next)
{
    AVIOContext *pb = s->pb;
    AVStream *st    = NULL;
    char buf[20];
    int ret = AVERROR_INVALIDDATA;
    int i, length = -1;
    int array = 0;

    switch (avio_r8(pb)) {
    case AMF_DATA_TYPE_ARRAY:
        array = 1;
    case AMF_DATA_TYPE_MIXEDARRAY:
        avio_seek(pb, 4, SEEK_CUR);
    case AMF_DATA_TYPE_OBJECT:
        break;
    default:
        goto skip;
    }

    while (array || (ret = amf_get_string(pb, buf, sizeof(buf))) > 0) {
        AMFDataType type = avio_r8(pb);
        if (type == AMF_DATA_TYPE_STRING && (array || !strcmp(buf, "text"))) {
            length = avio_rb16(pb);
            ret    = av_get_packet(pb, pkt, length);
            if (ret < 0)
                goto skip;
            else
                break;
        } else {
            if ((ret = amf_skip_tag(pb, type, 0)) < 0)
                goto skip;
        }
    }

    if (length < 0) {
        ret = AVERROR_INVALIDDATA;
        goto skip;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
            break;
    }

    if (i == s->nb_streams) {
        st = create_stream(s, AVMEDIA_TYPE_SUBTITLE);
        if (!st)
            return AVERROR(ENOMEM);
        st->codecpar->codec_id = AV_CODEC_ID_TEXT;
    }

    pkt->dts  = dts;
    pkt->pts  = dts;
    pkt->size = ret;

    pkt->stream_index = st->index;
    pkt->flags       |= AV_PKT_FLAG_KEY;

skip:
    avio_seek(s->pb, next + 4, SEEK_SET);

    return ret;
}
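
/* After a tag-size mismatch, scan forward byte by byte looking either for a
 * new "FLV" file header (concatenated files) or for two consecutive
 * PreviousTagSize fields that are consistent with the 24-bit data sizes of
 * the tags they follow, and reposition the input there. */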

static int resync(AVFormatContext *s)
{
    FLVContext *flv = s->priv_data;
    int64_t i;
    int64_t pos = avio_tell(s->pb);

    for (i=0; !avio_feof(s->pb); i++) {
        int j  = i & (RESYNC_BUFFER_SIZE-1);
        int j1 = j + RESYNC_BUFFER_SIZE;
        flv->resync_buffer[j ] =
        flv->resync_buffer[j1] = avio_r8(s->pb);

        if (i >= 8 && pos) {
            uint8_t *d = flv->resync_buffer + j1 - 8;
            if (d[0] == 'F' &&
                d[1] == 'L' &&
                d[2] == 'V' &&
                d[3] < 5 && d[5] == 0) {
                av_log(s, AV_LOG_WARNING, "Concatenated FLV detected, might fail to demux, decode and seek %"PRId64"\n", flv->last_ts);
                flv->time_offset = flv->last_ts + 1;
                flv->time_pos    = avio_tell(s->pb);
            }
        }

        if (i > 22) {
            unsigned lsize2 = AV_RB32(flv->resync_buffer + j1 - 4);
            if (lsize2 >= 11 && lsize2 + 8LL < FFMIN(i, RESYNC_BUFFER_SIZE)) {
                unsigned  size2 = AV_RB24(flv->resync_buffer + j1 - lsize2 + 1 - 4);
                unsigned lsize1 = AV_RB32(flv->resync_buffer + j1 - lsize2 - 8);
                if (lsize1 >= 11 && lsize1 + 8LL + lsize2 < FFMIN(i, RESYNC_BUFFER_SIZE)) {
                    unsigned  size1 = AV_RB24(flv->resync_buffer + j1 - lsize1 + 1 - lsize2 - 8);
                    if (size1 == lsize1 - 11 && size2 == lsize2 - 11) {
                        avio_seek(s->pb, pos + i - lsize1 - lsize2 - 8, SEEK_SET);
                        return 1;
                    }
                }
            }
        }
    }
    return AVERROR_EOF;
}
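
/* Read one FLV tag and return it as an AVPacket. The 11-byte tag header is:
 * type (1 byte), data size (24 bit), timestamp (24 bit plus an 8-bit
 * extension) and a 3-byte stream id that is always 0. The body is followed by
 * a 32-bit PreviousTagSize, which is checked against the size read here and
 * triggers resync() on mismatch unless the flv_ignore_prevtag option is set. */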

static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, size, flags;
    enum FlvTagType type;
    int stream_type=-1;
    int64_t next, pos, meta_pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    int av_uninit(channels);
    int av_uninit(sample_rate);
    AVStream *st    = NULL;
    int last = -1;
    int orig_size;

retry:
    /* pkt size is repeated at end. skip it */
    pos  = avio_tell(s->pb);
    type = (avio_r8(s->pb) & 0x1F);
    orig_size =
    size = avio_rb24(s->pb);
    flv->sum_flv_tag_size += size + 11;
    dts  = avio_rb24(s->pb);
    dts |= (unsigned)avio_r8(s->pb) << 24;
    av_log(s, AV_LOG_TRACE, "type:%d, size:%d, last:%d, dts:%"PRId64" pos:%"PRId64"\n", type, size, last, dts, avio_tell(s->pb));
    if (avio_feof(s->pb))
        return AVERROR_EOF;
    avio_skip(s->pb, 3); /* stream id, always 0 */
    flags = 0;

    if (flv->validate_next < flv->validate_count) {
        int64_t validate_pos = flv->validate_index[flv->validate_next].pos;
        if (pos == validate_pos) {
            if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=
                VALIDATE_INDEX_TS_THRESH) {
                flv->validate_next++;
            } else {
                clear_index_entries(s, validate_pos);
                flv->validate_count = 0;
            }
        } else if (pos > validate_pos) {
            clear_index_entries(s, validate_pos);
            flv->validate_count = 0;
        }
    }

    if (size == 0) {
        ret = FFERROR_REDO;
        goto leave;
    }

    next = size + avio_tell(s->pb);

    if (type == FLV_TAG_TYPE_AUDIO) {
        stream_type = FLV_STREAM_TYPE_AUDIO;
        flags    = avio_r8(s->pb);
        size--;
    } else if (type == FLV_TAG_TYPE_VIDEO) {
        stream_type = FLV_STREAM_TYPE_VIDEO;
        flags    = avio_r8(s->pb);
        size--;
        if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)
            goto skip;
    } else if (type == FLV_TAG_TYPE_META) {
        stream_type=FLV_STREAM_TYPE_SUBTITLE;
        if (size > 13 + 1 + 4) { // Header-type metadata stuff
            int type;
            meta_pos = avio_tell(s->pb);
            type = flv_read_metabody(s, next);
            if (type == 0 && dts == 0 || type < 0) {
                if (type < 0 && flv->validate_count &&
                    flv->validate_index[0].pos     > next &&
                    flv->validate_index[0].pos - 4 < next) {
                    av_log(s, AV_LOG_WARNING, "Adjusting next position due to index mismatch\n");
                    next = flv->validate_index[0].pos - 4;
                }
                goto skip;
            } else if (type == TYPE_ONTEXTDATA) {
                avpriv_request_sample(s, "OnTextData packet");
                return flv_data_packet(s, pkt, dts, next);
            } else if (type == TYPE_ONCAPTION) {
                return flv_data_packet(s, pkt, dts, next);
            } else if (type == TYPE_UNKNOWN) {
                stream_type = FLV_STREAM_TYPE_DATA;
            }
            avio_seek(s->pb, meta_pos, SEEK_SET);
        }
    } else {
        av_log(s, AV_LOG_DEBUG,
               "Skipping flv packet: type %d, size %d, flags %d.\n",
               type, size, flags);
skip:
        if (avio_seek(s->pb, next, SEEK_SET) != next) {
            // This can happen if flv_read_metabody above read past
            // next, on a non-seekable input, and the preceding data has
            // been flushed out from the IO buffer.
            av_log(s, AV_LOG_ERROR, "Unable to seek to the next packet\n");
            return AVERROR_INVALIDDATA;
        }
        ret = FFERROR_REDO;
        goto leave;
    }

    /* skip empty data packets */
    if (!size) {
        ret = FFERROR_REDO;
        goto leave;
    }

    /* now find stream */
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (stream_type == FLV_STREAM_TYPE_AUDIO) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
                (s->audio_codec_id || flv_same_audio_codec(st->codecpar, flags)))
                break;
        } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
                (s->video_codec_id || flv_same_video_codec(st->codecpar, flags)))
                break;
        } else if (stream_type == FLV_STREAM_TYPE_SUBTITLE) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
                break;
        } else if (stream_type == FLV_STREAM_TYPE_DATA) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_DATA)
                break;
        }
    }
    if (i == s->nb_streams) {
        static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE, AVMEDIA_TYPE_DATA};
        st = create_stream(s, stream_types[stream_type]);
        if (!st)
            return AVERROR(ENOMEM);
    }
    av_log(s, AV_LOG_TRACE, "%d %X %d \n", stream_type, flags, st->discard);

    if (flv->time_pos <= pos) {
        dts += flv->time_offset;
    }

    if ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&
        ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY ||
         stream_type == FLV_STREAM_TYPE_AUDIO))
        av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);

    if ((st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || stream_type == FLV_STREAM_TYPE_AUDIO)) ||
        (st->discard >= AVDISCARD_BIDIR  && ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && stream_type == FLV_STREAM_TYPE_VIDEO)) ||
         st->discard >= AVDISCARD_ALL) {
        avio_seek(s->pb, next, SEEK_SET);
        ret = FFERROR_REDO;
        goto leave;
    }

    // if not streamed and no duration from metadata then seek to end to find
    // the duration from the timestamps
    if ((s->pb->seekable & AVIO_SEEKABLE_NORMAL) &&
        (!s->duration || s->duration == AV_NOPTS_VALUE) &&
        !flv->searched_for_end) {
        int size;
        const int64_t pos   = avio_tell(s->pb);
        // Read the last 4 bytes of the file, this should be the size of the
        // previous FLV tag. Use the timestamp of its payload as duration.
        int64_t fsize       = avio_size(s->pb);
retry_duration:
        avio_seek(s->pb, fsize - 4, SEEK_SET);
        size = avio_rb32(s->pb);
        if (size > 0 && size < fsize) {
            // Seek to the start of the last FLV tag at position (fsize - 4 - size)
            // but skip the byte indicating the type.
            avio_seek(s->pb, fsize - 3 - size, SEEK_SET);
            if (size == avio_rb24(s->pb) + 11) {
                uint32_t ts = avio_rb24(s->pb);
                ts         |= (unsigned)avio_r8(s->pb) << 24;
                if (ts)
                    s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
                else if (fsize >= 8 && fsize - 8 >= size) {
                    fsize -= size+4;
                    goto retry_duration;
                }
            }
        }

        avio_seek(s->pb, pos, SEEK_SET);
        flv->searched_for_end = 1;
    }

    if (stream_type == FLV_STREAM_TYPE_AUDIO) {
        int bits_per_coded_sample;
        channels = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
        sample_rate = 44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >>
                                FLV_AUDIO_SAMPLERATE_OFFSET) >> 3;
        bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
        if (!st->codecpar->channels || !st->codecpar->sample_rate ||
            !st->codecpar->bits_per_coded_sample) {
            st->codecpar->channels              = channels;
            st->codecpar->channel_layout        = channels == 1
                                                  ? AV_CH_LAYOUT_MONO
                                                  : AV_CH_LAYOUT_STEREO;
            st->codecpar->sample_rate           = sample_rate;
            st->codecpar->bits_per_coded_sample = bits_per_coded_sample;
        }
        if (!st->codecpar->codec_id) {
            flv_set_audio_codec(s, st, st->codecpar,
                                flags & FLV_AUDIO_CODECID_MASK);
            flv->last_sample_rate =
            sample_rate           = st->codecpar->sample_rate;
            flv->last_channels    =
            channels              = st->codecpar->channels;
        } else {
            AVCodecParameters *par = avcodec_parameters_alloc();
            if (!par) {
                ret = AVERROR(ENOMEM);
                goto leave;
            }
            par->sample_rate = sample_rate;
            par->bits_per_coded_sample = bits_per_coded_sample;
            flv_set_audio_codec(s, st, par, flags & FLV_AUDIO_CODECID_MASK);
            sample_rate = par->sample_rate;
            avcodec_parameters_free(&par);
        }
    } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
        int ret = flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK, 1);
        if (ret < 0)
            return ret;
        size -= ret;
    } else if (stream_type == FLV_STREAM_TYPE_SUBTITLE) {
        st->codecpar->codec_id = AV_CODEC_ID_TEXT;
    } else if (stream_type == FLV_STREAM_TYPE_DATA) {
        st->codecpar->codec_id = AV_CODEC_ID_NONE; // Opaque AMF data
    }

    if (st->codecpar->codec_id == AV_CODEC_ID_AAC ||
        st->codecpar->codec_id == AV_CODEC_ID_H264 ||
        st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
        int type = avio_r8(s->pb);
        size--;

        if (size < 0) {
            ret = AVERROR_INVALIDDATA;
            goto leave;
        }

        if (st->codecpar->codec_id == AV_CODEC_ID_H264 || st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
            // sign extension
            int32_t cts = (avio_rb24(s->pb) + 0xff800000) ^ 0xff800000;
            pts = av_sat_add64(dts, cts);
            if (cts < 0) { // dts might be wrong
                if (!flv->wrong_dts)
                    av_log(s, AV_LOG_WARNING,
                           "Negative cts, previous timestamps might be wrong.\n");
                flv->wrong_dts = 1;
            } else if (FFABS(dts - pts) > 1000*60*15) {
                av_log(s, AV_LOG_WARNING,
                       "invalid timestamps %"PRId64" %"PRId64"\n", dts, pts);
                dts = pts = AV_NOPTS_VALUE;
            }
        }
        if (type == 0 && (!st->codecpar->extradata || st->codecpar->codec_id == AV_CODEC_ID_AAC ||
            st->codecpar->codec_id == AV_CODEC_ID_H264)) {
            AVDictionaryEntry *t;

            if (st->codecpar->extradata) {
                if ((ret = flv_queue_extradata(flv, s->pb, stream_type, size)) < 0)
                    return ret;
                ret = FFERROR_REDO;
                goto leave;
            }
            if ((ret = flv_get_extradata(s, st, size)) < 0)
                return ret;

            /* Workaround for buggy Omnia A/XE encoder */
            t = av_dict_get(s->metadata, "Encoder", NULL, 0);
            if (st->codecpar->codec_id == AV_CODEC_ID_AAC && t && !strcmp(t->value, "Omnia A/XE"))
                st->codecpar->extradata_size = 2;

            ret = FFERROR_REDO;
            goto leave;
        }
    }

    /* skip empty data packets */
    if (!size) {
        ret = FFERROR_REDO;
        goto leave;
    }

    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;
    pkt->dts          = dts;
    pkt->pts          = pts == AV_NOPTS_VALUE ? dts : pts;
    pkt->stream_index = st->index;
    pkt->pos          = pos;
    if (flv->new_extradata[stream_type]) {
        int ret = av_packet_add_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
                                          flv->new_extradata[stream_type],
                                          flv->new_extradata_size[stream_type]);
        if (ret >= 0) {
            flv->new_extradata[stream_type]      = NULL;
            flv->new_extradata_size[stream_type] = 0;
        }
    }
    if (stream_type == FLV_STREAM_TYPE_AUDIO &&
        (sample_rate != flv->last_sample_rate ||
         channels    != flv->last_channels)) {
        flv->last_sample_rate = sample_rate;
        flv->last_channels    = channels;
        ff_add_param_change(pkt, channels, 0, sample_rate, 0, 0);
    }

    if (stream_type == FLV_STREAM_TYPE_AUDIO ||
        (flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY ||
        stream_type == FLV_STREAM_TYPE_SUBTITLE ||
        stream_type == FLV_STREAM_TYPE_DATA)
        pkt->flags |= AV_PKT_FLAG_KEY;

leave:
    last = avio_rb32(s->pb);
    if (!flv->trust_datasize) {
        if (last != orig_size + 11 && last != orig_size + 10 &&
            !avio_feof(s->pb) &&
            (last != orig_size || !last) && last != flv->sum_flv_tag_size &&
            !flv->broken_sizes) {
            av_log(s, AV_LOG_ERROR, "Packet mismatch %d %d %d\n", last, orig_size + 11, flv->sum_flv_tag_size);
            avio_seek(s->pb, pos + 1, SEEK_SET);
            ret = resync(s);
            av_packet_unref(pkt);
            if (ret >= 0) {
                goto retry;
            }
        }
    }

    if (ret >= 0)
        flv->last_ts = pkt->dts;

    return ret;
}

static int flv_read_seek(AVFormatContext *s, int stream_index,
                         int64_t ts, int flags)
{
    FLVContext *flv = s->priv_data;
    flv->validate_count = 0;
    return avio_seek_time(s->pb, stream_index, ts, flags);
}

#define OFFSET(x) offsetof(FLVContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "flv_metadata", "Allocate streams according to the onMetaData array", OFFSET(trust_metadata), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { "flv_full_metadata", "Dump full metadata of the onMetadata", OFFSET(dump_full_metadata), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { "flv_ignore_prevtag", "Ignore the Size of previous tag", OFFSET(trust_datasize), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { "missing_streams", "", OFFSET(missing_streams), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 0xFF, VD | AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY },
    { NULL }
};

static const AVClass flv_class = {
    .class_name = "flvdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVInputFormat ff_flv_demuxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &flv_class,
};

static const AVClass live_flv_class = {
    .class_name = "live_flvdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVInputFormat ff_live_flv_demuxer = {
    .name           = "live_flv",
    .long_name      = NULL_IF_CONFIG_SMALL("live RTMP FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = live_flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &live_flv_class,
    .flags          = AVFMT_TS_DISCONT
};

static const AVClass kux_class = {
    .class_name = "kuxdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVInputFormat ff_kux_demuxer = {
    .name           = "kux",
    .long_name      = NULL_IF_CONFIG_SMALL("KUX (YouKu)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = kux_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "kux",
    .priv_class     = &kux_class,
};