/*
 * FLV demuxer
 * Copyright (c) 2003 The FFmpeg Project
 *
 * This demuxer will generate a 1 byte extradata for VP6F content.
 * It is composed of:
 *  - upper 4bits: difference between encoded width and visible width
 *  - lower 4bits: difference between encoded height and visible height
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/intfloat.h"
#include "libavutil/mathematics.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/mpeg4audio.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "flv.h"

#define VALIDATE_INDEX_TS_THRESH 2500

typedef struct FLVContext {
    const AVClass *class; ///< Class for private options.
    int trust_metadata;   ///< configure streams according to onMetaData
    int wrong_dts;        ///< wrong dts due to negative cts
    uint8_t *new_extradata[FLV_STREAM_TYPE_NB];
    int new_extradata_size[FLV_STREAM_TYPE_NB];
    int last_sample_rate;
    int last_channels;
    struct {
        int64_t dts;
        int64_t pos;
    } validate_index[2];
    int validate_next;
    int validate_count;
    int searched_for_end;
} FLVContext;
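
/*
 * Probing: an FLV file starts with the signature "FLV", a version byte and
 * a flags byte, followed by a 32-bit data offset to the first tag.  The
 * live variant additionally requires the "NGINX RTMP" marker at a fixed
 * position after that offset, which identifies streams relayed by
 * nginx-rtmp, so plain files and live streams match separate demuxers.
 */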
static int probe(AVProbeData *p, int live)
{
    const uint8_t *d = p->buf;
    unsigned offset = AV_RB32(d + 5);

    if (d[0] == 'F' &&
        d[1] == 'L' &&
        d[2] == 'V' &&
        d[3] < 5 && d[5] == 0 &&
        offset + 100 < p->buf_size &&
        offset > 8) {
        int is_live = !memcmp(d + offset + 40, "NGINX RTMP", 10);

        if (live == is_live)
            return AVPROBE_SCORE_MAX;
    }
    return 0;
}

static int flv_probe(AVProbeData *p)
{
    return probe(p, 0);
}

static int live_flv_probe(AVProbeData *p)
{
    return probe(p, 1);
}

static AVStream *create_stream(AVFormatContext *s, int codec_type)
{
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return NULL;
    st->codec->codec_type = codec_type;
    if (s->nb_streams >= 3 || (s->nb_streams == 2 &&
                               s->streams[0]->codec->codec_type != AVMEDIA_TYPE_SUBTITLE &&
                               s->streams[1]->codec->codec_type != AVMEDIA_TYPE_SUBTITLE))
        s->ctx_flags &= ~AVFMTCTX_NOHEADER;

    avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
    return st;
}
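
/*
 * The FLV audio tag header is a single flags byte: the upper four bits
 * select the codec, the next two bits the sample rate, one bit the sample
 * size (8 or 16 bit) and one bit mono/stereo.  flv_same_audio_codec()
 * checks whether an incoming tag still matches the parameters of an
 * already created stream; flv_set_audio_codec() maps the codec id bits to
 * an AVCodecID and fills in sample rates implied by the codec.
 */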
static int flv_same_audio_codec(AVCodecContext *acodec, int flags)
{
    int bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
    int flv_codecid           = flags & FLV_AUDIO_CODECID_MASK;
    int codec_id;

    if (!acodec->codec_id && !acodec->codec_tag)
        return 1;

    if (acodec->bits_per_coded_sample != bits_per_coded_sample)
        return 0;

    switch (flv_codecid) {
    // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                   : AV_CODEC_ID_PCM_S16BE;
#else
                   : AV_CODEC_ID_PCM_S16LE;
#endif
        return codec_id == acodec->codec_id;
    case FLV_CODECID_PCM_LE:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
                   : AV_CODEC_ID_PCM_S16LE;
        return codec_id == acodec->codec_id;
    case FLV_CODECID_AAC:
        return acodec->codec_id == AV_CODEC_ID_AAC;
    case FLV_CODECID_ADPCM:
        return acodec->codec_id == AV_CODEC_ID_ADPCM_SWF;
    case FLV_CODECID_SPEEX:
        return acodec->codec_id == AV_CODEC_ID_SPEEX;
    case FLV_CODECID_MP3:
        return acodec->codec_id == AV_CODEC_ID_MP3;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
    case FLV_CODECID_NELLYMOSER:
        return acodec->codec_id == AV_CODEC_ID_NELLYMOSER;
    case FLV_CODECID_PCM_MULAW:
        return acodec->sample_rate == 8000 &&
               acodec->codec_id    == AV_CODEC_ID_PCM_MULAW;
    case FLV_CODECID_PCM_ALAW:
        return acodec->sample_rate == 8000 &&
               acodec->codec_id    == AV_CODEC_ID_PCM_ALAW;
    default:
        return acodec->codec_tag == (flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
    }
}

static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream,
                                AVCodecContext *acodec, int flv_codecid)
{
    switch (flv_codecid) {
    // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        acodec->codec_id = acodec->bits_per_coded_sample == 8
                           ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                           : AV_CODEC_ID_PCM_S16BE;
#else
                           : AV_CODEC_ID_PCM_S16LE;
#endif
        break;
    case FLV_CODECID_PCM_LE:
        acodec->codec_id = acodec->bits_per_coded_sample == 8
                           ? AV_CODEC_ID_PCM_U8
                           : AV_CODEC_ID_PCM_S16LE;
        break;
    case FLV_CODECID_AAC:
        acodec->codec_id = AV_CODEC_ID_AAC;
        break;
    case FLV_CODECID_ADPCM:
        acodec->codec_id = AV_CODEC_ID_ADPCM_SWF;
        break;
    case FLV_CODECID_SPEEX:
        acodec->codec_id    = AV_CODEC_ID_SPEEX;
        acodec->sample_rate = 16000;
        break;
    case FLV_CODECID_MP3:
        acodec->codec_id      = AV_CODEC_ID_MP3;
        astream->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
        // in case metadata does not otherwise declare samplerate
        acodec->sample_rate = 8000;
        acodec->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
        acodec->sample_rate = 16000;
        acodec->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER:
        acodec->codec_id = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_PCM_MULAW:
        acodec->sample_rate = 8000;
        acodec->codec_id    = AV_CODEC_ID_PCM_MULAW;
        break;
    case FLV_CODECID_PCM_ALAW:
        acodec->sample_rate = 8000;
        acodec->codec_id    = AV_CODEC_ID_PCM_ALAW;
        break;
    default:
        avpriv_request_sample(s, "Audio codec (%x)",
                              flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
        acodec->codec_tag = flv_codecid >> FLV_AUDIO_CODECID_OFFSET;
    }
}

static int flv_same_video_codec(AVCodecContext *vcodec, int flags)
{
    int flv_codecid = flags & FLV_VIDEO_CODECID_MASK;

    if (!vcodec->codec_id && !vcodec->codec_tag)
        return 1;

    switch (flv_codecid) {
    case FLV_CODECID_H263:
        return vcodec->codec_id == AV_CODEC_ID_FLV1;
    case FLV_CODECID_SCREEN:
        return vcodec->codec_id == AV_CODEC_ID_FLASHSV;
    case FLV_CODECID_SCREEN2:
        return vcodec->codec_id == AV_CODEC_ID_FLASHSV2;
    case FLV_CODECID_VP6:
        return vcodec->codec_id == AV_CODEC_ID_VP6F;
    case FLV_CODECID_VP6A:
        return vcodec->codec_id == AV_CODEC_ID_VP6A;
    case FLV_CODECID_H264:
        return vcodec->codec_id == AV_CODEC_ID_H264;
    default:
        return vcodec->codec_tag == flv_codecid;
    }
}

static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream,
                               int flv_codecid, int read)
{
    AVCodecContext *vcodec = vstream->codec;

    switch (flv_codecid) {
    case FLV_CODECID_H263:
        vcodec->codec_id = AV_CODEC_ID_FLV1;
        break;
    case FLV_CODECID_REALH263:
        vcodec->codec_id = AV_CODEC_ID_H263;
        break; // Really mean it this time
    case FLV_CODECID_SCREEN:
        vcodec->codec_id = AV_CODEC_ID_FLASHSV;
        break;
    case FLV_CODECID_SCREEN2:
        vcodec->codec_id = AV_CODEC_ID_FLASHSV2;
        break;
    case FLV_CODECID_VP6:
        vcodec->codec_id = AV_CODEC_ID_VP6F;
    case FLV_CODECID_VP6A:
        if (flv_codecid == FLV_CODECID_VP6A)
            vcodec->codec_id = AV_CODEC_ID_VP6A;
        if (read) {
            if (vcodec->extradata_size != 1) {
                ff_alloc_extradata(vcodec, 1);
            }
            if (vcodec->extradata)
                vcodec->extradata[0] = avio_r8(s->pb);
            else
                avio_skip(s->pb, 1);
        }
        return 1; // 1 byte body size adjustment for flv_read_packet()
    case FLV_CODECID_H264:
        vcodec->codec_id      = AV_CODEC_ID_H264;
        vstream->need_parsing = AVSTREAM_PARSE_HEADERS;
        return 3; // not 4, reading packet type will consume one byte
    case FLV_CODECID_MPEG4:
        vcodec->codec_id = AV_CODEC_ID_MPEG4;
        return 3;
    default:
        avpriv_request_sample(s, "Video codec (%x)", flv_codecid);
        vcodec->codec_tag = flv_codecid;
    }

    return 0;
}
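
/*
 * Script data tags carry AMF0-encoded values.  amf_get_string() reads one
 * 16-bit length-prefixed string into a fixed buffer; strings that do not
 * fit are skipped and reported with a return value of -1.
 */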
static int amf_get_string(AVIOContext *ioc, char *buffer, int buffsize)
{
    int length = avio_rb16(ioc);
    if (length >= buffsize) {
        avio_skip(ioc, length);
        return -1;
    }

    avio_read(ioc, buffer, length);

    buffer[length] = '\0';

    return length;
}
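
/*
 * Some muxers store a "keyframes" object in onMetaData with two parallel
 * arrays, "times" (seconds) and "filepositions" (byte offsets).  These are
 * turned into index entries for seeking; the first two entries are also
 * remembered in validate_index[] so that flv_read_packet() can verify the
 * index against the actual tag positions and discard it if it disagrees.
 */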
static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc,
                                 AVStream *vstream, int64_t max_pos)
{
    FLVContext *flv       = s->priv_data;
    unsigned int timeslen = 0, fileposlen = 0, i;
    char str_val[256];
    int64_t *times         = NULL;
    int64_t *filepositions = NULL;
    int ret                = AVERROR(ENOSYS);
    int64_t initial_pos    = avio_tell(ioc);

    if (vstream->nb_index_entries > 0) {
        av_log(s, AV_LOG_WARNING, "Skipping duplicate index\n");
        return 0;
    }

    if (s->flags & AVFMT_FLAG_IGNIDX)
        return 0;

    while (avio_tell(ioc) < max_pos - 2 &&
           amf_get_string(ioc, str_val, sizeof(str_val)) > 0) {
        int64_t **current_array;
        unsigned int arraylen;

        // Expect array object in context
        if (avio_r8(ioc) != AMF_DATA_TYPE_ARRAY)
            break;

        arraylen = avio_rb32(ioc);
        if (arraylen >> 28)
            break;

        if (!strcmp(KEYFRAMES_TIMESTAMP_TAG, str_val) && !times) {
            current_array = &times;
            timeslen      = arraylen;
        } else if (!strcmp(KEYFRAMES_BYTEOFFSET_TAG, str_val) &&
                   !filepositions) {
            current_array = &filepositions;
            fileposlen    = arraylen;
        } else
            // unexpected metatag inside keyframes, will not use such
            // metadata for indexing
            break;

        if (!(*current_array = av_mallocz(sizeof(**current_array) * arraylen))) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }

        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++) {
            if (avio_r8(ioc) != AMF_DATA_TYPE_NUMBER)
                goto invalid;
            current_array[0][i] = av_int2double(avio_rb64(ioc));
        }
        if (times && filepositions) {
            // All done, exiting at a position allowing amf_parse_object
            // to finish parsing the object
            ret = 0;
            break;
        }
    }

    if (timeslen == fileposlen && fileposlen > 1 && max_pos <= filepositions[0]) {
        for (i = 0; i < fileposlen; i++) {
            av_add_index_entry(vstream, filepositions[i], times[i] * 1000,
                               0, 0, AVINDEX_KEYFRAME);
            if (i < 2) {
                flv->validate_index[i].pos = filepositions[i];
                flv->validate_index[i].dts = times[i] * 1000;
                flv->validate_count        = i + 1;
            }
        }
    } else {
invalid:
        av_log(s, AV_LOG_WARNING, "Invalid keyframes object, skipping.\n");
    }

finish:
    av_freep(&times);
    av_freep(&filepositions);
    avio_seek(ioc, initial_pos, SEEK_SET);
    return ret;
}
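
/*
 * Recursively parse one AMF value.  Numbers, booleans and strings found at
 * depth 1 of the onMetaData object are either applied to the codec
 * contexts (duration, data rates and, with the flv_metadata option
 * enabled, the full codec parameters) or exported as demuxer metadata.
 */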
static int amf_parse_object(AVFormatContext *s, AVStream *astream,
                            AVStream *vstream, const char *key,
                            int64_t max_pos, int depth)
{
    AVCodecContext *acodec, *vcodec;
    FLVContext *flv = s->priv_data;
    AVIOContext *ioc;
    AMFDataType amf_type;
    char str_val[1024];
    double num_val;

    num_val  = 0;
    ioc      = s->pb;
    amf_type = avio_r8(ioc);

    switch (amf_type) {
    case AMF_DATA_TYPE_NUMBER:
        num_val = av_int2double(avio_rb64(ioc));
        break;
    case AMF_DATA_TYPE_BOOL:
        num_val = avio_r8(ioc);
        break;
    case AMF_DATA_TYPE_STRING:
        if (amf_get_string(ioc, str_val, sizeof(str_val)) < 0) {
            av_log(s, AV_LOG_ERROR, "AMF_DATA_TYPE_STRING parsing failed\n");
            return -1;
        }
        break;
    case AMF_DATA_TYPE_OBJECT:
        if ((vstream || astream) && key &&
            ioc->seekable &&
            !strcmp(KEYFRAMES_TAG, key) && depth == 1)
            if (parse_keyframes_index(s, ioc, vstream ? vstream : astream,
                                      max_pos) < 0)
                av_log(s, AV_LOG_ERROR, "Keyframe index parsing failed\n");

        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1; // if we couldn't skip, bomb out.
        if (avio_r8(ioc) != AMF_END_OF_OBJECT) {
            av_log(s, AV_LOG_ERROR, "Missing AMF_END_OF_OBJECT in AMF_DATA_TYPE_OBJECT\n");
            return -1;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_UNDEFINED:
    case AMF_DATA_TYPE_UNSUPPORTED:
        break; // these take up no additional space
    case AMF_DATA_TYPE_MIXEDARRAY:
    {
        unsigned v;
        avio_skip(ioc, 4); // skip 32-bit max array index
        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            // this is the only case in which we would want a nested
            // parse to not skip over the object
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1;

        v = avio_r8(ioc);
        if (v != AMF_END_OF_OBJECT) {
            av_log(s, AV_LOG_ERROR, "Missing AMF_END_OF_OBJECT in AMF_DATA_TYPE_MIXEDARRAY, found %d\n", v);
            return -1;
        }
        break;
    }
    case AMF_DATA_TYPE_ARRAY:
    {
        unsigned int arraylen, i;

        arraylen = avio_rb32(ioc);
        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++)
            if (amf_parse_object(s, NULL, NULL, NULL, max_pos,
                                 depth + 1) < 0)
                return -1; // if we couldn't skip, bomb out.
    }
    break;
    case AMF_DATA_TYPE_DATE:
        avio_skip(ioc, 8 + 2); // timestamp (double) and UTC offset (int16)
        break;
    default: // unsupported type, we couldn't skip
        av_log(s, AV_LOG_ERROR, "unsupported amf type %d\n", amf_type);
        return -1;
    }

    if (key) {
        acodec = astream ? astream->codec : NULL;
        vcodec = vstream ? vstream->codec : NULL;

        // stream info doesn't live any deeper than the first object
        if (depth == 1) {
            if (amf_type == AMF_DATA_TYPE_NUMBER ||
                amf_type == AMF_DATA_TYPE_BOOL) {
                if (!strcmp(key, "duration"))
                    s->duration = num_val * AV_TIME_BASE;
                else if (!strcmp(key, "videodatarate") && vcodec &&
                         0 <= (int)(num_val * 1024.0))
                    vcodec->bit_rate = num_val * 1024.0;
                else if (!strcmp(key, "audiodatarate") && acodec &&
                         0 <= (int)(num_val * 1024.0))
                    acodec->bit_rate = num_val * 1024.0;
                else if (!strcmp(key, "datastream")) {
                    AVStream *st = create_stream(s, AVMEDIA_TYPE_SUBTITLE);
                    if (!st)
                        return AVERROR(ENOMEM);
                    st->codec->codec_id = AV_CODEC_ID_TEXT;
                } else if (flv->trust_metadata) {
                    if (!strcmp(key, "videocodecid") && vcodec) {
                        flv_set_video_codec(s, vstream, num_val, 0);
                    } else if (!strcmp(key, "audiocodecid") && acodec) {
                        int id = ((int)num_val) << FLV_AUDIO_CODECID_OFFSET;
                        flv_set_audio_codec(s, astream, acodec, id);
                    } else if (!strcmp(key, "audiosamplerate") && acodec) {
                        acodec->sample_rate = num_val;
                    } else if (!strcmp(key, "audiosamplesize") && acodec) {
                        acodec->bits_per_coded_sample = num_val;
                    } else if (!strcmp(key, "stereo") && acodec) {
                        acodec->channels       = num_val + 1;
                        acodec->channel_layout = acodec->channels == 2 ?
                                                 AV_CH_LAYOUT_STEREO :
                                                 AV_CH_LAYOUT_MONO;
                    } else if (!strcmp(key, "width") && vcodec) {
                        vcodec->width = num_val;
                    } else if (!strcmp(key, "height") && vcodec) {
                        vcodec->height = num_val;
                    }
                }
            }
        }

        if (amf_type == AMF_DATA_TYPE_OBJECT && s->nb_streams == 1 &&
            ((!acodec && !strcmp(key, "audiocodecid")) ||
             (!vcodec && !strcmp(key, "videocodecid"))))
            // if either audio or video is missing, its codecid is written as an empty object
            s->ctx_flags &= ~AVFMTCTX_NOHEADER;

        if (!strcmp(key, "duration")        ||
            !strcmp(key, "filesize")        ||
            !strcmp(key, "width")           ||
            !strcmp(key, "height")          ||
            !strcmp(key, "videodatarate")   ||
            !strcmp(key, "framerate")       ||
            !strcmp(key, "videocodecid")    ||
            !strcmp(key, "audiodatarate")   ||
            !strcmp(key, "audiosamplerate") ||
            !strcmp(key, "audiosamplesize") ||
            !strcmp(key, "stereo")          ||
            !strcmp(key, "audiocodecid")    ||
            !strcmp(key, "datastream"))
            return 0;

        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;

        if (amf_type == AMF_DATA_TYPE_BOOL) {
            av_strlcpy(str_val, num_val > 0 ? "true" : "false",
                       sizeof(str_val));
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_NUMBER) {
            snprintf(str_val, sizeof(str_val), "%.f", num_val);
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_STRING)
            av_dict_set(&s->metadata, key, str_val, 0);
    }

    return 0;
}
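
/*
 * A script data tag starts with an AMF string naming the event
 * ("onMetaData", "onTextData", "onCaption", ...) followed by its payload.
 * flv_read_metabody() classifies the event and, for onMetaData/onCuePoint,
 * hands the payload to amf_parse_object() with the audio and video streams
 * already looked up.
 */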
#define TYPE_ONTEXTDATA    1
#define TYPE_ONCAPTION     2
#define TYPE_ONCAPTIONINFO 3
#define TYPE_UNKNOWN       9

static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
{
    AMFDataType type;
    AVStream *stream, *astream, *vstream;
    AVStream av_unused *dstream;
    AVIOContext *ioc;
    int i;
    // only needs to hold the string "onMetaData".
    // Anything longer is something we don't want.
    char buffer[32];

    astream = NULL;
    vstream = NULL;
    dstream = NULL;
    ioc     = s->pb;

    // first object needs to be "onMetaData" string
    type = avio_r8(ioc);
    if (type != AMF_DATA_TYPE_STRING ||
        amf_get_string(ioc, buffer, sizeof(buffer)) < 0)
        return TYPE_UNKNOWN;

    if (!strcmp(buffer, "onTextData"))
        return TYPE_ONTEXTDATA;

    if (!strcmp(buffer, "onCaption"))
        return TYPE_ONCAPTION;

    if (!strcmp(buffer, "onCaptionInfo"))
        return TYPE_ONCAPTIONINFO;

    if (strcmp(buffer, "onMetaData") && strcmp(buffer, "onCuePoint")) {
        av_log(s, AV_LOG_DEBUG, "Unknown type %s\n", buffer);
        return TYPE_UNKNOWN;
    }

    // find the streams now so that amf_parse_object doesn't need to do
    // the lookup every time it is called.
    for (i = 0; i < s->nb_streams; i++) {
        stream = s->streams[i];
        if (stream->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            vstream = stream;
        else if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            astream = stream;
        else if (stream->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
            dstream = stream;
    }

    // parse the second object (we want a mixed array)
    if (amf_parse_object(s, astream, vstream, buffer, next_pos, 0) < 0)
        return -1;

    return 0;
}
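
/*
 * The FLV file header was already checked during probing; here only the
 * flags byte is needed to pre-create the audio and video streams, and the
 * data offset is used to seek to the first tag (skipping the 4-byte
 * PreviousTagSize0 field).  AVFMTCTX_NOHEADER stays set until
 * create_stream() has seen all expected streams.
 */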
static int flv_read_header(AVFormatContext *s)
{
    int offset, flags;

    avio_skip(s->pb, 4);
    flags = avio_r8(s->pb);

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    if (flags & FLV_HEADER_FLAG_HASVIDEO)
        if (!create_stream(s, AVMEDIA_TYPE_VIDEO))
            return AVERROR(ENOMEM);
    if (flags & FLV_HEADER_FLAG_HASAUDIO)
        if (!create_stream(s, AVMEDIA_TYPE_AUDIO))
            return AVERROR(ENOMEM);
    // Flag doesn't indicate whether or not there is script-data present. Must
    // create that stream if it's encountered.

    offset = avio_rb32(s->pb);
    avio_seek(s->pb, offset, SEEK_SET);
    avio_skip(s->pb, 4);

    s->start_time = 0;

    return 0;
}

static int flv_read_close(AVFormatContext *s)
{
    int i;
    FLVContext *flv = s->priv_data;
    for (i = 0; i < FLV_STREAM_TYPE_NB; i++)
        av_freep(&flv->new_extradata[i]);
    return 0;
}

static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size)
{
    av_freep(&st->codec->extradata);
    if (ff_get_extradata(st->codec, s->pb, size) < 0)
        return AVERROR(ENOMEM);
    return 0;
}

static int flv_queue_extradata(FLVContext *flv, AVIOContext *pb, int stream,
                               int size)
{
    av_free(flv->new_extradata[stream]);
    flv->new_extradata[stream] = av_mallocz(size +
                                            AV_INPUT_BUFFER_PADDING_SIZE);
    if (!flv->new_extradata[stream])
        return AVERROR(ENOMEM);
    flv->new_extradata_size[stream] = size;
    avio_read(pb, flv->new_extradata[stream], size);
    return 0;
}

static void clear_index_entries(AVFormatContext *s, int64_t pos)
{
    int i, j, out;
    av_log(s, AV_LOG_WARNING,
           "Found invalid index entries, clearing the index.\n");
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        /* Remove all index entries that point to >= pos */
        out = 0;
        for (j = 0; j < st->nb_index_entries; j++)
            if (st->index_entries[j].pos < pos)
                st->index_entries[out++] = st->index_entries[j];
        st->nb_index_entries = out;
    }
}
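
/*
 * amf_skip_tag() advances past one AMF value of the given type without
 * interpreting it, recursing into objects and arrays; flv_data_packet()
 * uses it to walk over fields it is not interested in.
 */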
static int amf_skip_tag(AVIOContext *pb, AMFDataType type)
{
    int nb = -1, ret, parse_name = 1;

    switch (type) {
    case AMF_DATA_TYPE_NUMBER:
        avio_skip(pb, 8);
        break;
    case AMF_DATA_TYPE_BOOL:
        avio_skip(pb, 1);
        break;
    case AMF_DATA_TYPE_STRING:
        avio_skip(pb, avio_rb16(pb));
        break;
    case AMF_DATA_TYPE_ARRAY:
        parse_name = 0;
    case AMF_DATA_TYPE_MIXEDARRAY:
        nb = avio_rb32(pb);
    case AMF_DATA_TYPE_OBJECT:
        while (!pb->eof_reached && (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY)) {
            if (parse_name) {
                int size = avio_rb16(pb);
                if (!size) {
                    avio_skip(pb, 1);
                    break;
                }
                avio_skip(pb, size);
            }
            if ((ret = amf_skip_tag(pb, avio_r8(pb))) < 0)
                return ret;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_OBJECT_END:
        break;
    default:
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
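
/*
 * onTextData/onCaption payloads are converted into packets on a text
 * (subtitle) stream: the first string value found (or the "text" field of
 * an object) becomes the packet data, stamped with the tag's timestamp.
 */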
static int flv_data_packet(AVFormatContext *s, AVPacket *pkt,
                           int64_t dts, int64_t next)
{
    AVIOContext *pb = s->pb;
    AVStream *st    = NULL;
    char buf[20];
    int ret = AVERROR_INVALIDDATA;
    int i, length = -1;
    int array = 0;

    switch (avio_r8(pb)) {
    case AMF_DATA_TYPE_ARRAY:
        array = 1;
    case AMF_DATA_TYPE_MIXEDARRAY:
        avio_seek(pb, 4, SEEK_CUR);
    case AMF_DATA_TYPE_OBJECT:
        break;
    default:
        goto skip;
    }

    while (array || (ret = amf_get_string(pb, buf, sizeof(buf))) > 0) {
        AMFDataType type = avio_r8(pb);
        if (type == AMF_DATA_TYPE_STRING && (array || !strcmp(buf, "text"))) {
            length = avio_rb16(pb);
            ret    = av_get_packet(pb, pkt, length);
            if (ret < 0)
                goto skip;
            else
                break;
        } else {
            if ((ret = amf_skip_tag(pb, type)) < 0)
                goto skip;
        }
    }

    if (length < 0) {
        ret = AVERROR_INVALIDDATA;
        goto skip;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
            break;
    }

    if (i == s->nb_streams) {
        st = create_stream(s, AVMEDIA_TYPE_SUBTITLE);
        if (!st)
            return AVERROR(ENOMEM);
        st->codec->codec_id = AV_CODEC_ID_TEXT;
    }

    pkt->dts  = dts;
    pkt->pts  = dts;
    pkt->size = ret;

    pkt->stream_index = st->index;
    pkt->flags       |= AV_PKT_FLAG_KEY;

skip:
    avio_seek(s->pb, next + 4, SEEK_SET);

    return ret;
}
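
/*
 * Each FLV tag is an 11-byte header (type, 24-bit body size, 24+8-bit
 * timestamp in milliseconds, 24-bit stream id that is always 0) followed
 * by the body and a 32-bit PreviousTagSize trailer.  flv_read_packet()
 * loops over tags until it can return one audio, video or data packet,
 * validating the keyframe index, creating late-discovered streams and
 * extracting AAC/H.264/MPEG-4 sequence headers as extradata along the way.
 */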
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, size, flags;
    enum FlvTagType type;
    int stream_type = -1;
    int64_t next, pos, meta_pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    int av_uninit(channels);
    int av_uninit(sample_rate);
    AVStream *st = NULL;
    int last = -1;

    /* pkt size is repeated at end. skip it */
    for (;; last = avio_rb32(s->pb)) {
        pos  = avio_tell(s->pb);
        type = (avio_r8(s->pb) & 0x1F);
        size = avio_rb24(s->pb);
        dts  = avio_rb24(s->pb);
        dts |= avio_r8(s->pb) << 24;
        av_log(s, AV_LOG_TRACE, "type:%d, size:%d, last:%d, dts:%"PRId64" pos:%"PRId64"\n", type, size, last, dts, avio_tell(s->pb));
        if (avio_feof(s->pb))
            return AVERROR_EOF;
        avio_skip(s->pb, 3); /* stream id, always 0 */
        flags = 0;

        if (flv->validate_next < flv->validate_count) {
            int64_t validate_pos = flv->validate_index[flv->validate_next].pos;
            if (pos == validate_pos) {
                if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=
                    VALIDATE_INDEX_TS_THRESH) {
                    flv->validate_next++;
                } else {
                    clear_index_entries(s, validate_pos);
                    flv->validate_count = 0;
                }
            } else if (pos > validate_pos) {
                clear_index_entries(s, validate_pos);
                flv->validate_count = 0;
            }
        }

        if (size == 0) {
            ret = AVERROR(EAGAIN);
            goto leave;
        }

        next = size + avio_tell(s->pb);

        if (type == FLV_TAG_TYPE_AUDIO) {
            stream_type = FLV_STREAM_TYPE_AUDIO;
            flags       = avio_r8(s->pb);
            size--;
        } else if (type == FLV_TAG_TYPE_VIDEO) {
            stream_type = FLV_STREAM_TYPE_VIDEO;
            flags       = avio_r8(s->pb);
            size--;
            if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)
                goto skip;
        } else if (type == FLV_TAG_TYPE_META) {
            stream_type = FLV_STREAM_TYPE_DATA;
            if (size > 13 + 1 + 4) { // Header-type metadata stuff
                int type;
                meta_pos = avio_tell(s->pb);
                type     = flv_read_metabody(s, next);
                if (type == 0 && dts == 0 || type < 0 || type == TYPE_UNKNOWN) {
                    if (type < 0 && flv->validate_count &&
                        flv->validate_index[0].pos     > next &&
                        flv->validate_index[0].pos - 4 < next) {
                        av_log(s, AV_LOG_WARNING, "Adjusting next position due to index mismatch\n");
                        next = flv->validate_index[0].pos - 4;
                    }
                    goto skip;
                } else if (type == TYPE_ONTEXTDATA) {
                    avpriv_request_sample(s, "OnTextData packet");
                    return flv_data_packet(s, pkt, dts, next);
                } else if (type == TYPE_ONCAPTION) {
                    return flv_data_packet(s, pkt, dts, next);
                }
                avio_seek(s->pb, meta_pos, SEEK_SET);
            }
        } else {
            av_log(s, AV_LOG_DEBUG,
                   "Skipping flv packet: type %d, size %d, flags %d.\n",
                   type, size, flags);
skip:
            avio_seek(s->pb, next, SEEK_SET);
            ret = AVERROR(EAGAIN);
            goto leave;
        }

        /* skip empty data packets */
        if (!size) {
            ret = AVERROR(EAGAIN);
            goto leave;
        }

        /* now find stream */
        for (i = 0; i < s->nb_streams; i++) {
            st = s->streams[i];
            if (stream_type == FLV_STREAM_TYPE_AUDIO) {
                if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO &&
                    (s->audio_codec_id || flv_same_audio_codec(st->codec, flags)))
                    break;
            } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
                if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
                    (s->video_codec_id || flv_same_video_codec(st->codec, flags)))
                    break;
            } else if (stream_type == FLV_STREAM_TYPE_DATA) {
                if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE)
                    break;
            }
        }
        if (i == s->nb_streams) {
            static const enum AVMediaType stream_types[] = { AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE };
            av_log(s, AV_LOG_WARNING, "Stream discovered after head already parsed\n");
            st = create_stream(s, stream_types[stream_type]);
            if (!st)
                return AVERROR(ENOMEM);
        }
        av_log(s, AV_LOG_TRACE, "%d %X %d \n", stream_type, flags, st->discard);

        if (s->pb->seekable &&
            ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY ||
             stream_type == FLV_STREAM_TYPE_AUDIO))
            av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);

        if ((st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO))) ||
            (st->discard >= AVDISCARD_BIDIR  && ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && (stream_type == FLV_STREAM_TYPE_VIDEO))) ||
             st->discard >= AVDISCARD_ALL) {
            avio_seek(s->pb, next, SEEK_SET);
            ret = AVERROR(EAGAIN);
            goto leave;
        }
        break;
    }

    // if not streamed and no duration from metadata then seek to end to find
    // the duration from the timestamps
    if (s->pb->seekable && (!s->duration || s->duration == AV_NOPTS_VALUE) &&
        !flv->searched_for_end) {
        int size;
        const int64_t pos = avio_tell(s->pb);
        // Read the last 4 bytes of the file, this should be the size of the
        // previous FLV tag. Use the timestamp of its payload as duration.
        int64_t fsize     = avio_size(s->pb);
retry_duration:
        avio_seek(s->pb, fsize - 4, SEEK_SET);
        size = avio_rb32(s->pb);
        // Seek to the start of the last FLV tag at position (fsize - 4 - size)
        // but skip the byte indicating the type.
        avio_seek(s->pb, fsize - 3 - size, SEEK_SET);
        if (size == avio_rb24(s->pb) + 11) {
            uint32_t ts = avio_rb24(s->pb);
            ts         |= avio_r8(s->pb) << 24;
            if (ts)
                s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
            else if (fsize >= 8 && fsize - 8 >= size) {
                fsize -= size + 4;
                goto retry_duration;
            }
        }

        avio_seek(s->pb, pos, SEEK_SET);
        flv->searched_for_end = 1;
    }

    if (stream_type == FLV_STREAM_TYPE_AUDIO) {
        int bits_per_coded_sample;
        channels              = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
        sample_rate           = 44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >>
                                          FLV_AUDIO_SAMPLERATE_OFFSET) >> 3;
        bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
        if (!st->codec->channels || !st->codec->sample_rate ||
            !st->codec->bits_per_coded_sample) {
            st->codec->channels              = channels;
            st->codec->channel_layout        = channels == 1
                                               ? AV_CH_LAYOUT_MONO
                                               : AV_CH_LAYOUT_STEREO;
            st->codec->sample_rate           = sample_rate;
            st->codec->bits_per_coded_sample = bits_per_coded_sample;
        }
        if (!st->codec->codec_id) {
            flv_set_audio_codec(s, st, st->codec,
                                flags & FLV_AUDIO_CODECID_MASK);
            flv->last_sample_rate =
            sample_rate           = st->codec->sample_rate;
            flv->last_channels    =
            channels              = st->codec->channels;
        } else {
            AVCodecContext ctx = { 0 };
            ctx.sample_rate           = sample_rate;
            ctx.bits_per_coded_sample = bits_per_coded_sample;
            flv_set_audio_codec(s, st, &ctx, flags & FLV_AUDIO_CODECID_MASK);
            sample_rate = ctx.sample_rate;
        }
    } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
        size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK, 1);
    } else if (stream_type == FLV_STREAM_TYPE_DATA) {
        st->codec->codec_id = AV_CODEC_ID_TEXT;
    }

    if (st->codec->codec_id == AV_CODEC_ID_AAC ||
        st->codec->codec_id == AV_CODEC_ID_H264 ||
        st->codec->codec_id == AV_CODEC_ID_MPEG4) {
        int type = avio_r8(s->pb);
        size--;
        if (st->codec->codec_id == AV_CODEC_ID_H264 || st->codec->codec_id == AV_CODEC_ID_MPEG4) {
            // sign extension
            int32_t cts = (avio_rb24(s->pb) + 0xff800000) ^ 0xff800000;
            pts = dts + cts;
            if (cts < 0) { // dts might be wrong
                if (!flv->wrong_dts)
                    av_log(s, AV_LOG_WARNING,
                           "Negative cts, previous timestamps might be wrong.\n");
                flv->wrong_dts = 1;
            } else if (FFABS(dts - pts) > 1000 * 60 * 15) {
                av_log(s, AV_LOG_WARNING,
                       "invalid timestamps %"PRId64" %"PRId64"\n", dts, pts);
                dts = pts = AV_NOPTS_VALUE;
            }
        }
        if (type == 0 && (!st->codec->extradata || st->codec->codec_id == AV_CODEC_ID_AAC ||
            st->codec->codec_id == AV_CODEC_ID_H264)) {
            AVDictionaryEntry *t;

            if (st->codec->extradata) {
                if ((ret = flv_queue_extradata(flv, s->pb, stream_type, size)) < 0)
                    return ret;
                ret = AVERROR(EAGAIN);
                goto leave;
            }
            if ((ret = flv_get_extradata(s, st, size)) < 0)
                return ret;

            /* Workaround for buggy Omnia A/XE encoder */
            t = av_dict_get(s->metadata, "Encoder", NULL, 0);
            if (st->codec->codec_id == AV_CODEC_ID_AAC && t && !strcmp(t->value, "Omnia A/XE"))
                st->codec->extradata_size = 2;

            if (st->codec->codec_id == AV_CODEC_ID_AAC && 0) {
                MPEG4AudioConfig cfg;

                if (avpriv_mpeg4audio_get_config(&cfg, st->codec->extradata,
                                                 st->codec->extradata_size * 8, 1) >= 0) {
                    st->codec->channels       = cfg.channels;
                    st->codec->channel_layout = 0;
                    if (cfg.ext_sample_rate)
                        st->codec->sample_rate = cfg.ext_sample_rate;
                    else
                        st->codec->sample_rate = cfg.sample_rate;
                    av_log(s, AV_LOG_TRACE, "mp4a config channels %d sample rate %d\n",
                           st->codec->channels, st->codec->sample_rate);
                }
            }

            ret = AVERROR(EAGAIN);
            goto leave;
        }
    }

    /* skip empty data packets */
    if (!size) {
        ret = AVERROR(EAGAIN);
        goto leave;
    }

    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;
    pkt->dts          = dts;
    pkt->pts          = pts == AV_NOPTS_VALUE ? dts : pts;
    pkt->stream_index = st->index;

    if (flv->new_extradata[stream_type]) {
        uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
                                                flv->new_extradata_size[stream_type]);
        if (side) {
            memcpy(side, flv->new_extradata[stream_type],
                   flv->new_extradata_size[stream_type]);
            av_freep(&flv->new_extradata[stream_type]);
            flv->new_extradata_size[stream_type] = 0;
        }
    }
    if (stream_type == FLV_STREAM_TYPE_AUDIO &&
        (sample_rate != flv->last_sample_rate ||
         channels    != flv->last_channels)) {
        flv->last_sample_rate = sample_rate;
        flv->last_channels    = channels;
        ff_add_param_change(pkt, channels, 0, sample_rate, 0, 0);
    }

    if (stream_type == FLV_STREAM_TYPE_AUDIO ||
        ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY) ||
        stream_type == FLV_STREAM_TYPE_DATA)
        pkt->flags |= AV_PKT_FLAG_KEY;

leave:
    avio_skip(s->pb, 4);
    return ret;
}
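
/*
 * Seeking resets the index validation state and is delegated to the
 * underlying protocol through avio_seek_time(); this only succeeds for
 * protocols that implement time-based seeking (e.g. RTMP), otherwise
 * libavformat is expected to fall back to its generic, index-based seek.
 */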
static int flv_read_seek(AVFormatContext *s, int stream_index,
                         int64_t ts, int flags)
{
    FLVContext *flv = s->priv_data;
    flv->validate_count = 0;
    return avio_seek_time(s->pb, stream_index, ts, flags);
}

#define OFFSET(x) offsetof(FLVContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "flv_metadata", "Allocate streams according to the onMetaData array", OFFSET(trust_metadata), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VD },
    { NULL }
};

static const AVClass flv_class = {
    .class_name = "flvdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVInputFormat ff_flv_demuxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &flv_class,
};

static const AVClass live_flv_class = {
    .class_name = "live_flvdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVInputFormat ff_live_flv_demuxer = {
    .name           = "live_flv",
    .long_name      = NULL_IF_CONFIG_SMALL("live RTMP FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = live_flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &live_flv_class,
    .flags          = AVFMT_TS_DISCONT
};