/*
 * FLV demuxer
 * Copyright (c) 2003 The FFmpeg Project
 *
 * This demuxer will generate a 1 byte extradata for VP6F content.
 * It is composed of:
 *  - upper 4 bits: difference between encoded width and visible width
 *  - lower 4 bits: difference between encoded height and visible height
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
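
/*
 * Illustrative sketch (added commentary, not part of the original file): a
 * consumer of that 1-byte VP6F extradata could recover the cropping deltas
 * roughly like this, assuming "extradata" points at the byte described above:
 *
 *     int width_delta  = extradata[0] >> 4;  // encoded width  - visible width
 *     int height_delta = extradata[0] & 0xF; // encoded height - visible height
 */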
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/dict.h"
#include "libavutil/opt.h"
#include "libavutil/intfloat.h"
#include "libavutil/mathematics.h"
#include "libavcodec/bytestream.h"
#include "libavcodec/mpeg4audio.h"
#include "avformat.h"
#include "internal.h"
#include "avio_internal.h"
#include "flv.h"

#define VALIDATE_INDEX_TS_THRESH 2500

#define RESYNC_BUFFER_SIZE (1<<20)
typedef struct FLVContext {
    const AVClass *class; ///< Class for private options.
    int trust_metadata;   ///< configure streams according to onMetaData
    int wrong_dts;        ///< wrong dts due to negative cts
    uint8_t *new_extradata[FLV_STREAM_TYPE_NB];
    int new_extradata_size[FLV_STREAM_TYPE_NB];
    int last_sample_rate;
    int last_channels;
    struct {
        int64_t dts;
        int64_t pos;
    } validate_index[2];
    int validate_next;
    int validate_count;
    int searched_for_end;

    uint8_t resync_buffer[2*RESYNC_BUFFER_SIZE];

    int broken_sizes;
    int sum_flv_tag_size;
} FLVContext;
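
/*
 * Added note: probe() below inspects the 9-byte FLV file header -- the "FLV"
 * signature, a version byte below 5, and a plausible big-endian header-size
 * field -- and additionally checks whether the bytes shortly after the header
 * contain "NGINX RTMP", which nginx-rtmp appears to embed in its metadata, so
 * that such streams are claimed by the live_flv demuxer instead.
 */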
static int probe(AVProbeData *p, int live)
{
    const uint8_t *d = p->buf;
    unsigned offset = AV_RB32(d + 5);

    if (d[0] == 'F' &&
        d[1] == 'L' &&
        d[2] == 'V' &&
        d[3] < 5 && d[5] == 0 &&
        offset + 100 < p->buf_size &&
        offset > 8) {
        int is_live = !memcmp(d + offset + 40, "NGINX RTMP", 10);

        if (live == is_live)
            return AVPROBE_SCORE_MAX;
    }
    return 0;
}

static int flv_probe(AVProbeData *p)
{
    return probe(p, 0);
}

static int live_flv_probe(AVProbeData *p)
{
    return probe(p, 1);
}
static AVStream *create_stream(AVFormatContext *s, int codec_type)
{
    AVStream *st = avformat_new_stream(s, NULL);
    if (!st)
        return NULL;
    st->codecpar->codec_type = codec_type;
    if (s->nb_streams>=3 ||(   s->nb_streams==2
                            && s->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE
                            && s->streams[1]->codecpar->codec_type != AVMEDIA_TYPE_SUBTITLE))
        s->ctx_flags &= ~AVFMTCTX_NOHEADER;

    avpriv_set_pts_info(st, 32, 1, 1000); /* 32 bit pts in ms */
    return st;
}
static int flv_same_audio_codec(AVCodecParameters *apar, int flags)
{
    int bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
    int flv_codecid           = flags & FLV_AUDIO_CODECID_MASK;
    int codec_id;

    if (!apar->codec_id && !apar->codec_tag)
        return 1;

    if (apar->bits_per_coded_sample != bits_per_coded_sample)
        return 0;

    switch (flv_codecid) {
    // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                   : AV_CODEC_ID_PCM_S16BE;
#else
                   : AV_CODEC_ID_PCM_S16LE;
#endif
        return codec_id == apar->codec_id;
    case FLV_CODECID_PCM_LE:
        codec_id = bits_per_coded_sample == 8
                   ? AV_CODEC_ID_PCM_U8
                   : AV_CODEC_ID_PCM_S16LE;
        return codec_id == apar->codec_id;
    case FLV_CODECID_AAC:
        return apar->codec_id == AV_CODEC_ID_AAC;
    case FLV_CODECID_ADPCM:
        return apar->codec_id == AV_CODEC_ID_ADPCM_SWF;
    case FLV_CODECID_SPEEX:
        return apar->codec_id == AV_CODEC_ID_SPEEX;
    case FLV_CODECID_MP3:
        return apar->codec_id == AV_CODEC_ID_MP3;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
    case FLV_CODECID_NELLYMOSER:
        return apar->codec_id == AV_CODEC_ID_NELLYMOSER;
    case FLV_CODECID_PCM_MULAW:
        return apar->sample_rate == 8000 &&
               apar->codec_id    == AV_CODEC_ID_PCM_MULAW;
    case FLV_CODECID_PCM_ALAW:
        return apar->sample_rate == 8000 &&
               apar->codec_id    == AV_CODEC_ID_PCM_ALAW;
    default:
        return apar->codec_tag == (flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
    }
}
static void flv_set_audio_codec(AVFormatContext *s, AVStream *astream,
                                AVCodecParameters *apar, int flv_codecid)
{
    switch (flv_codecid) {
    // no distinction between S16 and S8 PCM codec flags
    case FLV_CODECID_PCM:
        apar->codec_id = apar->bits_per_coded_sample == 8
                         ? AV_CODEC_ID_PCM_U8
#if HAVE_BIGENDIAN
                         : AV_CODEC_ID_PCM_S16BE;
#else
                         : AV_CODEC_ID_PCM_S16LE;
#endif
        break;
    case FLV_CODECID_PCM_LE:
        apar->codec_id = apar->bits_per_coded_sample == 8
                         ? AV_CODEC_ID_PCM_U8
                         : AV_CODEC_ID_PCM_S16LE;
        break;
    case FLV_CODECID_AAC:
        apar->codec_id = AV_CODEC_ID_AAC;
        break;
    case FLV_CODECID_ADPCM:
        apar->codec_id = AV_CODEC_ID_ADPCM_SWF;
        break;
    case FLV_CODECID_SPEEX:
        apar->codec_id    = AV_CODEC_ID_SPEEX;
        apar->sample_rate = 16000;
        break;
    case FLV_CODECID_MP3:
        apar->codec_id = AV_CODEC_ID_MP3;
        astream->need_parsing = AVSTREAM_PARSE_FULL;
        break;
    case FLV_CODECID_NELLYMOSER_8KHZ_MONO:
        // in case metadata does not otherwise declare samplerate
        apar->sample_rate = 8000;
        apar->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER_16KHZ_MONO:
        apar->sample_rate = 16000;
        apar->codec_id    = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_NELLYMOSER:
        apar->codec_id = AV_CODEC_ID_NELLYMOSER;
        break;
    case FLV_CODECID_PCM_MULAW:
        apar->sample_rate = 8000;
        apar->codec_id    = AV_CODEC_ID_PCM_MULAW;
        break;
    case FLV_CODECID_PCM_ALAW:
        apar->sample_rate = 8000;
        apar->codec_id    = AV_CODEC_ID_PCM_ALAW;
        break;
    default:
        avpriv_request_sample(s, "Audio codec (%x)",
                              flv_codecid >> FLV_AUDIO_CODECID_OFFSET);
        apar->codec_tag = flv_codecid >> FLV_AUDIO_CODECID_OFFSET;
    }
}
static int flv_same_video_codec(AVCodecParameters *vpar, int flags)
{
    int flv_codecid = flags & FLV_VIDEO_CODECID_MASK;

    if (!vpar->codec_id && !vpar->codec_tag)
        return 1;

    switch (flv_codecid) {
    case FLV_CODECID_H263:
        return vpar->codec_id == AV_CODEC_ID_FLV1;
    case FLV_CODECID_SCREEN:
        return vpar->codec_id == AV_CODEC_ID_FLASHSV;
    case FLV_CODECID_SCREEN2:
        return vpar->codec_id == AV_CODEC_ID_FLASHSV2;
    case FLV_CODECID_VP6:
        return vpar->codec_id == AV_CODEC_ID_VP6F;
    case FLV_CODECID_VP6A:
        return vpar->codec_id == AV_CODEC_ID_VP6A;
    case FLV_CODECID_H264:
        return vpar->codec_id == AV_CODEC_ID_H264;
    default:
        return vpar->codec_tag == flv_codecid;
    }
}
static int flv_set_video_codec(AVFormatContext *s, AVStream *vstream,
                               int flv_codecid, int read)
{
    AVCodecParameters *par = vstream->codecpar;
    switch (flv_codecid) {
    case FLV_CODECID_H263:
        par->codec_id = AV_CODEC_ID_FLV1;
        break;
    case FLV_CODECID_REALH263:
        par->codec_id = AV_CODEC_ID_H263;
        break; // Really mean it this time
    case FLV_CODECID_SCREEN:
        par->codec_id = AV_CODEC_ID_FLASHSV;
        break;
    case FLV_CODECID_SCREEN2:
        par->codec_id = AV_CODEC_ID_FLASHSV2;
        break;
    case FLV_CODECID_VP6:
        par->codec_id = AV_CODEC_ID_VP6F;
    case FLV_CODECID_VP6A:
        if (flv_codecid == FLV_CODECID_VP6A)
            par->codec_id = AV_CODEC_ID_VP6A;
        if (read) {
            if (par->extradata_size != 1) {
                ff_alloc_extradata(par, 1);
            }
            if (par->extradata)
                par->extradata[0] = avio_r8(s->pb);
            else
                avio_skip(s->pb, 1);
        }
        return 1; // 1 byte body size adjustment for flv_read_packet()
    case FLV_CODECID_H264:
        par->codec_id = AV_CODEC_ID_H264;
        vstream->need_parsing = AVSTREAM_PARSE_HEADERS;
        return 3; // not 4, reading packet type will consume one byte
    case FLV_CODECID_MPEG4:
        par->codec_id = AV_CODEC_ID_MPEG4;
        return 3;
    default:
        avpriv_request_sample(s, "Video codec (%x)", flv_codecid);
        par->codec_tag = flv_codecid;
    }

    return 0;
}
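
/*
 * Added note: AMF strings are serialized as a 16-bit big-endian length
 * followed by that many bytes; amf_get_string() below reads exactly that,
 * skipping and reporting strings that would overflow the caller's buffer.
 */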
static int amf_get_string(AVIOContext *ioc, char *buffer, int buffsize)
{
    int length = avio_rb16(ioc);
    if (length >= buffsize) {
        avio_skip(ioc, length);
        return -1;
    }

    avio_read(ioc, buffer, length);

    buffer[length] = '\0';

    return length;
}
static int parse_keyframes_index(AVFormatContext *s, AVIOContext *ioc,
                                 AVStream *vstream, int64_t max_pos)
{
    FLVContext *flv       = s->priv_data;
    unsigned int timeslen = 0, fileposlen = 0, i;
    char str_val[256];
    int64_t *times         = NULL;
    int64_t *filepositions = NULL;
    int ret                = AVERROR(ENOSYS);
    int64_t initial_pos    = avio_tell(ioc);

    if (vstream->nb_index_entries>0) {
        av_log(s, AV_LOG_WARNING, "Skipping duplicate index\n");
        return 0;
    }

    if (s->flags & AVFMT_FLAG_IGNIDX)
        return 0;

    while (avio_tell(ioc) < max_pos - 2 &&
           amf_get_string(ioc, str_val, sizeof(str_val)) > 0) {
        int64_t **current_array;
        unsigned int arraylen;

        // Expect array object in context
        if (avio_r8(ioc) != AMF_DATA_TYPE_ARRAY)
            break;

        arraylen = avio_rb32(ioc);
        if (arraylen>>28)
            break;

        if        (!strcmp(KEYFRAMES_TIMESTAMP_TAG , str_val) && !times) {
            current_array = &times;
            timeslen      = arraylen;
        } else if (!strcmp(KEYFRAMES_BYTEOFFSET_TAG, str_val) &&
                   !filepositions) {
            current_array = &filepositions;
            fileposlen    = arraylen;
        } else
            // unexpected metatag inside keyframes, will not use such
            // metadata for indexing
            break;

        if (!(*current_array = av_mallocz(sizeof(**current_array) * arraylen))) {
            ret = AVERROR(ENOMEM);
            goto finish;
        }

        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++) {
            if (avio_r8(ioc) != AMF_DATA_TYPE_NUMBER)
                goto invalid;
            current_array[0][i] = av_int2double(avio_rb64(ioc));
        }
        if (times && filepositions) {
            // All done, exiting at a position allowing amf_parse_object
            // to finish parsing the object
            ret = 0;
            break;
        }
    }

    if (timeslen == fileposlen && fileposlen>1 && max_pos <= filepositions[0]) {
        for (i = 0; i < fileposlen; i++) {
            av_add_index_entry(vstream, filepositions[i], times[i] * 1000,
                               0, 0, AVINDEX_KEYFRAME);
            if (i < 2) {
                flv->validate_index[i].pos = filepositions[i];
                flv->validate_index[i].dts = times[i] * 1000;
                flv->validate_count        = i + 1;
            }
        }
    } else {
invalid:
        av_log(s, AV_LOG_WARNING, "Invalid keyframes object, skipping.\n");
    }

finish:
    av_freep(&times);
    av_freep(&filepositions);
    avio_seek(ioc, initial_pos, SEEK_SET);
    return ret;
}
static int amf_parse_object(AVFormatContext *s, AVStream *astream,
                            AVStream *vstream, const char *key,
                            int64_t max_pos, int depth)
{
    AVCodecParameters *apar, *vpar;
    FLVContext *flv = s->priv_data;
    AVIOContext *ioc;
    AMFDataType amf_type;
    char str_val[1024];
    double num_val;

    num_val  = 0;
    ioc      = s->pb;
    amf_type = avio_r8(ioc);

    switch (amf_type) {
    case AMF_DATA_TYPE_NUMBER:
        num_val = av_int2double(avio_rb64(ioc));
        break;
    case AMF_DATA_TYPE_BOOL:
        num_val = avio_r8(ioc);
        break;
    case AMF_DATA_TYPE_STRING:
        if (amf_get_string(ioc, str_val, sizeof(str_val)) < 0) {
            av_log(s, AV_LOG_ERROR, "AMF_DATA_TYPE_STRING parsing failed\n");
            return -1;
        }
        break;
    case AMF_DATA_TYPE_OBJECT:
        if ((vstream || astream) && key &&
            ioc->seekable &&
            !strcmp(KEYFRAMES_TAG, key) && depth == 1)
            if (parse_keyframes_index(s, ioc, vstream ? vstream : astream,
                                      max_pos) < 0)
                av_log(s, AV_LOG_ERROR, "Keyframe index parsing failed\n");

        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1; // if we couldn't skip, bomb out.

        if (avio_r8(ioc) != AMF_END_OF_OBJECT) {
            av_log(s, AV_LOG_ERROR, "Missing AMF_END_OF_OBJECT in AMF_DATA_TYPE_OBJECT\n");
            return -1;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_UNDEFINED:
    case AMF_DATA_TYPE_UNSUPPORTED:
        break; // these take up no additional space
    case AMF_DATA_TYPE_MIXEDARRAY:
    {
        unsigned v;
        avio_skip(ioc, 4); // skip 32-bit max array index
        while (avio_tell(ioc) < max_pos - 2 &&
               amf_get_string(ioc, str_val, sizeof(str_val)) > 0)
            // this is the only case in which we would want a nested
            // parse to not skip over the object
            if (amf_parse_object(s, astream, vstream, str_val, max_pos,
                                 depth + 1) < 0)
                return -1;

        v = avio_r8(ioc);
        if (v != AMF_END_OF_OBJECT) {
            av_log(s, AV_LOG_ERROR, "Missing AMF_END_OF_OBJECT in AMF_DATA_TYPE_MIXEDARRAY, found %d\n", v);
            return -1;
        }
        break;
    }
    case AMF_DATA_TYPE_ARRAY:
    {
        unsigned int arraylen, i;

        arraylen = avio_rb32(ioc);
        for (i = 0; i < arraylen && avio_tell(ioc) < max_pos - 1; i++)
            if (amf_parse_object(s, NULL, NULL, NULL, max_pos,
                                 depth + 1) < 0)
                return -1; // if we couldn't skip, bomb out.
    }
    break;
    case AMF_DATA_TYPE_DATE:
        avio_skip(ioc, 8 + 2); // timestamp (double) and UTC offset (int16)
        break;
    default: // unsupported type, we couldn't skip
        av_log(s, AV_LOG_ERROR, "unsupported amf type %d\n", amf_type);
        return -1;
    }

    if (key) {
        apar = astream ? astream->codecpar : NULL;
        vpar = vstream ? vstream->codecpar : NULL;

        // stream info doesn't live any deeper than the first object
        if (depth == 1) {
            if (amf_type == AMF_DATA_TYPE_NUMBER ||
                amf_type == AMF_DATA_TYPE_BOOL) {
                if (!strcmp(key, "duration"))
                    s->duration = num_val * AV_TIME_BASE;
                else if (!strcmp(key, "videodatarate") && vpar &&
                         0 <= (int)(num_val * 1024.0))
                    vpar->bit_rate = num_val * 1024.0;
                else if (!strcmp(key, "audiodatarate") && apar &&
                         0 <= (int)(num_val * 1024.0))
                    apar->bit_rate = num_val * 1024.0;
                else if (!strcmp(key, "datastream")) {
                    AVStream *st = create_stream(s, AVMEDIA_TYPE_SUBTITLE);
                    if (!st)
                        return AVERROR(ENOMEM);
                    st->codecpar->codec_id = AV_CODEC_ID_TEXT;
                } else if (flv->trust_metadata) {
                    if (!strcmp(key, "videocodecid") && vpar) {
                        flv_set_video_codec(s, vstream, num_val, 0);
                    } else if (!strcmp(key, "audiocodecid") && apar) {
                        int id = ((int)num_val) << FLV_AUDIO_CODECID_OFFSET;
                        flv_set_audio_codec(s, astream, apar, id);
                    } else if (!strcmp(key, "audiosamplerate") && apar) {
                        apar->sample_rate = num_val;
                    } else if (!strcmp(key, "audiosamplesize") && apar) {
                        apar->bits_per_coded_sample = num_val;
                    } else if (!strcmp(key, "stereo") && apar) {
                        apar->channels       = num_val + 1;
                        apar->channel_layout = apar->channels == 2 ?
                                               AV_CH_LAYOUT_STEREO :
                                               AV_CH_LAYOUT_MONO;
                    } else if (!strcmp(key, "width") && vpar) {
                        vpar->width = num_val;
                    } else if (!strcmp(key, "height") && vpar) {
                        vpar->height = num_val;
                    }
                }
            }
            if (amf_type == AMF_DATA_TYPE_STRING) {
                if (!strcmp(key, "encoder")) {
                    int version = -1;
                    if (1 == sscanf(str_val, "Open Broadcaster Software v0.%d", &version)) {
                        if (version > 0 && version <= 655)
                            flv->broken_sizes = 1;
                    }
                } else if (!strcmp(key, "metadatacreator") && !strcmp(str_val, "MEGA")) {
                    flv->broken_sizes = 1;
                }
            }
        }

        if (amf_type == AMF_DATA_TYPE_OBJECT && s->nb_streams == 1 &&
           ((!apar && !strcmp(key, "audiocodecid")) ||
            (!vpar && !strcmp(key, "videocodecid"))))
                s->ctx_flags &= ~AVFMTCTX_NOHEADER; //If there is either audio/video missing, codecid will be an empty object

        if (!strcmp(key, "duration")        ||
            !strcmp(key, "filesize")        ||
            !strcmp(key, "width")           ||
            !strcmp(key, "height")          ||
            !strcmp(key, "videodatarate")   ||
            !strcmp(key, "framerate")       ||
            !strcmp(key, "videocodecid")    ||
            !strcmp(key, "audiodatarate")   ||
            !strcmp(key, "audiosamplerate") ||
            !strcmp(key, "audiosamplesize") ||
            !strcmp(key, "stereo")          ||
            !strcmp(key, "audiocodecid")    ||
            !strcmp(key, "datastream"))
            return 0;

        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        if (amf_type == AMF_DATA_TYPE_BOOL) {
            av_strlcpy(str_val, num_val > 0 ? "true" : "false",
                       sizeof(str_val));
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_NUMBER) {
            snprintf(str_val, sizeof(str_val), "%.f", num_val);
            av_dict_set(&s->metadata, key, str_val, 0);
        } else if (amf_type == AMF_DATA_TYPE_STRING)
            av_dict_set(&s->metadata, key, str_val, 0);
    }

    return 0;
}
#define TYPE_ONTEXTDATA 1
#define TYPE_ONCAPTION 2
#define TYPE_ONCAPTIONINFO 3
#define TYPE_UNKNOWN 9
static int flv_read_metabody(AVFormatContext *s, int64_t next_pos)
{
    AMFDataType type;
    AVStream *stream, *astream, *vstream;
    AVStream av_unused *dstream;
    AVIOContext *ioc;
    int i;
    // only needs to hold the string "onMetaData".
    // Anything longer is something we don't want.
    char buffer[32];

    astream = NULL;
    vstream = NULL;
    dstream = NULL;
    ioc     = s->pb;

    // first object needs to be "onMetaData" string
    type = avio_r8(ioc);
    if (type != AMF_DATA_TYPE_STRING ||
        amf_get_string(ioc, buffer, sizeof(buffer)) < 0)
        return TYPE_UNKNOWN;

    if (!strcmp(buffer, "onTextData"))
        return TYPE_ONTEXTDATA;

    if (!strcmp(buffer, "onCaption"))
        return TYPE_ONCAPTION;

    if (!strcmp(buffer, "onCaptionInfo"))
        return TYPE_ONCAPTIONINFO;

    if (strcmp(buffer, "onMetaData") && strcmp(buffer, "onCuePoint")) {
        av_log(s, AV_LOG_DEBUG, "Unknown type %s\n", buffer);
        return TYPE_UNKNOWN;
    }

    // find the streams now so that amf_parse_object doesn't need to do
    // the lookup every time it is called.
    for (i = 0; i < s->nb_streams; i++) {
        stream = s->streams[i];
        if (stream->codecpar->codec_type == AVMEDIA_TYPE_VIDEO)
            vstream = stream;
        else if (stream->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
            astream = stream;
        else if (stream->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
            dstream = stream;
    }

    // parse the second object (we want a mixed array)
    if (amf_parse_object(s, astream, vstream, buffer, next_pos, 0) < 0)
        return -1;

    return 0;
}
static int flv_read_header(AVFormatContext *s)
{
    FLVContext *flv = s->priv_data;
    int offset;

    avio_skip(s->pb, 4);
    avio_r8(s->pb); // flags

    s->ctx_flags |= AVFMTCTX_NOHEADER;

    offset = avio_rb32(s->pb);
    avio_seek(s->pb, offset, SEEK_SET);
    avio_skip(s->pb, 4);

    s->start_time = 0;
    flv->sum_flv_tag_size = 0;

    return 0;
}
static int flv_read_close(AVFormatContext *s)
{
    int i;
    FLVContext *flv = s->priv_data;
    for (i=0; i<FLV_STREAM_TYPE_NB; i++)
        av_freep(&flv->new_extradata[i]);
    return 0;
}

static int flv_get_extradata(AVFormatContext *s, AVStream *st, int size)
{
    av_freep(&st->codecpar->extradata);
    if (ff_get_extradata(s, st->codecpar, s->pb, size) < 0)
        return AVERROR(ENOMEM);
    return 0;
}
static int flv_queue_extradata(FLVContext *flv, AVIOContext *pb, int stream,
                               int size)
{
    av_free(flv->new_extradata[stream]);
    flv->new_extradata[stream] = av_mallocz(size +
                                            AV_INPUT_BUFFER_PADDING_SIZE);
    if (!flv->new_extradata[stream])
        return AVERROR(ENOMEM);
    flv->new_extradata_size[stream] = size;
    avio_read(pb, flv->new_extradata[stream], size);
    return 0;
}
static void clear_index_entries(AVFormatContext *s, int64_t pos)
{
    int i, j, out;
    av_log(s, AV_LOG_WARNING,
           "Found invalid index entries, clearing the index.\n");
    for (i = 0; i < s->nb_streams; i++) {
        AVStream *st = s->streams[i];
        /* Remove all index entries that point to >= pos */
        out = 0;
        for (j = 0; j < st->nb_index_entries; j++)
            if (st->index_entries[j].pos < pos)
                st->index_entries[out++] = st->index_entries[j];
        st->nb_index_entries = out;
    }
}
static int amf_skip_tag(AVIOContext *pb, AMFDataType type)
{
    int nb = -1, ret, parse_name = 1;

    switch (type) {
    case AMF_DATA_TYPE_NUMBER:
        avio_skip(pb, 8);
        break;
    case AMF_DATA_TYPE_BOOL:
        avio_skip(pb, 1);
        break;
    case AMF_DATA_TYPE_STRING:
        avio_skip(pb, avio_rb16(pb));
        break;
    case AMF_DATA_TYPE_ARRAY:
        parse_name = 0;
    case AMF_DATA_TYPE_MIXEDARRAY:
        nb = avio_rb32(pb);
    case AMF_DATA_TYPE_OBJECT:
        while(!pb->eof_reached && (nb-- > 0 || type != AMF_DATA_TYPE_ARRAY)) {
            if (parse_name) {
                int size = avio_rb16(pb);
                if (!size) {
                    avio_skip(pb, 1);
                    break;
                }
                avio_skip(pb, size);
            }
            if ((ret = amf_skip_tag(pb, avio_r8(pb))) < 0)
                return ret;
        }
        break;
    case AMF_DATA_TYPE_NULL:
    case AMF_DATA_TYPE_OBJECT_END:
        break;
    default:
        return AVERROR_INVALIDDATA;
    }
    return 0;
}
static int flv_data_packet(AVFormatContext *s, AVPacket *pkt,
                           int64_t dts, int64_t next)
{
    AVIOContext *pb = s->pb;
    AVStream *st    = NULL;
    char buf[20];
    int ret = AVERROR_INVALIDDATA;
    int i, length = -1;
    int array = 0;

    switch (avio_r8(pb)) {
    case AMF_DATA_TYPE_ARRAY:
        array = 1;
    case AMF_DATA_TYPE_MIXEDARRAY:
        avio_seek(pb, 4, SEEK_CUR);
    case AMF_DATA_TYPE_OBJECT:
        break;
    default:
        goto skip;
    }

    while (array || (ret = amf_get_string(pb, buf, sizeof(buf))) > 0) {
        AMFDataType type = avio_r8(pb);
        if (type == AMF_DATA_TYPE_STRING && (array || !strcmp(buf, "text"))) {
            length = avio_rb16(pb);
            ret    = av_get_packet(pb, pkt, length);
            if (ret < 0)
                goto skip;
            else
                break;
        } else {
            if ((ret = amf_skip_tag(pb, type)) < 0)
                goto skip;
        }
    }

    if (length < 0) {
        ret = AVERROR_INVALIDDATA;
        goto skip;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
            break;
    }

    if (i == s->nb_streams) {
        st = create_stream(s, AVMEDIA_TYPE_SUBTITLE);
        if (!st)
            return AVERROR(ENOMEM);
        st->codecpar->codec_id = AV_CODEC_ID_TEXT;
    }

    pkt->dts  = dts;
    pkt->pts  = dts;
    pkt->size = ret;

    pkt->stream_index = st->index;
    pkt->flags       |= AV_PKT_FLAG_KEY;

skip:
    avio_seek(s->pb, next + 4, SEEK_SET);

    return ret;
}
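
/*
 * Added note: resync() scans forward one byte at a time into a mirrored ring
 * buffer and looks for two consecutive "previous tag size" trailers whose
 * values are consistent with the 24-bit DataSize fields of the tags they
 * close; once such a pair is found, it rewinds to that validated tag boundary
 * so normal tag parsing can resume.
 */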
static int resync(AVFormatContext *s)
{
    FLVContext *flv = s->priv_data;
    int64_t i;
    int64_t pos = avio_tell(s->pb);

    for (i=0; !avio_feof(s->pb); i++) {
        int j  = i & (RESYNC_BUFFER_SIZE-1);
        int j1 = j + RESYNC_BUFFER_SIZE;
        flv->resync_buffer[j ] =
        flv->resync_buffer[j1] = avio_r8(s->pb);

        if (i > 22) {
            unsigned lsize2 = AV_RB32(flv->resync_buffer + j1 - 4);
            if (lsize2 >= 11 && lsize2 + 8LL < FFMIN(i, RESYNC_BUFFER_SIZE)) {
                unsigned size2 = AV_RB24(flv->resync_buffer + j1 - lsize2 + 1 - 4);
                unsigned lsize1 = AV_RB32(flv->resync_buffer + j1 - lsize2 - 8);
                if (lsize1 >= 11 && lsize1 + 8LL + lsize2 < FFMIN(i, RESYNC_BUFFER_SIZE)) {
                    unsigned size1 = AV_RB24(flv->resync_buffer + j1 - lsize1 + 1 - lsize2 - 8);
                    if (size1 == lsize1 - 11 && size2 == lsize2 - 11) {
                        avio_seek(s->pb, pos + i - lsize1 - lsize2 - 8, SEEK_SET);
                        return 1;
                    }
                }
            }
        }
    }
    return AVERROR_EOF;
}
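
/*
 * Added note: each FLV tag starts with an 11-byte header (1 byte type, 3 bytes
 * body size, 3 bytes timestamp plus 1 byte timestamp extension, 3 bytes stream
 * id), followed by the tag body and a 32-bit "previous tag size" trailer;
 * flv_read_packet() below walks these tags and cross-checks the trailer
 * against the header sizes, falling back to resync() on a mismatch.
 */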
static int flv_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    FLVContext *flv = s->priv_data;
    int ret, i, size, flags;
    enum FlvTagType type;
    int stream_type=-1;
    int64_t next, pos, meta_pos;
    int64_t dts, pts = AV_NOPTS_VALUE;
    int av_uninit(channels);
    int av_uninit(sample_rate);
    AVStream *st    = NULL;
    int last = -1;
    int orig_size;

retry:
    /* pkt size is repeated at end. skip it */
    pos  = avio_tell(s->pb);
    type = (avio_r8(s->pb) & 0x1F);
    orig_size =
    size = avio_rb24(s->pb);
    flv->sum_flv_tag_size += size + 11;
    dts  = avio_rb24(s->pb);
    dts |= (unsigned)avio_r8(s->pb) << 24;
    av_log(s, AV_LOG_TRACE, "type:%d, size:%d, last:%d, dts:%"PRId64" pos:%"PRId64"\n", type, size, last, dts, avio_tell(s->pb));
    if (avio_feof(s->pb))
        return AVERROR_EOF;
    avio_skip(s->pb, 3); /* stream id, always 0 */
    flags = 0;

    if (flv->validate_next < flv->validate_count) {
        int64_t validate_pos = flv->validate_index[flv->validate_next].pos;
        if (pos == validate_pos) {
            if (FFABS(dts - flv->validate_index[flv->validate_next].dts) <=
                VALIDATE_INDEX_TS_THRESH) {
                flv->validate_next++;
            } else {
                clear_index_entries(s, validate_pos);
                flv->validate_count = 0;
            }
        } else if (pos > validate_pos) {
            clear_index_entries(s, validate_pos);
            flv->validate_count = 0;
        }
    }

    if (size == 0) {
        ret = FFERROR_REDO;
        goto leave;
    }

    next = size + avio_tell(s->pb);

    if (type == FLV_TAG_TYPE_AUDIO) {
        stream_type = FLV_STREAM_TYPE_AUDIO;
        flags       = avio_r8(s->pb);
        size--;
    } else if (type == FLV_TAG_TYPE_VIDEO) {
        stream_type = FLV_STREAM_TYPE_VIDEO;
        flags       = avio_r8(s->pb);
        size--;
        if ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_VIDEO_INFO_CMD)
            goto skip;
    } else if (type == FLV_TAG_TYPE_META) {
        stream_type=FLV_STREAM_TYPE_DATA;
        if (size > 13 + 1 + 4) { // Header-type metadata stuff
            int type;
            meta_pos = avio_tell(s->pb);
            type = flv_read_metabody(s, next);
            if (type == 0 && dts == 0 || type < 0 || type == TYPE_UNKNOWN) {
                if (type < 0 && flv->validate_count &&
                    flv->validate_index[0].pos     > next &&
                    flv->validate_index[0].pos - 4 < next
                ) {
                    av_log(s, AV_LOG_WARNING, "Adjusting next position due to index mismatch\n");
                    next = flv->validate_index[0].pos - 4;
                }
                goto skip;
            } else if (type == TYPE_ONTEXTDATA) {
                avpriv_request_sample(s, "OnTextData packet");
                return flv_data_packet(s, pkt, dts, next);
            } else if (type == TYPE_ONCAPTION) {
                return flv_data_packet(s, pkt, dts, next);
            }
            avio_seek(s->pb, meta_pos, SEEK_SET);
        }
    } else {
        av_log(s, AV_LOG_DEBUG,
               "Skipping flv packet: type %d, size %d, flags %d.\n",
               type, size, flags);
skip:
        avio_seek(s->pb, next, SEEK_SET);
        ret = FFERROR_REDO;
        goto leave;
    }

    /* skip empty data packets */
    if (!size) {
        ret = FFERROR_REDO;
        goto leave;
    }

    /* now find stream */
    for (i = 0; i < s->nb_streams; i++) {
        st = s->streams[i];
        if (stream_type == FLV_STREAM_TYPE_AUDIO) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO &&
                (s->audio_codec_id || flv_same_audio_codec(st->codecpar, flags)))
                break;
        } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO &&
                (s->video_codec_id || flv_same_video_codec(st->codecpar, flags)))
                break;
        } else if (stream_type == FLV_STREAM_TYPE_DATA) {
            if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
                break;
        }
    }
    if (i == s->nb_streams) {
        static const enum AVMediaType stream_types[] = {AVMEDIA_TYPE_VIDEO, AVMEDIA_TYPE_AUDIO, AVMEDIA_TYPE_SUBTITLE};
        av_log(s, AV_LOG_WARNING, "%s stream discovered after head already parsed\n", av_get_media_type_string(stream_types[stream_type]));
        st = create_stream(s, stream_types[stream_type]);
        if (!st)
            return AVERROR(ENOMEM);
    }
    av_log(s, AV_LOG_TRACE, "%d %X %d \n", stream_type, flags, st->discard);

    if (s->pb->seekable &&
        ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY ||
         stream_type == FLV_STREAM_TYPE_AUDIO))
        av_add_index_entry(st, pos, dts, size, 0, AVINDEX_KEYFRAME);

    if (  (st->discard >= AVDISCARD_NONKEY && !((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY || (stream_type == FLV_STREAM_TYPE_AUDIO)))
        ||(st->discard >= AVDISCARD_BIDIR  &&  ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_DISP_INTER && (stream_type == FLV_STREAM_TYPE_VIDEO)))
        || st->discard >= AVDISCARD_ALL
    ) {
        avio_seek(s->pb, next, SEEK_SET);
        ret = FFERROR_REDO;
        goto leave;
    }

    // if not streamed and no duration from metadata then seek to end to find
    // the duration from the timestamps
    if (s->pb->seekable && (!s->duration || s->duration == AV_NOPTS_VALUE) &&
        !flv->searched_for_end) {
        int size;
        const int64_t pos   = avio_tell(s->pb);
        // Read the last 4 bytes of the file, this should be the size of the
        // previous FLV tag. Use the timestamp of its payload as duration.
        int64_t fsize       = avio_size(s->pb);
retry_duration:
        avio_seek(s->pb, fsize - 4, SEEK_SET);
        size = avio_rb32(s->pb);
        if (size > 0 && size < fsize) {
            // Seek to the start of the last FLV tag at position (fsize - 4 - size)
            // but skip the byte indicating the type.
            avio_seek(s->pb, fsize - 3 - size, SEEK_SET);
            if (size == avio_rb24(s->pb) + 11) {
                uint32_t ts = avio_rb24(s->pb);
                ts         |= avio_r8(s->pb) << 24;
                if (ts)
                    s->duration = ts * (int64_t)AV_TIME_BASE / 1000;
                else if (fsize >= 8 && fsize - 8 >= size) {
                    fsize -= size+4;
                    goto retry_duration;
                }
            }
        }

        avio_seek(s->pb, pos, SEEK_SET);
        flv->searched_for_end = 1;
    }

    if (stream_type == FLV_STREAM_TYPE_AUDIO) {
        int bits_per_coded_sample;
        channels = (flags & FLV_AUDIO_CHANNEL_MASK) == FLV_STEREO ? 2 : 1;
        sample_rate = 44100 << ((flags & FLV_AUDIO_SAMPLERATE_MASK) >>
                                FLV_AUDIO_SAMPLERATE_OFFSET) >> 3;
        bits_per_coded_sample = (flags & FLV_AUDIO_SAMPLESIZE_MASK) ? 16 : 8;
        if (!st->codecpar->channels || !st->codecpar->sample_rate ||
            !st->codecpar->bits_per_coded_sample) {
            st->codecpar->channels              = channels;
            st->codecpar->channel_layout        = channels == 1
                                                  ? AV_CH_LAYOUT_MONO
                                                  : AV_CH_LAYOUT_STEREO;
            st->codecpar->sample_rate           = sample_rate;
            st->codecpar->bits_per_coded_sample = bits_per_coded_sample;
        }
        if (!st->codecpar->codec_id) {
            flv_set_audio_codec(s, st, st->codecpar,
                                flags & FLV_AUDIO_CODECID_MASK);
            flv->last_sample_rate =
            sample_rate           = st->codecpar->sample_rate;
            flv->last_channels    =
            channels              = st->codecpar->channels;
        } else {
            AVCodecParameters *par = avcodec_parameters_alloc();
            if (!par) {
                ret = AVERROR(ENOMEM);
                goto leave;
            }
            par->sample_rate = sample_rate;
            par->bits_per_coded_sample = bits_per_coded_sample;
            flv_set_audio_codec(s, st, par, flags & FLV_AUDIO_CODECID_MASK);
            sample_rate = par->sample_rate;
            avcodec_parameters_free(&par);
        }
    } else if (stream_type == FLV_STREAM_TYPE_VIDEO) {
        size -= flv_set_video_codec(s, st, flags & FLV_VIDEO_CODECID_MASK, 1);
    } else if (stream_type == FLV_STREAM_TYPE_DATA) {
        st->codecpar->codec_id = AV_CODEC_ID_TEXT;
    }

    if (st->codecpar->codec_id == AV_CODEC_ID_AAC ||
        st->codecpar->codec_id == AV_CODEC_ID_H264 ||
        st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
        int type = avio_r8(s->pb);
        size--;

        if (st->codecpar->codec_id == AV_CODEC_ID_H264 || st->codecpar->codec_id == AV_CODEC_ID_MPEG4) {
            // sign extension
            int32_t cts = (avio_rb24(s->pb) + 0xff800000) ^ 0xff800000;
            pts = dts + cts;
            if (cts < 0) { // dts might be wrong
                if (!flv->wrong_dts)
                    av_log(s, AV_LOG_WARNING,
                           "Negative cts, previous timestamps might be wrong.\n");
                flv->wrong_dts = 1;
            } else if (FFABS(dts - pts) > 1000*60*15) {
                av_log(s, AV_LOG_WARNING,
                       "invalid timestamps %"PRId64" %"PRId64"\n", dts, pts);
                dts = pts = AV_NOPTS_VALUE;
            }
        }
        if (type == 0 && (!st->codecpar->extradata || st->codecpar->codec_id == AV_CODEC_ID_AAC ||
            st->codecpar->codec_id == AV_CODEC_ID_H264)) {
            AVDictionaryEntry *t;

            if (st->codecpar->extradata) {
                if ((ret = flv_queue_extradata(flv, s->pb, stream_type, size)) < 0)
                    return ret;
                ret = FFERROR_REDO;
                goto leave;
            }
            if ((ret = flv_get_extradata(s, st, size)) < 0)
                return ret;

            /* Workaround for buggy Omnia A/XE encoder */
            t = av_dict_get(s->metadata, "Encoder", NULL, 0);
            if (st->codecpar->codec_id == AV_CODEC_ID_AAC && t && !strcmp(t->value, "Omnia A/XE"))
                st->codecpar->extradata_size = 2;

            if (st->codecpar->codec_id == AV_CODEC_ID_AAC && 0) {
                MPEG4AudioConfig cfg;

                if (avpriv_mpeg4audio_get_config(&cfg, st->codecpar->extradata,
                                                 st->codecpar->extradata_size * 8, 1) >= 0) {
                    st->codecpar->channels       = cfg.channels;
                    st->codecpar->channel_layout = 0;
                    if (cfg.ext_sample_rate)
                        st->codecpar->sample_rate = cfg.ext_sample_rate;
                    else
                        st->codecpar->sample_rate = cfg.sample_rate;
                    av_log(s, AV_LOG_TRACE, "mp4a config channels %d sample rate %d\n",
                           st->codecpar->channels, st->codecpar->sample_rate);
                }
            }

            ret = FFERROR_REDO;
            goto leave;
        }
    }

    /* skip empty data packets */
    if (!size) {
        ret = FFERROR_REDO;
        goto leave;
    }

    ret = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;
    pkt->dts          = dts;
    pkt->pts          = pts == AV_NOPTS_VALUE ? dts : pts;
    pkt->stream_index = st->index;

    if (flv->new_extradata[stream_type]) {
        uint8_t *side = av_packet_new_side_data(pkt, AV_PKT_DATA_NEW_EXTRADATA,
                                                flv->new_extradata_size[stream_type]);
        if (side) {
            memcpy(side, flv->new_extradata[stream_type],
                   flv->new_extradata_size[stream_type]);
            av_freep(&flv->new_extradata[stream_type]);
            flv->new_extradata_size[stream_type] = 0;
        }
    }
    if (stream_type == FLV_STREAM_TYPE_AUDIO &&
        (sample_rate != flv->last_sample_rate ||
         channels    != flv->last_channels)) {
        flv->last_sample_rate = sample_rate;
        flv->last_channels    = channels;
        ff_add_param_change(pkt, channels, 0, sample_rate, 0, 0);
    }

    if (    stream_type == FLV_STREAM_TYPE_AUDIO ||
            ((flags & FLV_VIDEO_FRAMETYPE_MASK) == FLV_FRAME_KEY) ||
            stream_type == FLV_STREAM_TYPE_DATA)
        pkt->flags |= AV_PKT_FLAG_KEY;

leave:
    last = avio_rb32(s->pb);
    if (last != orig_size + 11 && last != orig_size + 10 &&
        !avio_feof(s->pb) &&
        (last != orig_size || !last) && last != flv->sum_flv_tag_size &&
        !flv->broken_sizes) {
        av_log(s, AV_LOG_ERROR, "Packet mismatch %d %d %d\n", last, orig_size + 11, flv->sum_flv_tag_size);
        avio_seek(s->pb, pos + 1, SEEK_SET);
        ret = resync(s);
        av_packet_unref(pkt);
        if (ret >= 0) {
            goto retry;
        }
    }
    return ret;
}
static int flv_read_seek(AVFormatContext *s, int stream_index,
                         int64_t ts, int flags)
{
    FLVContext *flv = s->priv_data;
    flv->validate_count = 0;
    return avio_seek_time(s->pb, stream_index, ts, flags);
}
#define OFFSET(x) offsetof(FLVContext, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
    { "flv_metadata", "Allocate streams according to the onMetaData array", OFFSET(trust_metadata), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VD },
    { NULL }
};
static const AVClass flv_class = {
    .class_name = "flvdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVInputFormat ff_flv_demuxer = {
    .name           = "flv",
    .long_name      = NULL_IF_CONFIG_SMALL("FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &flv_class,
};

static const AVClass live_flv_class = {
    .class_name = "live_flvdec",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVInputFormat ff_live_flv_demuxer = {
    .name           = "live_flv",
    .long_name      = NULL_IF_CONFIG_SMALL("live RTMP FLV (Flash Video)"),
    .priv_data_size = sizeof(FLVContext),
    .read_probe     = live_flv_probe,
    .read_header    = flv_read_header,
    .read_packet    = flv_read_packet,
    .read_seek      = flv_read_seek,
    .read_close     = flv_read_close,
    .extensions     = "flv",
    .priv_class     = &live_flv_class,
    .flags          = AVFMT_TS_DISCONT
};