/*
 * "Real" compatible demuxer.
 * Copyright (c) 2000, 2001 Fabrice Bellard
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/dict.h"
#include "avformat.h"
#include "internal.h"
#include "rmsipr.h"
#include "rm.h"
#define DEINT_ID_GENR MKTAG('g', 'e', 'n', 'r') ///< interleaving for Cooker/ATRAC
#define DEINT_ID_INT0 MKTAG('I', 'n', 't', '0') ///< no interleaving needed
#define DEINT_ID_INT4 MKTAG('I', 'n', 't', '4') ///< interleaving for 28.8
#define DEINT_ID_SIPR MKTAG('s', 'i', 'p', 'r') ///< interleaving for Sipro
#define DEINT_ID_VBRF MKTAG('v', 'b', 'r', 'f') ///< VBR case for AAC
#define DEINT_ID_VBRS MKTAG('v', 'b', 'r', 's') ///< VBR case for AAC

struct RMStream {
    AVPacket pkt;      ///< place to store merged video frame / reordered audio data
    int videobufsize;  ///< current assembled frame size
    int videobufpos;   ///< position for the next slice in the video buffer
    int curpic_num;    ///< picture number of current frame
    int cur_slice, slices;
    int64_t pktpos;    ///< first slice position in file

    /// Audio descrambling matrix parameters
    int64_t audiotimestamp; ///< Audio packet timestamp
    int sub_packet_cnt; // Subpacket counter, used while reading
    int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container
    int audio_framesize; /// Audio frame size from container
    int sub_packet_lengths[16]; /// Length of each subpacket
    int32_t deint_id;  ///< deinterleaver used in audio stream
};

typedef struct {
    int nb_packets;
    int old_format;
    int current_stream;
    int remaining_len;
    int audio_stream_num; ///< Stream number for audio packets
    int audio_pkt_cnt; ///< Output packet counter
} RMDemuxContext;
static inline void get_strl(AVIOContext *pb, char *buf, int buf_size, int len)
{
    int i;
    char *q, r;

    q = buf;
    for(i=0;i<len;i++) {
        r = avio_r8(pb);
        if (i < buf_size - 1)
            *q++ = r;
    }
    if (buf_size > 0) *q = '\0';
}

static void get_str8(AVIOContext *pb, char *buf, int buf_size)
{
    get_strl(pb, buf, buf_size, avio_r8(pb));
}

static int rm_read_extradata(AVIOContext *pb, AVCodecContext *avctx, unsigned size)
{
    if (size >= 1<<24)
        return -1;
    avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!avctx->extradata)
        return AVERROR(ENOMEM);
    avctx->extradata_size = avio_read(pb, avctx->extradata, size);
    memset(avctx->extradata + avctx->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    if (avctx->extradata_size != size)
        return AVERROR(EIO);
    return 0;
}

static void rm_read_metadata(AVFormatContext *s, AVIOContext *pb, int wide)
{
    char buf[1024];
    int i;

    for (i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        int len = wide ? avio_rb16(pb) : avio_r8(pb);
        get_strl(pb, buf, sizeof(buf), len);
        av_dict_set(&s->metadata, ff_rm_metadata[i], buf, 0);
    }
}

RMStream *ff_rm_alloc_rmstream (void)
{
    RMStream *rms = av_mallocz(sizeof(RMStream));
    rms->curpic_num = -1;
    return rms;
}

void ff_rm_free_rmstream (RMStream *rms)
{
    av_free_packet(&rms->pkt);
}
static int rm_read_audio_stream_info(AVFormatContext *s, AVIOContext *pb,
                                     AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;
    int ret;

    /* ra type header */
    version = avio_rb16(pb); /* version */
    if (version == 3) {
        int header_size = avio_rb16(pb);
        int64_t startpos = avio_tell(pb);
        avio_skip(pb, 14);
        rm_read_metadata(s, pb, 0);
        if ((startpos + header_size) >= avio_tell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            avio_r8(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + header_size) > avio_tell(pb))
            avio_skip(pb, header_size + startpos - avio_tell(pb));
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->channel_layout = AV_CH_LAYOUT_MONO;
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_id = AV_CODEC_ID_RA_144;
        ast->deint_id = DEINT_ID_INT0;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        int codecdata_length;
        /* old version (4) */
        avio_skip(pb, 2); /* unused */
        avio_rb32(pb); /* .ra4 */
        avio_rb32(pb); /* data size */
        avio_rb16(pb); /* version2 */
        avio_rb32(pb); /* header size */
        flavor= avio_rb16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = avio_rb32(pb); /* coded frame size */
        avio_rb32(pb); /* ??? */
        avio_rb32(pb); /* ??? */
        avio_rb32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = avio_rb16(pb); /* 1 */
        st->codec->block_align= avio_rb16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = avio_rb16(pb); /* sub packet size */
        avio_rb16(pb); /* ??? */
        if (version == 5) {
            avio_rb16(pb); avio_rb16(pb); avio_rb16(pb);
        }
        st->codec->sample_rate = avio_rb16(pb);
        avio_rb32(pb);
        st->codec->channels = avio_rb16(pb);
        if (version == 5) {
            ast->deint_id = avio_rl32(pb);
            avio_read(pb, buf, 4);
            buf[4] = 0;
        } else {
            get_str8(pb, buf, sizeof(buf)); /* desc */
            ast->deint_id = AV_RL32(buf);
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(buf);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);

        switch (st->codec->codec_id) {
        case AV_CODEC_ID_AC3:
            st->need_parsing = AVSTREAM_PARSE_FULL;
            break;
        case AV_CODEC_ID_RA_288:
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;
            break;
        case AV_CODEC_ID_COOK:
            st->need_parsing = AVSTREAM_PARSE_HEADERS;
        case AV_CODEC_ID_ATRAC3:
        case AV_CODEC_ID_SIPR:
            avio_rb16(pb); avio_r8(pb);
            if (version == 5)
                avio_r8(pb);
            codecdata_length = avio_rb32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }

            ast->audio_framesize = st->codec->block_align;
            if (st->codec->codec_id == AV_CODEC_ID_SIPR) {
                if (flavor > 3) {
                    av_log(s, AV_LOG_ERROR, "bad SIPR file flavor %d\n",
                           flavor);
                    return -1;
                }
                st->codec->block_align = ff_sipr_subpk_size[flavor];
            } else {
                if(sub_packet_size <= 0){
                    av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                    return -1;
                }
                st->codec->block_align = ast->sub_packet_size;
            }
            if ((ret = rm_read_extradata(pb, st->codec, codecdata_length)) < 0)
                return ret;

            break;
        case AV_CODEC_ID_AAC:
            avio_rb16(pb); avio_r8(pb);
            if (version == 5)
                avio_r8(pb);
            codecdata_length = avio_rb32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                avio_r8(pb);
                if ((ret = rm_read_extradata(pb, st->codec, codecdata_length - 1)) < 0)
                    return ret;
            }
            break;
        default:
            av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name));
        }
        if (ast->deint_id == DEINT_ID_INT4 ||
            ast->deint_id == DEINT_ID_GENR ||
            ast->deint_id == DEINT_ID_SIPR) {
            if (st->codec->block_align <= 0 ||
                ast->audio_framesize * sub_packet_h > (unsigned)INT_MAX ||
                ast->audio_framesize * sub_packet_h < st->codec->block_align)
                return AVERROR_INVALIDDATA;
            if (av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h) < 0)
                return AVERROR(ENOMEM);
        }

        switch (ast->deint_id) {
        case DEINT_ID_INT4:
            if (ast->coded_framesize > ast->audio_framesize ||
                sub_packet_h <= 1 ||
                ast->coded_framesize * sub_packet_h > (2 + (sub_packet_h & 1)) * ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_GENR:
            if (ast->sub_packet_size <= 0 ||
                ast->sub_packet_size > ast->audio_framesize)
                return AVERROR_INVALIDDATA;
            break;
        case DEINT_ID_SIPR:
        case DEINT_ID_INT0:
        case DEINT_ID_VBRS:
        case DEINT_ID_VBRF:
            break;
        default:
            av_log(NULL, 0, "Unknown interleaver %X\n", ast->deint_id);
            return AVERROR_INVALIDDATA;
        }

        if (read_all) {
            avio_r8(pb);
            avio_r8(pb);
            avio_r8(pb);
            rm_read_metadata(s, pb, 0);
        }
    }
    return 0;
}
int
ff_rm_read_mdpr_codecdata (AVFormatContext *s, AVIOContext *pb,
                           AVStream *st, RMStream *rst, int codec_data_size)
{
    unsigned int v;
    int size;
    int64_t codec_pos;
    int ret;

    avpriv_set_pts_info(st, 64, 1, 1000);
    codec_pos = avio_tell(pb);
    v = avio_rb32(pb);
    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else if (v == MKBETAG('L', 'S', 'D', ':')) {
        avio_seek(pb, -4, SEEK_CUR);
        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size)) < 0)
            return ret;

        st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
        st->codec->codec_tag  = AV_RL32(st->codec->extradata);
        st->codec->codec_id   = ff_codec_get_id(ff_rm_codec_tags,
                                                st->codec->codec_tag);
    } else {
        int fps;
        if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
            goto skip;
        }
        st->codec->codec_tag = avio_rl32(pb);
        st->codec->codec_id  = ff_codec_get_id(ff_rm_codec_tags,
                                               st->codec->codec_tag);
        av_dlog(s, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
        if (st->codec->codec_id == AV_CODEC_ID_NONE)
            goto fail1;
        st->codec->width  = avio_rb16(pb);
        st->codec->height = avio_rb16(pb);
        avio_skip(pb, 2); // looks like bits per sample
        avio_skip(pb, 4); // always zero?
        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
        fps = avio_rb32(pb);

        if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
            return ret;

        if (fps > 0) {
            av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num,
                      0x10000, fps, (1 << 30) - 1);
        } else if (s->error_recognition & AV_EF_EXPLODE) {
            av_log(s, AV_LOG_ERROR, "Invalid framerate\n");
            return AVERROR_INVALIDDATA;
        }
    }

skip:
    /* skip codec info */
    size = avio_tell(pb) - codec_pos;
    avio_skip(pb, codec_data_size - size);

    return 0;
}
/** this function assumes that the demuxer has already seeked to the start
 * of the INDX chunk, and will bail out if not. */
static int rm_read_index(AVFormatContext *s)
{
    AVIOContext *pb = s->pb;
    unsigned int size, n_pkts, str_id, next_off, n, pos, pts;
    AVStream *st;

    do {
        if (avio_rl32(pb) != MKTAG('I','N','D','X'))
            return -1;
        size     = avio_rb32(pb);
        if (size < 20)
            return -1;
        avio_skip(pb, 2);
        n_pkts   = avio_rb32(pb);
        str_id   = avio_rb16(pb);
        next_off = avio_rb32(pb);
        for (n = 0; n < s->nb_streams; n++)
            if (s->streams[n]->id == str_id) {
                st = s->streams[n];
                break;
            }
        if (n == s->nb_streams) {
            av_log(s, AV_LOG_ERROR,
                   "Invalid stream index %d for index at pos %"PRId64"\n",
                   str_id, avio_tell(pb));
            goto skip;
        } else if ((avio_size(pb) - avio_tell(pb)) / 14 < n_pkts) {
            av_log(s, AV_LOG_ERROR,
                   "Nr. of packets in packet index for stream index %d "
                   "exceeds filesize (%"PRId64" at %"PRId64" = %"PRId64")\n",
                   str_id, avio_size(pb), avio_tell(pb),
                   (avio_size(pb) - avio_tell(pb)) / 14);
            goto skip;
        }

        for (n = 0; n < n_pkts; n++) {
            avio_skip(pb, 2);
            pts = avio_rb32(pb);
            pos = avio_rb32(pb);
            avio_skip(pb, 4); /* packet no. */

            av_add_index_entry(st, pos, pts, 0, 0, AVINDEX_KEYFRAME);
        }

skip:
        if (next_off && avio_tell(pb) < next_off &&
            avio_seek(pb, next_off, SEEK_SET) < 0) {
            av_log(s, AV_LOG_ERROR,
                   "Non-linear index detected, not supported\n");
            return -1;
        }
    } while (next_off);

    return 0;
}
static int rm_read_header_old(AVFormatContext *s)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;

    rm->old_format = 1;
    st = avformat_new_stream(s, NULL);
    if (!st)
        return -1;
    st->priv_data = ff_rm_alloc_rmstream();
    return rm_read_audio_stream_info(s, s->pb, st, st->priv_data, 1);
}

static int rm_read_header(AVFormatContext *s)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    AVIOContext *pb = s->pb;
    unsigned int tag;
    int tag_size;
    unsigned int start_time, duration;
    unsigned int data_off = 0, indx_off = 0;
    char buf[128];
    int flags = 0;

    tag = avio_rl32(pb);
    if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
        /* very old .ra format */
        return rm_read_header_old(s);
    } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
        return AVERROR(EIO);
    }

    tag_size = avio_rb32(pb);
    avio_skip(pb, tag_size - 8);

    for(;;) {
        if (pb->eof_reached)
            return -1;
        tag = avio_rl32(pb);
        tag_size = avio_rb32(pb);
        avio_rb16(pb);
        av_dlog(s, "tag=%c%c%c%c (%08x) size=%d\n",
                (tag      ) & 0xff,
                (tag >>  8) & 0xff,
                (tag >> 16) & 0xff,
                (tag >> 24) & 0xff,
                tag,
                tag_size);
        if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
            return -1;
        switch(tag) {
        case MKTAG('P', 'R', 'O', 'P'):
            /* file header */
            avio_rb32(pb); /* max bit rate */
            avio_rb32(pb); /* avg bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            avio_rb32(pb); /* nb packets */
            avio_rb32(pb); /* duration */
            avio_rb32(pb); /* preroll */
            indx_off = avio_rb32(pb); /* index offset */
            data_off = avio_rb32(pb); /* data offset */
            avio_rb16(pb); /* nb streams */
            flags = avio_rb16(pb); /* flags */
            break;
        case MKTAG('C', 'O', 'N', 'T'):
            rm_read_metadata(s, pb, 1);
            break;
        case MKTAG('M', 'D', 'P', 'R'):
            st = avformat_new_stream(s, NULL);
            if (!st)
                return AVERROR(ENOMEM);
            st->id = avio_rb16(pb);
            avio_rb32(pb); /* max bit rate */
            st->codec->bit_rate = avio_rb32(pb); /* bit rate */
            avio_rb32(pb); /* max packet size */
            avio_rb32(pb); /* avg packet size */
            start_time = avio_rb32(pb); /* start time */
            avio_rb32(pb); /* preroll */
            duration = avio_rb32(pb); /* duration */
            st->start_time = start_time;
            st->duration = duration;
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, buf, sizeof(buf)); /* mimetype */
            st->codec->codec_type = AVMEDIA_TYPE_DATA;
            st->priv_data = ff_rm_alloc_rmstream();
            if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data,
                                          avio_rb32(pb)) < 0)
                return -1;
            break;
        case MKTAG('D', 'A', 'T', 'A'):
            goto header_end;
        default:
            /* unknown tag: skip it */
            avio_skip(pb, tag_size - 10);
            break;
        }
    }
 header_end:
    rm->nb_packets = avio_rb32(pb); /* number of packets */
    if (!rm->nb_packets && (flags & 4))
        rm->nb_packets = 3600 * 25;
    avio_rb32(pb); /* next data header */

    if (!data_off)
        data_off = avio_tell(pb) - 18;
    if (indx_off && pb->seekable && !(s->flags & AVFMT_FLAG_IGNIDX) &&
        avio_seek(pb, indx_off, SEEK_SET) >= 0) {
        rm_read_index(s);
        avio_seek(pb, data_off + 18, SEEK_SET);
    }

    return 0;
}
static int get_num(AVIOContext *pb, int *len)
{
    int n, n1;

    n = avio_rb16(pb);
    (*len)-=2;
    n &= 0x7FFF;
    if (n >= 0x4000) {
        return n - 0x4000;
    } else {
        n1 = avio_rb16(pb);
        (*len)-=2;
        return (n << 16) | n1;
    }
}

/* multiple of 20 bytes for ra144 (ugly) */
#define RAW_PACKET_SIZE 1000

static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){
    RMDemuxContext *rm = s->priv_data;
    AVIOContext *pb = s->pb;
    AVStream *st;
    uint32_t state=0xFFFFFFFF;

    while(!pb->eof_reached){
        int len, num, i;
        *pos= avio_tell(pb) - 3;
        if(rm->remaining_len > 0){
            num= rm->current_stream;
            len= rm->remaining_len;
            *timestamp = AV_NOPTS_VALUE;
            *flags= 0;
        }else{
            state= (state<<8) + avio_r8(pb);

            if(state == MKBETAG('I', 'N', 'D', 'X')){
                int n_pkts, expected_len;
                len = avio_rb32(pb);
                avio_skip(pb, 2);
                n_pkts = avio_rb32(pb);
                expected_len = 20 + n_pkts * 14;
                if (len == 20)
                    /* some files don't add index entries to chunk size... */
                    len = expected_len;
                else if (len != expected_len)
                    av_log(s, AV_LOG_WARNING,
                           "Index size %d (%d pkts) is wrong, should be %d.\n",
                           len, n_pkts, expected_len);
                len -= 14; // we already read part of the index header
                if(len<0)
                    continue;
                goto skip;
            } else if (state == MKBETAG('D','A','T','A')) {
                av_log(s, AV_LOG_WARNING,
                       "DATA tag in middle of chunk, file may be broken.\n");
            }

            if(state > (unsigned)0xFFFF || state <= 12)
                continue;
            len=state - 12;
            state= 0xFFFFFFFF;

            num = avio_rb16(pb);
            *timestamp = avio_rb32(pb);
            avio_r8(pb); /* reserved */
            *flags = avio_r8(pb); /* flags */
        }
        for(i=0;i<s->nb_streams;i++) {
            st = s->streams[i];
            if (num == st->id)
                break;
        }
        if (i == s->nb_streams) {
skip:
            /* skip packet if unknown number */
            avio_skip(pb, len);
            rm->remaining_len = 0;
            continue;
        }
        *stream_index= i;

        return len;
    }
    return -1;
}
static int rm_assemble_video_frame(AVFormatContext *s, AVIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len, int *pseq,
                                   int64_t *timestamp)
{
    int hdr, seq, pic_num, len2, pos;
    int type;

    hdr = avio_r8(pb); len--;
    type = hdr >> 6;
    if(type != 3){  // not frame as a part of packet
        seq = avio_r8(pb); len--;
    }
    if(type != 1){  // not whole frame
        len2 = get_num(pb, &len);
        pos  = get_num(pb, &len);
        pic_num = avio_r8(pb); len--;
    }
    if(len<0)
        return -1;
    rm->remaining_len = len;
    if(type&1){     // frame, not slice
        if(type == 3){  // frame as a part of packet
            len= len2;
            *timestamp = pos;
        }
        if(rm->remaining_len < len)
            return -1;
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        avio_read(pb, pkt->data + 9, len);
        return 0;
    }
    //now we have to deal with single slice

    *pseq = seq;
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = avio_tell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);

    if(++vst->cur_slice > vst->slices)
        return 1;
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize)
        return 1;
    if (avio_read(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    if (type == 2 || vst->videobufpos == vst->videobufsize) {
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        vst->pkt.buf = NULL;
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
        vst->pkt.destruct = NULL;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correctly from the beginning
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                    vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        vst->slices = 0;
        return 0;
    }

    return 1;
}

static inline void
rm_ac3_swap_bytes (AVStream *st, AVPacket *pkt)
{
    uint8_t *ptr;
    int j;

    if (st->codec->codec_id == AV_CODEC_ID_AC3) {
        ptr = pkt->data;
        for (j=0;j<pkt->size;j+=2) {
            FFSWAP(int, ptr[0], ptr[1]);
            ptr += 2;
        }
    }
}
int
ff_rm_parse_packet (AVFormatContext *s, AVIOContext *pb,
                    AVStream *st, RMStream *ast, int len, AVPacket *pkt,
                    int *seq, int flags, int64_t timestamp)
{
    RMDemuxContext *rm = s->priv_data;

    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        rm->current_stream= st->id;
        if(rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq, &timestamp))
            return -1; //got partial frame
    } else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
        if ((ast->deint_id == DEINT_ID_GENR) ||
            (ast->deint_id == DEINT_ID_INT4) ||
            (ast->deint_id == DEINT_ID_SIPR)) {
            int x;
            int sps = ast->sub_packet_size;
            int cfs = ast->coded_framesize;
            int h = ast->sub_packet_h;
            int y = ast->sub_packet_cnt;
            int w = ast->audio_framesize;

            if (flags & 2)
                y = ast->sub_packet_cnt = 0;
            if (!y)
                ast->audiotimestamp = timestamp;

            switch (ast->deint_id) {
            case DEINT_ID_INT4:
                for (x = 0; x < h/2; x++)
                    avio_read(pb, ast->pkt.data+x*2*w+y*cfs, cfs);
                break;
            case DEINT_ID_GENR:
                for (x = 0; x < w/sps; x++)
                    avio_read(pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
                break;
            case DEINT_ID_SIPR:
                avio_read(pb, ast->pkt.data + y * w, w);
                break;
            }

            if (++(ast->sub_packet_cnt) < h)
                return -1;
            if (ast->deint_id == DEINT_ID_SIPR)
                ff_rm_reorder_sipr_data(ast->pkt.data, h, w);

            ast->sub_packet_cnt = 0;
            rm->audio_stream_num = st->index;
            rm->audio_pkt_cnt = h * w / st->codec->block_align;
        } else if ((ast->deint_id == DEINT_ID_VBRF) ||
                   (ast->deint_id == DEINT_ID_VBRS)) {
            int x;
            rm->audio_stream_num = st->index;
            ast->sub_packet_cnt = (avio_rb16(pb) & 0xf0) >> 4;
            if (ast->sub_packet_cnt) {
                for (x = 0; x < ast->sub_packet_cnt; x++)
                    ast->sub_packet_lengths[x] = avio_rb16(pb);
                rm->audio_pkt_cnt = ast->sub_packet_cnt;
                ast->audiotimestamp = timestamp;
            } else
                return -1;
        } else {
            av_get_packet(pb, pkt, len);
            rm_ac3_swap_bytes(st, pkt);
        }
    } else
        av_get_packet(pb, pkt, len);

    pkt->stream_index = st->index;

#if 0
    if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
        if(st->codec->codec_id == AV_CODEC_ID_RV20){
            int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
            av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq);

            seq |= (timestamp&~0x3FFF);
            if(seq - timestamp >  0x2000) seq -= 0x4000;
            if(seq - timestamp < -0x2000) seq += 0x4000;
        }
    }
#endif

    pkt->pts = timestamp;
    if (flags & 2)
        pkt->flags |= AV_PKT_FLAG_KEY;

    return st->codec->codec_type == AVMEDIA_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
}

int
ff_rm_retrieve_cache (AVFormatContext *s, AVIOContext *pb,
                      AVStream *st, RMStream *ast, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;

    assert (rm->audio_pkt_cnt > 0);

    if (ast->deint_id == DEINT_ID_VBRF ||
        ast->deint_id == DEINT_ID_VBRS)
        av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]);
    else {
        av_new_packet(pkt, st->codec->block_align);
        memcpy(pkt->data, ast->pkt.data + st->codec->block_align * //FIXME avoid this
               (ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt),
               st->codec->block_align);
    }
    rm->audio_pkt_cnt--;
    if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) {
        ast->audiotimestamp = AV_NOPTS_VALUE;
        pkt->flags = AV_PKT_FLAG_KEY;
    } else
        pkt->flags = 0;
    pkt->stream_index = st->index;

    return rm->audio_pkt_cnt;
}
static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    int i, len, res, seq = 1;
    int64_t timestamp, pos;
    int flags;

    for (;;) {
        if (rm->audio_pkt_cnt) {
            // If there are queued audio packets, return them first
            st = s->streams[rm->audio_stream_num];
            ff_rm_retrieve_cache(s, s->pb, st, st->priv_data, pkt);
            flags = 0;
        } else {
            if (rm->old_format) {
                RMStream *ast;

                st = s->streams[0];
                ast = st->priv_data;
                timestamp = AV_NOPTS_VALUE;
                len = !ast->audio_framesize ? RAW_PACKET_SIZE :
                    ast->coded_framesize * ast->sub_packet_h / 2;
                flags = (seq++ == 1) ? 2 : 0;
                pos = avio_tell(s->pb);
            } else {
                len=sync(s, &timestamp, &flags, &i, &pos);
                if (len > 0)
                    st = s->streams[i];
            }

            if(len<0 || s->pb->eof_reached)
                return AVERROR(EIO);

            res = ff_rm_parse_packet (s, s->pb, st, st->priv_data, len, pkt,
                                      &seq, flags, timestamp);
            if((flags&2) && (seq&0x7F) == 1)
                av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
            if (res)
                continue;
        }

        if(  (st->discard >= AVDISCARD_NONKEY && !(flags&2))
           || st->discard >= AVDISCARD_ALL){
            av_free_packet(pkt);
        } else
            break;
    }

    return 0;
}

static int rm_read_close(AVFormatContext *s)
{
    int i;

    for (i=0;i<s->nb_streams;i++)
        ff_rm_free_rmstream(s->streams[i]->priv_data);

    return 0;
}

static int rm_probe(AVProbeData *p)
{
    /* check file header */
    if ((p->buf[0] == '.' && p->buf[1] == 'R' &&
         p->buf[2] == 'M' && p->buf[3] == 'F' &&
         p->buf[4] == 0 && p->buf[5] == 0) ||
        (p->buf[0] == '.' && p->buf[1] == 'r' &&
         p->buf[2] == 'a' && p->buf[3] == 0xfd))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}

static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
                           int64_t *ppos, int64_t pos_limit)
{
    RMDemuxContext *rm = s->priv_data;
    int64_t pos, dts;
    int stream_index2, flags, len, h;

    pos = *ppos;

    if(rm->old_format)
        return AV_NOPTS_VALUE;

    avio_seek(s->pb, pos, SEEK_SET);
    rm->remaining_len=0;
    for(;;){
        int seq=1;
        AVStream *st;

        len=sync(s, &dts, &flags, &stream_index2, &pos);
        if(len<0)
            return AV_NOPTS_VALUE;

        st = s->streams[stream_index2];
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            h= avio_r8(s->pb); len--;
            if(!(h & 0x40)){
                seq = avio_r8(s->pb); len--;
            }
        }

        if((flags&2) && (seq&0x7F) == 1){
            av_dlog(s, "%d %d-%d %"PRId64" %d\n",
                    flags, stream_index2, stream_index, dts, seq);
            av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
            if(stream_index2 == stream_index)
                break;
        }

        avio_skip(s->pb, len);
    }
    *ppos = pos;
    return dts;
}

AVInputFormat ff_rm_demuxer = {
    .name           = "rm",
    .long_name      = NULL_IF_CONFIG_SMALL("RealMedia"),
    .priv_data_size = sizeof(RMDemuxContext),
    .read_probe     = rm_probe,
    .read_header    = rm_read_header,
    .read_packet    = rm_read_packet,
    .read_close     = rm_read_close,
    .read_timestamp = rm_read_dts,
};

AVInputFormat ff_rdt_demuxer = {
    .name           = "rdt",
    .long_name      = NULL_IF_CONFIG_SMALL("RDT demuxer"),
    .priv_data_size = sizeof(RMDemuxContext),
    .read_close     = rm_read_close,
    .flags          = AVFMT_NOFILE,
};