/*
 * "Real" compatible demuxer.
 * Copyright (c) 2000, 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "rm.h"

struct RMStream {
    AVPacket pkt;      ///< place to store merged video frame / reordered audio data
    int videobufsize;  ///< current assembled frame size
    int videobufpos;   ///< position for the next slice in the video buffer
    int curpic_num;    ///< picture number of current frame
    int cur_slice, slices;
    int64_t pktpos;    ///< first slice position in file

    /// Audio descrambling matrix parameters
    int64_t audiotimestamp; ///< Audio packet timestamp
    int sub_packet_cnt; // Subpacket counter, used while reading
    int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container
    int audio_framesize; /// Audio frame size from container
    int sub_packet_lengths[16]; /// Length of each subpacket
};

typedef struct {
    int nb_packets;
    int old_format;
    int current_stream;
    int remaining_len;
    int audio_stream_num; ///< Stream number for audio packets
    int audio_pkt_cnt;    ///< Output packet counter
} RMDemuxContext;
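/* Read exactly len bytes from pb into buf, keeping at most buf_size - 1
 * characters and always NUL-terminating when buf_size > 0. */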
static inline void get_strl(ByteIOContext *pb, char *buf, int buf_size, int len)
{
    int i;
    char *q, r;

    q = buf;
    for(i=0;i<len;i++) {
        r = get_byte(pb);
        if (i < buf_size - 1)
            *q++ = r;
    }
    if (buf_size > 0) *q = '\0';
}

static void get_str8(ByteIOContext *pb, char *buf, int buf_size)
{
    get_strl(pb, buf, buf_size, get_byte(pb));
}
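/* Read the metadata strings listed in ff_rm_metadata from the stream and
 * store them in the AVFormatContext metadata; 'wide' selects 16-bit vs.
 * 8-bit length fields. */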
static void rm_read_metadata(AVFormatContext *s, int wide)
{
    char buf[1024];
    int i;
    for (i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        int len = wide ? get_be16(s->pb) : get_byte(s->pb);
        get_strl(s->pb, buf, sizeof(buf), len);
        av_metadata_set(&s->metadata, ff_rm_metadata[i], buf);
    }
}

RMStream *ff_rm_alloc_rmstream (void)
{
    RMStream *rms = av_mallocz(sizeof(RMStream));
    rms->curpic_num = -1;
    return rms;
}

void ff_rm_free_rmstream (RMStream *rms)
{
    av_free_packet(&rms->pkt);
}
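/* Parse a RealAudio stream header (".ra" version 3, 4 or 5) from pb and
 * fill in the codec parameters and audio descrambling parameters of
 * 'st'/'ast'. 'read_all' additionally consumes the trailing metadata found
 * in stand-alone .ra files. Returns 0 on success, -1 on error. */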
static int rm_read_audio_stream_info(AVFormatContext *s, ByteIOContext *pb,
                                     AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;

    /* ra type header */
    version = get_be32(pb); /* version */
    if (((version >> 16) & 0xff) == 3) {
        int64_t startpos = url_ftell(pb);
        url_fskip(pb, 14);
        rm_read_metadata(s, 0);
        if ((startpos + (version & 0xffff)) >= url_ftell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            get_byte(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + (version & 0xffff)) > url_ftell(pb))
            url_fskip(pb, (version & 0xffff) + startpos - url_ftell(pb));
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_RA_144;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        /* old version (4) */
        get_be32(pb); /* .ra4 */
        get_be32(pb); /* data size */
        get_be16(pb); /* version2 */
        get_be32(pb); /* header size */
        flavor= get_be16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = get_be32(pb); /* coded frame size */
        get_be32(pb); /* ??? */
        get_be32(pb); /* ??? */
        get_be32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */
        st->codec->block_align= get_be16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = get_be16(pb); /* sub packet size */
        get_be16(pb); /* ??? */
        if (((version >> 16) & 0xff) == 5) {
            get_be16(pb); get_be16(pb); get_be16(pb);
        }
        st->codec->sample_rate = get_be16(pb);
        get_be32(pb);
        st->codec->channels = get_be16(pb);
        if (((version >> 16) & 0xff) == 5) {
            get_be32(pb);
            get_buffer(pb, buf, 4);
            buf[4] = 0;
        } else {
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        if (!strcmp(buf, "dnet")) {
            st->codec->codec_id = CODEC_ID_AC3;
            st->need_parsing = AVSTREAM_PARSE_FULL;
        } else if (!strcmp(buf, "28_8")) {
            st->codec->codec_id = CODEC_ID_RA_288;
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;
            if(ast->audio_framesize >= UINT_MAX / sub_packet_h){
                av_log(s, AV_LOG_ERROR, "ast->audio_framesize * sub_packet_h too large\n");
                return -1;
            }
            av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h);
        } else if ((!strcmp(buf, "cook")) || (!strcmp(buf, "atrc")) || (!strcmp(buf, "sipr"))) {
            int codecdata_length;
            get_be16(pb); get_byte(pb);
            if (((version >> 16) & 0xff) == 5)
                get_byte(pb);
            codecdata_length = get_be32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if(sub_packet_size <= 0){
                av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                return -1;
            }
            if (!strcmp(buf, "cook")) st->codec->codec_id = CODEC_ID_COOK;
            else if (!strcmp(buf, "sipr")) st->codec->codec_id = CODEC_ID_SIPR;
            else st->codec->codec_id = CODEC_ID_ATRAC3;
            st->codec->extradata_size= codecdata_length;
            st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = ast->sub_packet_size;
            if(ast->audio_framesize >= UINT_MAX / sub_packet_h){
                av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n");
                return -1;
            }
            av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h);
        } else if (!strcmp(buf, "raac") || !strcmp(buf, "racp")) {
            int codecdata_length;
            get_be16(pb); get_byte(pb);
            if (((version >> 16) & 0xff) == 5)
                get_byte(pb);
            st->codec->codec_id = CODEC_ID_AAC;
            codecdata_length = get_be32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                st->codec->extradata_size = codecdata_length - 1;
                st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                get_byte(pb);
                get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
            }
        } else {
            st->codec->codec_id = CODEC_ID_NONE;
            av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name));
        }
        if (read_all) {
            get_byte(pb);
            get_byte(pb);
            get_byte(pb);
            rm_read_metadata(s, 0);
        }
    }
    return 0;
}
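/* Parse the type-specific data of an MDPR chunk: either an embedded
 * RealAudio header (".ra\xfd") or a VIDO video header carrying an RVxx
 * codec tag, dimensions, frame rate and extradata. Any remaining bytes of
 * the chunk are skipped. Returns 0 on success or a negative value on error. */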
int
ff_rm_read_mdpr_codecdata (AVFormatContext *s, ByteIOContext *pb,
                           AVStream *st, RMStream *rst, int codec_data_size)
{
    unsigned int v;
    int size;
    int64_t codec_pos;

    av_set_pts_info(st, 64, 1, 1000);
    codec_pos = url_ftell(pb);
    v = get_be32(pb);
    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else {
        int fps, fps2;
        if (get_le32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
            goto skip;
        }
        st->codec->codec_tag = get_le32(pb);
//        av_log(s, AV_LOG_DEBUG, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
        if (   st->codec->codec_tag != MKTAG('R', 'V', '1', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', '2', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', '3', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', '4', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', 'T', 'R'))
            goto fail1;
        st->codec->width = get_be16(pb);
        st->codec->height = get_be16(pb);
        st->codec->time_base.num= 1;
        fps= get_be16(pb);
        st->codec->codec_type = CODEC_TYPE_VIDEO;
        get_be32(pb);
        fps2= get_be16(pb);
        get_be16(pb);

        st->codec->extradata_size= codec_data_size - (url_ftell(pb) - codec_pos);

        if(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
            //check is redundant as get_buffer() will catch this
            av_log(s, AV_LOG_ERROR, "st->codec->extradata_size too large\n");
            return -1;
        }
        st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!st->codec->extradata)
            return AVERROR(ENOMEM);
        get_buffer(pb, st->codec->extradata, st->codec->extradata_size);

//        av_log(s, AV_LOG_DEBUG, "fps= %d fps2= %d\n", fps, fps2);
        st->codec->time_base.den = fps * st->codec->time_base.num;
        switch(((uint8_t*)st->codec->extradata)[4]>>4){
        case 1: st->codec->codec_id = CODEC_ID_RV10; break;
        case 2: st->codec->codec_id = CODEC_ID_RV20; break;
        case 3: st->codec->codec_id = CODEC_ID_RV30; break;
        case 4: st->codec->codec_id = CODEC_ID_RV40; break;
        default: goto fail1;
        }
    }

skip:
    /* skip codec info */
    size = url_ftell(pb) - codec_pos;
    url_fskip(pb, codec_data_size - size);

    return 0;
}
/** this function assumes that the demuxer has already seeked to the start
 * of the INDX chunk, and will bail out if not. */
static int rm_read_index(AVFormatContext *s)
{
    ByteIOContext *pb = s->pb;
    unsigned int size, n_pkts, str_id, next_off, n, pos, pts;
    AVStream *st;

    do {
        if (get_le32(pb) != MKTAG('I','N','D','X'))
            return -1;
        size     = get_be32(pb);
        if (size < 20)
            return -1;
        url_fskip(pb, 2);
        n_pkts   = get_be32(pb);
        str_id   = get_be16(pb);
        next_off = get_be32(pb);
        for (n = 0; n < s->nb_streams; n++)
            if (s->streams[n]->id == str_id) {
                st = s->streams[n];
                break;
            }
        if (n == s->nb_streams)
            goto skip;

        for (n = 0; n < n_pkts; n++) {
            url_fskip(pb, 2);
            pts = get_be32(pb);
            pos = get_be32(pb);
            url_fskip(pb, 4); /* packet no. */

            av_add_index_entry(st, pos, pts, 0, 0, AVINDEX_KEYFRAME);
        }

skip:
        if (next_off && url_ftell(pb) != next_off &&
            url_fseek(pb, next_off, SEEK_SET) < 0)
            return -1;
    } while (next_off);

    return 0;
}
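/* Header parser for raw ".ra" files (no RealMedia container): creates a
 * single audio stream and hands off to rm_read_audio_stream_info(). */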
static int rm_read_header_old(AVFormatContext *s, AVFormatParameters *ap)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;

    rm->old_format = 1;
    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    st->priv_data = ff_rm_alloc_rmstream();
    return rm_read_audio_stream_info(s, s->pb, st, st->priv_data, 1);
}
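/* Parse the ".RMF" file header and the chunks that follow it (PROP, CONT,
 * MDPR, ...) up to the first DATA chunk, creating one AVStream per MDPR
 * chunk and reading the INDX chunk if the PROP header points to one. */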
static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    ByteIOContext *pb = s->pb;
    unsigned int tag;
    int tag_size;
    unsigned int start_time, duration;
    unsigned int data_off = 0, indx_off = 0;
    char buf[128];
    int flags = 0;

    tag = get_le32(pb);
    if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
        /* very old .ra format */
        return rm_read_header_old(s, ap);
    } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
        return AVERROR(EIO);
    }

    get_be32(pb); /* header size */
    get_be16(pb);
    get_be32(pb);
    get_be32(pb); /* number of headers */

    for(;;) {
        if (url_feof(pb))
            return -1;
        tag = get_le32(pb);
        tag_size = get_be32(pb);
        get_be16(pb);
#if 0
        printf("tag=%c%c%c%c (%08x) size=%d\n",
               (tag) & 0xff,
               (tag >> 8) & 0xff,
               (tag >> 16) & 0xff,
               (tag >> 24) & 0xff,
               tag,
               tag_size);
#endif
        if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
            return -1;
        switch(tag) {
        case MKTAG('P', 'R', 'O', 'P'):
            /* file header */
            get_be32(pb); /* max bit rate */
            get_be32(pb); /* avg bit rate */
            get_be32(pb); /* max packet size */
            get_be32(pb); /* avg packet size */
            get_be32(pb); /* nb packets */
            get_be32(pb); /* duration */
            get_be32(pb); /* preroll */
            indx_off = get_be32(pb); /* index offset */
            data_off = get_be32(pb); /* data offset */
            get_be16(pb); /* nb streams */
            flags = get_be16(pb); /* flags */
            break;
        case MKTAG('C', 'O', 'N', 'T'):
            rm_read_metadata(s, 1);
            break;
        case MKTAG('M', 'D', 'P', 'R'):
            st = av_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);
            st->id = get_be16(pb);
            get_be32(pb); /* max bit rate */
            st->codec->bit_rate = get_be32(pb); /* bit rate */
            get_be32(pb); /* max packet size */
            get_be32(pb); /* avg packet size */
            start_time = get_be32(pb); /* start time */
            get_be32(pb); /* preroll */
            duration = get_be32(pb); /* duration */
            st->start_time = start_time;
            st->duration = duration;
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, buf, sizeof(buf)); /* mimetype */
            st->codec->codec_type = CODEC_TYPE_DATA;
            st->priv_data = ff_rm_alloc_rmstream();
            if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data,
                                          get_be32(pb)) < 0)
                return -1;
            break;
        case MKTAG('D', 'A', 'T', 'A'):
            goto header_end;
        default:
            /* unknown tag: skip it */
            url_fskip(pb, tag_size - 10);
            break;
        }
    }
 header_end:
    rm->nb_packets = get_be32(pb); /* number of packets */
    if (!rm->nb_packets && (flags & 4))
        rm->nb_packets = 3600 * 25;
    get_be32(pb); /* next data header */

    if (!data_off)
        data_off = url_ftell(pb) - 18;
    if (indx_off && url_fseek(pb, indx_off, SEEK_SET) >= 0) {
        rm_read_index(s);
        url_fseek(pb, data_off + 18, SEEK_SET);
    }

    return 0;
}
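/* Read a variable-length number from a packet header: a 16-bit value with
 * the top bit masked off; values >= 0x4000 encode the number directly
 * (minus 0x4000), smaller values form the high half of a 32-bit number
 * whose low half follows. *len is decremented by the bytes consumed. */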
static int get_num(ByteIOContext *pb, int *len)
{
    int n, n1;

    n = get_be16(pb);
    (*len)-=2;
    n &= 0x7FFF;
    if (n >= 0x4000) {
        return n - 0x4000;
    } else {
        n1 = get_be16(pb);
        (*len)-=2;
        return (n << 16) | n1;
    }
}

/* multiple of 20 bytes for ra144 (ugly) */
#define RAW_PACKET_SIZE 1000
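/* Resynchronize on the next data packet header: scans forward for a valid
 * packet header (skipping over a trailing INDX chunk if one is hit), fills
 * in timestamp, flags, stream index and file position, and returns the
 * remaining payload length, or -1 on EOF. */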
static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){
    RMDemuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    int len, num, res, i;
    AVStream *st;
    uint32_t state=0xFFFFFFFF;

    while(!url_feof(pb)){
        *pos= url_ftell(pb) - 3;
        if(rm->remaining_len > 0){
            num= rm->current_stream;
            len= rm->remaining_len;
            *timestamp = AV_NOPTS_VALUE;
            *flags= 0;
        }else{
            state= (state<<8) + get_byte(pb);

            if(state == MKBETAG('I', 'N', 'D', 'X')){
                int n_pkts, expected_len;
                len = get_be32(pb);
                url_fskip(pb, 2);
                n_pkts = get_be32(pb);
                expected_len = 20 + n_pkts * 14;
                if (len == 20)
                    /* some files don't add index entries to chunk size... */
                    len = expected_len;
                else if (len != expected_len)
                    av_log(s, AV_LOG_WARNING,
                           "Index size %d (%d pkts) is wrong, should be %d.\n",
                           len, n_pkts, expected_len);
                len -= 14; // we already read part of the index header
                if(len<0)
                    continue;
                goto skip;
            }

            if(state > (unsigned)0xFFFF || state < 12)
                continue;
            len=state;
            state= 0xFFFFFFFF;

            num = get_be16(pb);
            *timestamp = get_be32(pb);
            res= get_byte(pb); /* reserved */
            *flags = get_byte(pb); /* flags */

            len -= 12;
        }
        for(i=0;i<s->nb_streams;i++) {
            st = s->streams[i];
            if (num == st->id)
                break;
        }
        if (i == s->nb_streams) {
skip:
            /* skip packet if unknown number */
            url_fskip(pb, len);
            rm->remaining_len = 0;
            continue;
        }
        *stream_index= i;

        return len;
    }
    return -1;
}
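/* Reassemble a video frame from one or more packet slices. Whole frames
 * (and frames packed as part of a packet) are returned immediately with a
 * 9-byte slice header prepended; partial slices are accumulated in
 * vst->pkt until the frame is complete. Returns 0 when a full frame is in
 * *pkt, 1 when more slices are needed, negative on error. */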
static int rm_assemble_video_frame(AVFormatContext *s, ByteIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len, int *pseq)
{
    int hdr, seq, pic_num, len2, pos;
    int type;

    hdr = get_byte(pb); len--;
    type = hdr >> 6;
    if(type != 3){  // not frame as a part of packet
        seq = get_byte(pb); len--;
    }
    if(type != 1){  // not whole frame
        len2 = get_num(pb, &len);
        pos  = get_num(pb, &len);
        pic_num = get_byte(pb); len--;
    }
    if(len<0)
        return -1;
    rm->remaining_len = len;
    if(type&1){     // frame, not slice
        if(type == 3)  // frame as a part of packet
            len= len2;
        if(rm->remaining_len < len)
            return -1;
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        get_buffer(pb, pkt->data + 9, len);
        return 0;
    }
    //now we have to deal with single slice

    *pseq = seq;
    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = url_ftell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);

    if(++vst->cur_slice > vst->slices)
        return 1;
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize)
        return 1;
    if (get_buffer(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    if(type == 2 || (vst->videobufpos) == vst->videobufsize){
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                    vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        return 0;
    }

    return 1;
}

static inline void
rm_ac3_swap_bytes (AVStream *st, AVPacket *pkt)
{
    uint8_t *ptr;
    int j;

    if (st->codec->codec_id == CODEC_ID_AC3) {
        ptr = pkt->data;
        for (j=0;j<pkt->size;j+=2) {
            FFSWAP(int, ptr[0], ptr[1]);
            ptr += 2;
        }
    }
}
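/* Parse one data packet for the given stream. Video payloads go through
 * rm_assemble_video_frame(); interleaved audio (RA 28.8, cook, ATRAC3,
 * sipr) is descrambled into ast->pkt and AAC subpacket lengths are
 * recorded. Returns -1 if no complete packet could be produced yet,
 * otherwise the number of audio packets left to fetch via
 * ff_rm_retrieve_cache() (0 when *pkt has been filled directly). */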
int
ff_rm_parse_packet (AVFormatContext *s, ByteIOContext *pb,
                    AVStream *st, RMStream *ast, int len, AVPacket *pkt,
                    int *seq, int *flags, int64_t *timestamp)
{
    RMDemuxContext *rm = s->priv_data;

    if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
        rm->current_stream= st->id;
        if(rm_assemble_video_frame(s, pb, rm, ast, pkt, len, seq))
            return -1; //got partial frame
    } else if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
        if ((st->codec->codec_id == CODEC_ID_RA_288) ||
            (st->codec->codec_id == CODEC_ID_COOK) ||
            (st->codec->codec_id == CODEC_ID_ATRAC3) ||
            (st->codec->codec_id == CODEC_ID_SIPR)) {
            int x;
            int sps = ast->sub_packet_size;
            int cfs = ast->coded_framesize;
            int h = ast->sub_packet_h;
            int y = ast->sub_packet_cnt;
            int w = ast->audio_framesize;

            if (*flags & 2)
                y = ast->sub_packet_cnt = 0;
            if (!y)
                ast->audiotimestamp = *timestamp;

            switch(st->codec->codec_id) {
            case CODEC_ID_RA_288:
                for (x = 0; x < h/2; x++)
                    get_buffer(pb, ast->pkt.data+x*2*w+y*cfs, cfs);
                break;
            case CODEC_ID_ATRAC3:
            case CODEC_ID_COOK:
                for (x = 0; x < w/sps; x++)
                    get_buffer(pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
                break;
            }

            if (++(ast->sub_packet_cnt) < h)
                return -1;

            ast->sub_packet_cnt = 0;
            rm->audio_stream_num = st->index;
            rm->audio_pkt_cnt = h * w / st->codec->block_align;
        } else if (st->codec->codec_id == CODEC_ID_AAC) {
            int x;
            rm->audio_stream_num = st->index;
            ast->sub_packet_cnt = (get_be16(pb) & 0xf0) >> 4;
            if (ast->sub_packet_cnt) {
                for (x = 0; x < ast->sub_packet_cnt; x++)
                    ast->sub_packet_lengths[x] = get_be16(pb);
                rm->audio_pkt_cnt = ast->sub_packet_cnt;
                ast->audiotimestamp = *timestamp;
            } else
                return -1;
        } else {
            av_get_packet(pb, pkt, len);
            rm_ac3_swap_bytes(st, pkt);
        }
    } else
        av_get_packet(pb, pkt, len);

    pkt->stream_index = st->index;

#if 0
    if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
        if(st->codec->codec_id == CODEC_ID_RV20){
            int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
            av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq);

            seq |= (*timestamp&~0x3FFF);
            if(seq - *timestamp >  0x2000) seq -= 0x4000;
            if(seq - *timestamp < -0x2000) seq += 0x4000;
        }
    }
#endif

    pkt->pts= *timestamp;
    if (*flags & 2)
        pkt->flags |= PKT_FLAG_KEY;

    return st->codec->codec_type == CODEC_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
}
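/* Return the next queued audio packet that was descrambled or split up by
 * a previous ff_rm_parse_packet() call; only the first packet of a block
 * carries the cached timestamp. Returns the number of packets remaining. */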
int
ff_rm_retrieve_cache (AVFormatContext *s, ByteIOContext *pb,
                      AVStream *st, RMStream *ast, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;

    assert (rm->audio_pkt_cnt > 0);

    if (st->codec->codec_id == CODEC_ID_AAC)
        av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]);
    else {
        av_new_packet(pkt, st->codec->block_align);
        memcpy(pkt->data, ast->pkt.data + st->codec->block_align * //FIXME avoid this
               (ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt),
               st->codec->block_align);
    }
    rm->audio_pkt_cnt--;
    if ((pkt->pts = ast->audiotimestamp) != AV_NOPTS_VALUE) {
        ast->audiotimestamp = AV_NOPTS_VALUE;
        pkt->flags = PKT_FLAG_KEY;
    } else
        pkt->flags = 0;
    pkt->stream_index = st->index;

    return rm->audio_pkt_cnt;
}
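/* Demuxer read_packet callback: drains any cached audio packets first,
 * then either reads fixed-size blocks (old .ra files) or syncs to the next
 * container packet and parses it, honoring the stream discard settings. */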
static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    int i, len, res, seq = 1;
    int64_t timestamp, pos;
    int old_flags, flags;

    for (;;) {
        if (rm->audio_pkt_cnt) {
            // If there are queued audio packets, return them first
            st = s->streams[rm->audio_stream_num];
            ff_rm_retrieve_cache(s, s->pb, st, st->priv_data, pkt);
        } else {
            if (rm->old_format) {
                RMStream *ast;

                st = s->streams[0];
                ast = st->priv_data;
                timestamp = AV_NOPTS_VALUE;
                len = !ast->audio_framesize ? RAW_PACKET_SIZE :
                    ast->coded_framesize * ast->sub_packet_h / 2;
                flags = (seq++ == 1) ? 2 : 0;
            } else {
                len=sync(s, &timestamp, &flags, &i, &pos);
                if (len > 0)
                    st = s->streams[i];
            }

            if(len<0 || url_feof(s->pb))
                return AVERROR(EIO);

            old_flags = flags;
            res = ff_rm_parse_packet (s, s->pb, st, st->priv_data, len, pkt,
                                      &seq, &flags, &timestamp);
            if((old_flags&2) && (seq&0x7F) == 1)
                av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
            if (res)
                continue;
        }

        if(  (st->discard >= AVDISCARD_NONKEY && !(flags&2))
           || st->discard >= AVDISCARD_ALL){
            av_free_packet(pkt);
        } else
            break;
    }

    return 0;
}

static int rm_read_close(AVFormatContext *s)
{
    int i;

    for (i=0;i<s->nb_streams;i++)
        ff_rm_free_rmstream(s->streams[i]->priv_data);

    return 0;
}

static int rm_probe(AVProbeData *p)
{
    /* check file header */
    if ((p->buf[0] == '.' && p->buf[1] == 'R' &&
         p->buf[2] == 'M' && p->buf[3] == 'F' &&
         p->buf[4] == 0 && p->buf[5] == 0) ||
        (p->buf[0] == '.' && p->buf[1] == 'r' &&
         p->buf[2] == 'a' && p->buf[3] == 0xfd))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
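/* read_timestamp callback used for seeking: scans forward from *ppos for a
 * keyframe packet of the requested stream and returns its timestamp,
 * updating *ppos to the packet position. */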
static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
                           int64_t *ppos, int64_t pos_limit)
{
    RMDemuxContext *rm = s->priv_data;
    int64_t pos, dts;
    int stream_index2, flags, len, h;

    pos = *ppos;

    if(rm->old_format)
        return AV_NOPTS_VALUE;

    url_fseek(s->pb, pos, SEEK_SET);
    rm->remaining_len=0;
    for(;;){
        int seq=1;
        AVStream *st;

        len=sync(s, &dts, &flags, &stream_index2, &pos);
        if(len<0)
            return AV_NOPTS_VALUE;

        st = s->streams[stream_index2];
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            h= get_byte(s->pb); len--;
            if(!(h & 0x40)){
                seq = get_byte(s->pb); len--;
            }
        }

        if((flags&2) && (seq&0x7F) == 1){
//            av_log(s, AV_LOG_DEBUG, "%d %d-%d %"PRId64" %d\n", flags, stream_index2, stream_index, dts, seq);
            av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
            if(stream_index2 == stream_index)
                break;
        }

        url_fskip(s->pb, len);
    }
    *ppos = pos;
    return dts;
}

AVInputFormat rm_demuxer = {
    "rm",
    NULL_IF_CONFIG_SMALL("RealMedia format"),
    sizeof(RMDemuxContext),
    rm_probe,
    rm_read_header,
    rm_read_packet,
    rm_read_close,
    NULL,
    rm_read_dts,
};

AVInputFormat rdt_demuxer = {
    "rdt",
    NULL_IF_CONFIG_SMALL("RDT demuxer"),
    sizeof(RMDemuxContext),
    NULL,
    NULL,
    NULL,
    rm_read_close,
};