/*
 * "Real" compatible demuxer.
 * Copyright (c) 2000, 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include "libavutil/avstring.h"
#include "libavutil/intreadwrite.h"
#include "avformat.h"
#include "rm.h"
struct RMStream {
    AVPacket pkt;      ///< place to store merged video frame / reordered audio data
    int videobufsize;  ///< current assembled frame size
    int videobufpos;   ///< position for the next slice in the video buffer
    int curpic_num;    ///< picture number of current frame
    int cur_slice, slices;
    int64_t pktpos;    ///< first slice position in file

    /// Audio descrambling matrix parameters
    int64_t audiotimestamp; ///< Audio packet timestamp
    int sub_packet_cnt;     ///< Subpacket counter, used while reading
    int sub_packet_size, sub_packet_h, coded_framesize; ///< Descrambling parameters from container
    int audio_framesize;    ///< Audio frame size from container
    int sub_packet_lengths[16]; ///< Length of each subpacket
};
typedef struct {
    int nb_packets;
    int old_format;
    int current_stream;
    int remaining_len;
    int audio_stream_num; ///< Stream number for audio packets
    int audio_pkt_cnt;    ///< Output packet counter
} RMDemuxContext;
static inline void get_strl(ByteIOContext *pb, char *buf, int buf_size, int len)
{
    int i;
    char *q, r;

    q = buf;
    for(i=0;i<len;i++) {
        r = get_byte(pb);
        if (i < buf_size - 1)
            *q++ = r;
    }
    if (buf_size > 0) *q = '\0';
}

static void get_str8(ByteIOContext *pb, char *buf, int buf_size)
{
    get_strl(pb, buf, buf_size, get_byte(pb));
}
static void rm_read_metadata(AVFormatContext *s, int wide)
{
    char buf[1024];
    int i;

    for (i=0; i<FF_ARRAY_ELEMS(ff_rm_metadata); i++) {
        int len = wide ? get_be16(s->pb) : get_byte(s->pb);
        get_strl(s->pb, buf, sizeof(buf), len);
        av_metadata_set(&s->metadata, ff_rm_metadata[i], buf);
    }
}
RMStream *ff_rm_alloc_rmstream (void)
{
    RMStream *rms = av_mallocz(sizeof(RMStream));
    rms->curpic_num = -1;
    return rms;
}

void ff_rm_free_rmstream (RMStream *rms)
{
    av_free_packet(&rms->pkt);
}
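
/* Parse a RealAudio (".ra") stream header.  Version 3 headers describe
 * 14.4 ("lpcJ") mono audio at 8 kHz; version 4/5 headers carry the
 * interleaving parameters and the codec fourcc (dnet, 28_8, cook, atrc,
 * sipr, raac/racp) used to select the codec and size the descrambling
 * buffer in ast->pkt. */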
static int rm_read_audio_stream_info(AVFormatContext *s, ByteIOContext *pb,
                                     AVStream *st, RMStream *ast, int read_all)
{
    char buf[256];
    uint32_t version;

    /* ra type header */
    version = get_be32(pb); /* version */
    if (((version >> 16) & 0xff) == 3) {
        int64_t startpos = url_ftell(pb);
        url_fskip(pb, 14);
        rm_read_metadata(s, 0);
        if ((startpos + (version & 0xffff)) >= url_ftell(pb) + 2) {
            // fourcc (should always be "lpcJ")
            get_byte(pb);
            get_str8(pb, buf, sizeof(buf));
        }
        // Skip extra header crap (this should never happen)
        if ((startpos + (version & 0xffff)) > url_ftell(pb))
            url_fskip(pb, (version & 0xffff) + startpos - url_ftell(pb));
        st->codec->sample_rate = 8000;
        st->codec->channels = 1;
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        st->codec->codec_id = CODEC_ID_RA_144;
    } else {
        int flavor, sub_packet_h, coded_framesize, sub_packet_size;
        /* old version (4) */
        get_be32(pb); /* .ra4 */
        get_be32(pb); /* data size */
        get_be16(pb); /* version2 */
        get_be32(pb); /* header size */
        flavor= get_be16(pb); /* add codec info / flavor */
        ast->coded_framesize = coded_framesize = get_be32(pb); /* coded frame size */
        get_be32(pb); /* ??? */
        get_be32(pb); /* ??? */
        get_be32(pb); /* ??? */
        ast->sub_packet_h = sub_packet_h = get_be16(pb); /* 1 */
        st->codec->block_align= get_be16(pb); /* frame size */
        ast->sub_packet_size = sub_packet_size = get_be16(pb); /* sub packet size */
        get_be16(pb); /* ??? */
        if (((version >> 16) & 0xff) == 5) {
            get_be16(pb); get_be16(pb); get_be16(pb);
        }
        st->codec->sample_rate = get_be16(pb);
        get_be32(pb);
        st->codec->channels = get_be16(pb);
        if (((version >> 16) & 0xff) == 5) {
            get_be32(pb);
            get_buffer(pb, buf, 4);
            buf[4] = 0;
        } else {
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, buf, sizeof(buf)); /* desc */
        }
        st->codec->codec_type = CODEC_TYPE_AUDIO;
        if (!strcmp(buf, "dnet")) {
            st->codec->codec_id = CODEC_ID_AC3;
            st->need_parsing = AVSTREAM_PARSE_FULL;
        } else if (!strcmp(buf, "28_8")) {
            st->codec->codec_id = CODEC_ID_RA_288;
            st->codec->extradata_size= 0;
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = coded_framesize;

            if(ast->audio_framesize >= UINT_MAX / sub_packet_h){
                av_log(s, AV_LOG_ERROR, "ast->audio_framesize * sub_packet_h too large\n");
                return -1;
            }

            av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h);
        } else if ((!strcmp(buf, "cook")) || (!strcmp(buf, "atrc")) || (!strcmp(buf, "sipr"))) {
            int codecdata_length;
            get_be16(pb); get_byte(pb);
            if (((version >> 16) & 0xff) == 5)
                get_byte(pb);
            codecdata_length = get_be32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if(sub_packet_size <= 0){
                av_log(s, AV_LOG_ERROR, "sub_packet_size is invalid\n");
                return -1;
            }

            if (!strcmp(buf, "cook")) st->codec->codec_id = CODEC_ID_COOK;
            else if (!strcmp(buf, "sipr")) st->codec->codec_id = CODEC_ID_SIPR;
            else st->codec->codec_id = CODEC_ID_ATRAC3;

            st->codec->extradata_size= codecdata_length;
            st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
            get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
            ast->audio_framesize = st->codec->block_align;
            st->codec->block_align = ast->sub_packet_size;

            if(ast->audio_framesize >= UINT_MAX / sub_packet_h){
                av_log(s, AV_LOG_ERROR, "rm->audio_framesize * sub_packet_h too large\n");
                return -1;
            }

            av_new_packet(&ast->pkt, ast->audio_framesize * sub_packet_h);
        } else if (!strcmp(buf, "raac") || !strcmp(buf, "racp")) {
            int codecdata_length;
            get_be16(pb); get_byte(pb);
            if (((version >> 16) & 0xff) == 5)
                get_byte(pb);
            st->codec->codec_id = CODEC_ID_AAC;
            codecdata_length = get_be32(pb);
            if(codecdata_length + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)codecdata_length){
                av_log(s, AV_LOG_ERROR, "codecdata_length too large\n");
                return -1;
            }
            if (codecdata_length >= 1) {
                st->codec->extradata_size = codecdata_length - 1;
                st->codec->extradata = av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                get_byte(pb);
                get_buffer(pb, st->codec->extradata, st->codec->extradata_size);
            }
        } else {
            st->codec->codec_id = CODEC_ID_NONE;
            av_strlcpy(st->codec->codec_name, buf, sizeof(st->codec->codec_name));
        }
        if (read_all) {
            get_byte(pb);
            get_byte(pb);
            get_byte(pb);
            rm_read_metadata(s, 0);
        }
    }
    return 0;
}
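
/* Parse the type-specific data of an MDPR chunk: either a RealAudio header
 * (delegated to rm_read_audio_stream_info above) or a "VIDO" header, from
 * which the RealVideo codec tag, dimensions, frame rate and extradata are
 * taken.  Any unrecognized codec data is skipped. */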
int
ff_rm_read_mdpr_codecdata (AVFormatContext *s, ByteIOContext *pb,
                           AVStream *st, RMStream *rst, int codec_data_size)
{
    unsigned int v;
    int size;
    int64_t codec_pos;

    av_set_pts_info(st, 64, 1, 1000);
    codec_pos = url_ftell(pb);
    v = get_be32(pb);
    if (v == MKTAG(0xfd, 'a', 'r', '.')) {
        /* ra type header */
        if (rm_read_audio_stream_info(s, pb, st, rst, 0))
            return -1;
    } else {
        int fps, fps2;
        if (get_le32(pb) != MKTAG('V', 'I', 'D', 'O')) {
        fail1:
            av_log(st->codec, AV_LOG_ERROR, "Unsupported video codec\n");
            goto skip;
        }
        st->codec->codec_tag = get_le32(pb);
//        av_log(s, AV_LOG_DEBUG, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
        if (   st->codec->codec_tag != MKTAG('R', 'V', '1', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', '2', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', '3', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', '4', '0')
            && st->codec->codec_tag != MKTAG('R', 'V', 'T', 'R'))
            goto fail1;
        st->codec->width = get_be16(pb);
        st->codec->height = get_be16(pb);
        st->codec->time_base.num= 1;
        fps= get_be16(pb);
        st->codec->codec_type = CODEC_TYPE_VIDEO;
        get_be32(pb);
        fps2= get_be16(pb);
        get_be16(pb);

        st->codec->extradata_size= codec_data_size - (url_ftell(pb) - codec_pos);

        if(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE <= (unsigned)st->codec->extradata_size){
            //check is redundant as get_buffer() will catch this
            av_log(s, AV_LOG_ERROR, "st->codec->extradata_size too large\n");
            return -1;
        }
        st->codec->extradata= av_mallocz(st->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!st->codec->extradata)
            return AVERROR(ENOMEM);
        get_buffer(pb, st->codec->extradata, st->codec->extradata_size);

//        av_log(s, AV_LOG_DEBUG, "fps= %d fps2= %d\n", fps, fps2);
        st->codec->time_base.den = fps * st->codec->time_base.num;
        switch(((uint8_t*)st->codec->extradata)[4]>>4){
        case 1: st->codec->codec_id = CODEC_ID_RV10; break;
        case 2: st->codec->codec_id = CODEC_ID_RV20; break;
        case 3: st->codec->codec_id = CODEC_ID_RV30; break;
        case 4: st->codec->codec_id = CODEC_ID_RV40; break;
        default: goto fail1;
        }
    }

skip:
    /* skip codec info */
    size = url_ftell(pb) - codec_pos;
    url_fskip(pb, codec_data_size - size);

    return 0;
}
static int rm_read_header_old(AVFormatContext *s, AVFormatParameters *ap)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;

    rm->old_format = 1;
    st = av_new_stream(s, 0);
    if (!st)
        return -1;
    st->priv_data = ff_rm_alloc_rmstream();
    return rm_read_audio_stream_info(s, s->pb, st, st->priv_data, 1);
}
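
/* Parse the ".RMF" file header: walk the chunk list (PROP, CONT, MDPR, ...)
 * until the DATA chunk is reached, creating one AVStream per MDPR chunk. */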
static int rm_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    RMDemuxContext *rm = s->priv_data;
    AVStream *st;
    ByteIOContext *pb = s->pb;
    unsigned int tag;
    int tag_size;
    unsigned int start_time, duration;
    char buf[128];
    int flags = 0;

    tag = get_le32(pb);
    if (tag == MKTAG('.', 'r', 'a', 0xfd)) {
        /* very old .ra format */
        return rm_read_header_old(s, ap);
    } else if (tag != MKTAG('.', 'R', 'M', 'F')) {
        return AVERROR(EIO);
    }

    get_be32(pb); /* header size */
    get_be16(pb);
    get_be32(pb);
    get_be32(pb); /* number of headers */

    for(;;) {
        if (url_feof(pb))
            return -1;
        tag = get_le32(pb);
        tag_size = get_be32(pb);
        get_be16(pb);
#if 0
        printf("tag=%c%c%c%c (%08x) size=%d\n",
               (tag) & 0xff,
               (tag >> 8) & 0xff,
               (tag >> 16) & 0xff,
               (tag >> 24) & 0xff,
               tag,
               tag_size);
#endif
        if (tag_size < 10 && tag != MKTAG('D', 'A', 'T', 'A'))
            return -1;
        switch(tag) {
        case MKTAG('P', 'R', 'O', 'P'):
            /* file header */
            get_be32(pb); /* max bit rate */
            get_be32(pb); /* avg bit rate */
            get_be32(pb); /* max packet size */
            get_be32(pb); /* avg packet size */
            get_be32(pb); /* nb packets */
            get_be32(pb); /* duration */
            get_be32(pb); /* preroll */
            get_be32(pb); /* index offset */
            get_be32(pb); /* data offset */
            get_be16(pb); /* nb streams */
            flags = get_be16(pb); /* flags */
            break;
        case MKTAG('C', 'O', 'N', 'T'):
            rm_read_metadata(s, 1);
            break;
        case MKTAG('M', 'D', 'P', 'R'):
            st = av_new_stream(s, 0);
            if (!st)
                return AVERROR(ENOMEM);
            st->id = get_be16(pb);
            get_be32(pb); /* max bit rate */
            st->codec->bit_rate = get_be32(pb); /* bit rate */
            get_be32(pb); /* max packet size */
            get_be32(pb); /* avg packet size */
            start_time = get_be32(pb); /* start time */
            get_be32(pb); /* preroll */
            duration = get_be32(pb); /* duration */
            st->start_time = start_time;
            st->duration = duration;
            get_str8(pb, buf, sizeof(buf)); /* desc */
            get_str8(pb, buf, sizeof(buf)); /* mimetype */
            st->codec->codec_type = CODEC_TYPE_DATA;
            st->priv_data = ff_rm_alloc_rmstream();
            if (ff_rm_read_mdpr_codecdata(s, s->pb, st, st->priv_data,
                                          get_be32(pb)) < 0)
                return -1;
            break;
        case MKTAG('D', 'A', 'T', 'A'):
            goto header_end;
        default:
            /* unknown tag: skip it */
            url_fskip(pb, tag_size - 10);
            break;
        }
    }
 header_end:
    rm->nb_packets = get_be32(pb); /* number of packets */
    if (!rm->nb_packets && (flags & 4))
        rm->nb_packets = 3600 * 25;
    get_be32(pb); /* next data header */
    return 0;
}
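
/* Read a variable-length integer as used in video slice headers: a 16-bit
 * word whose 0x4000 bit is set carries a 14-bit value directly; otherwise a
 * second 16-bit word follows and the two together form a larger value.
 * *len is decremented by the number of bytes consumed. */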
static int get_num(ByteIOContext *pb, int *len)
{
    int n, n1;

    n = get_be16(pb);
    (*len)-=2;
    n &= 0x7FFF;
    if (n >= 0x4000) {
        return n - 0x4000;
    } else {
        n1 = get_be16(pb);
        (*len)-=2;
        return (n << 16) | n1;
    }
}
/* multiple of 20 bytes for ra144 (ugly) */
#define RAW_PACKET_SIZE 1000
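
/* Scan the DATA section for the next media packet header belonging to a
 * known stream, filling in its timestamp, flags, stream index and file
 * position.  INDX chunks encountered along the way are skipped.  Returns the
 * remaining payload length, or -1 on end of file. */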
static int sync(AVFormatContext *s, int64_t *timestamp, int *flags, int *stream_index, int64_t *pos){
    RMDemuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    int len, num, res, i;
    AVStream *st;
    uint32_t state=0xFFFFFFFF;

    while(!url_feof(pb)){
        *pos= url_ftell(pb) - 3;
        if(rm->remaining_len > 0){
            num= rm->current_stream;
            len= rm->remaining_len;
            *timestamp = AV_NOPTS_VALUE;
            *flags= 0;
        }else{
            state= (state<<8) + get_byte(pb);

            if(state == MKBETAG('I', 'N', 'D', 'X')){
                len = get_be16(pb) - 6;
                if(len<0)
                    continue;
                goto skip;
            }

            if(state > (unsigned)0xFFFF || state < 12)
                continue;
            len=state;
            state= 0xFFFFFFFF;

            num = get_be16(pb);
            *timestamp = get_be32(pb);
            res= get_byte(pb); /* reserved */
            *flags = get_byte(pb); /* flags */

            len -= 12;
        }
        for(i=0;i<s->nb_streams;i++) {
            st = s->streams[i];
            if (num == st->id)
                break;
        }
        if (i == s->nb_streams) {
skip:
            /* skip packet if unknown number */
            url_fskip(pb, len);
            rm->remaining_len -= len;
            continue;
        }
        *stream_index= i;

        return len;
    }
    return -1;
}
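
/* Reassemble a video frame from the slices carried in DATA packets.  The two
 * high bits of the first header byte give the packet type: 1 = a whole frame,
 * 3 = one of several small frames packed into a single packet, 0/2 = a
 * partial slice accumulated in vst->pkt (2 marking the final slice of the
 * frame).  Returns 0 when a complete frame has been written to pkt, 1 while
 * more slices are still needed, negative on error. */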
static int rm_assemble_video_frame(AVFormatContext *s, ByteIOContext *pb,
                                   RMDemuxContext *rm, RMStream *vst,
                                   AVPacket *pkt, int len)
{
    int hdr, seq, pic_num, len2, pos;
    int type;

    hdr = get_byte(pb); len--;
    type = hdr >> 6;
    if(type != 3){  // not frame as a part of packet
        seq = get_byte(pb); len--;
    }
    if(type != 1){  // not whole frame
        len2 = get_num(pb, &len);
        pos  = get_num(pb, &len);
        pic_num = get_byte(pb); len--;
    }
    if(len<0)
        return -1;
    rm->remaining_len = len;
    if(type&1){     // frame, not slice
        if(type == 3)  // frame as a part of packet
            len= len2;
        if(rm->remaining_len < len)
            return -1;
        rm->remaining_len -= len;
        if(av_new_packet(pkt, len + 9) < 0)
            return AVERROR(EIO);
        pkt->data[0] = 0;
        AV_WL32(pkt->data + 1, 1);
        AV_WL32(pkt->data + 5, 0);
        get_buffer(pb, pkt->data + 9, len);
        return 0;
    }
    //now we have to deal with single slice

    if((seq & 0x7F) == 1 || vst->curpic_num != pic_num){
        vst->slices = ((hdr & 0x3F) << 1) + 1;
        vst->videobufsize = len2 + 8*vst->slices + 1;
        av_free_packet(&vst->pkt); //FIXME this should be output.
        if(av_new_packet(&vst->pkt, vst->videobufsize) < 0)
            return AVERROR(ENOMEM);
        vst->videobufpos = 8*vst->slices + 1;
        vst->cur_slice = 0;
        vst->curpic_num = pic_num;
        vst->pktpos = url_ftell(pb);
    }
    if(type == 2)
        len = FFMIN(len, pos);

    if(++vst->cur_slice > vst->slices)
        return 1;
    AV_WL32(vst->pkt.data - 7 + 8*vst->cur_slice, 1);
    AV_WL32(vst->pkt.data - 3 + 8*vst->cur_slice, vst->videobufpos - 8*vst->slices - 1);
    if(vst->videobufpos + len > vst->videobufsize)
        return 1;
    if (get_buffer(pb, vst->pkt.data + vst->videobufpos, len) != len)
        return AVERROR(EIO);
    vst->videobufpos += len;
    rm->remaining_len-= len;

    if(type == 2 || (vst->videobufpos) == vst->videobufsize){
        vst->pkt.data[0] = vst->cur_slice-1;
        *pkt= vst->pkt;
        vst->pkt.data= NULL;
        vst->pkt.size= 0;
        if(vst->slices != vst->cur_slice) //FIXME find out how to set slices correct from the begin
            memmove(pkt->data + 1 + 8*vst->cur_slice, pkt->data + 1 + 8*vst->slices,
                    vst->videobufpos - 1 - 8*vst->slices);
        pkt->size = vst->videobufpos + 8*(vst->cur_slice - vst->slices);
        pkt->pts = AV_NOPTS_VALUE;
        pkt->pos = vst->pktpos;
        return 0;
    }

    return 1;
}
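
/* "dnet" audio is AC-3 with each 16-bit word byte-swapped; swap the payload
 * back to normal AC-3 byte order so the parser and decoder accept it. */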
static inline void
rm_ac3_swap_bytes (AVStream *st, AVPacket *pkt)
{
    uint8_t *ptr;
    int j;

    if (st->codec->codec_id == CODEC_ID_AC3) {
        ptr = pkt->data;
        for (j=0;j<pkt->size;j+=2) {
            FFSWAP(int, ptr[0], ptr[1]);
            ptr += 2;
        }
    }
}
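
/* Parse one raw DATA packet payload.  Video payloads go through the slice
 * reassembler above; interleaved audio (28_8, cook, atrc, sipr) is
 * descrambled into ast->pkt across sub_packet_h consecutive packets, and the
 * first deinterleaved frame is emitted once the block is complete, the rest
 * being left for ff_rm_retrieve_cache().  Returns the count of additional
 * audio frames still queued (0 for video), or a negative value if no output
 * packet was produced. */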
int
ff_rm_parse_packet (AVFormatContext *s, ByteIOContext *pb,
                    AVStream *st, RMStream *ast, int len, AVPacket *pkt,
                    int *seq, int *flags, int64_t *timestamp)
{
    RMDemuxContext *rm = s->priv_data;

    if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
        rm->current_stream= st->id;
        if(rm_assemble_video_frame(s, pb, rm, ast, pkt, len))
            return -1; //got partial frame
    } else if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
        if ((st->codec->codec_id == CODEC_ID_RA_288) ||
            (st->codec->codec_id == CODEC_ID_COOK) ||
            (st->codec->codec_id == CODEC_ID_ATRAC3) ||
            (st->codec->codec_id == CODEC_ID_SIPR)) {
            int x;
            int sps = ast->sub_packet_size;
            int cfs = ast->coded_framesize;
            int h = ast->sub_packet_h;
            int y = ast->sub_packet_cnt;
            int w = ast->audio_framesize;

            if (*flags & 2)
                y = ast->sub_packet_cnt = 0;
            if (!y)
                ast->audiotimestamp = *timestamp;

            switch(st->codec->codec_id) {
                case CODEC_ID_RA_288:
                    for (x = 0; x < h/2; x++)
                        get_buffer(pb, ast->pkt.data+x*2*w+y*cfs, cfs);
                    break;
                case CODEC_ID_ATRAC3:
                case CODEC_ID_COOK:
                    for (x = 0; x < w/sps; x++)
                        get_buffer(pb, ast->pkt.data+sps*(h*x+((h+1)/2)*(y&1)+(y>>1)), sps);
                    break;
            }

            if (++(ast->sub_packet_cnt) < h)
                return -1;
            else {
                ast->sub_packet_cnt = 0;
                rm->audio_stream_num = st->index;
                rm->audio_pkt_cnt = h * w / st->codec->block_align - 1;
                // Release first audio packet
                av_new_packet(pkt, st->codec->block_align);
                memcpy(pkt->data, ast->pkt.data, st->codec->block_align); //FIXME avoid this
                *timestamp = ast->audiotimestamp;
                *flags = 2; // Mark first packet as keyframe
            }
        } else if (st->codec->codec_id == CODEC_ID_AAC) {
            int x;
            rm->audio_stream_num = st->index;
            ast->sub_packet_cnt = (get_be16(pb) & 0xf0) >> 4;
            if (ast->sub_packet_cnt) {
                for (x = 0; x < ast->sub_packet_cnt; x++)
                    ast->sub_packet_lengths[x] = get_be16(pb);
                // Release first audio packet
                rm->audio_pkt_cnt = ast->sub_packet_cnt - 1;
                av_get_packet(pb, pkt, ast->sub_packet_lengths[0]);
                *flags = 2; // Mark first packet as keyframe
            }
        } else {
            av_get_packet(pb, pkt, len);
            rm_ac3_swap_bytes(st, pkt);
        }
    } else
        av_get_packet(pb, pkt, len);

    if(  (st->discard >= AVDISCARD_NONKEY && !(*flags&2))
       || st->discard >= AVDISCARD_ALL){
        av_free_packet(pkt);
        return -1;
    }

    pkt->stream_index = st->index;

#if 0
    if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
        if(st->codec->codec_id == CODEC_ID_RV20){
            int seq= 128*(pkt->data[2]&0x7F) + (pkt->data[3]>>1);
            av_log(s, AV_LOG_DEBUG, "%d %"PRId64" %d\n", *timestamp, *timestamp*512LL/25, seq);

            seq |= (*timestamp&~0x3FFF);
            if(seq - *timestamp >  0x2000) seq -= 0x4000;
            if(seq - *timestamp < -0x2000) seq += 0x4000;
        }
    }
#endif

    pkt->pts= *timestamp;
    if (*flags & 2)
        pkt->flags |= PKT_FLAG_KEY;

    return st->codec->codec_type == CODEC_TYPE_AUDIO ? rm->audio_pkt_cnt : 0;
}
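
/* Return one of the audio frames left over from the last descrambled audio
 * block (or the next stored AAC subpacket), decrementing the queue counter. */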
int
ff_rm_retrieve_cache (AVFormatContext *s, ByteIOContext *pb,
                      AVStream *st, RMStream *ast, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;

    assert (rm->audio_pkt_cnt > 0);

    if (st->codec->codec_id == CODEC_ID_AAC)
        av_get_packet(pb, pkt, ast->sub_packet_lengths[ast->sub_packet_cnt - rm->audio_pkt_cnt]);
    else {
        av_new_packet(pkt, st->codec->block_align);
        memcpy(pkt->data, ast->pkt.data + st->codec->block_align * //FIXME avoid this
               (ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - rm->audio_pkt_cnt),
               st->codec->block_align);
    }
    rm->audio_pkt_cnt--;
    pkt->flags = 0;
    pkt->stream_index = st->index;

    return rm->audio_pkt_cnt;
}
static int rm_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    RMDemuxContext *rm = s->priv_data;
    ByteIOContext *pb = s->pb;
    AVStream *st;
    int i, len;
    int64_t timestamp, pos;
    int flags;

    if (rm->audio_pkt_cnt) {
        // If there are queued audio packets, return them first
        st = s->streams[rm->audio_stream_num];
        ff_rm_retrieve_cache(s, s->pb, st, st->priv_data, pkt);
    } else if (rm->old_format) {
        RMStream *ast;

        st = s->streams[0];
        ast = st->priv_data;
        if (st->codec->codec_id == CODEC_ID_RA_288) {
            int x, y;

            for (y = 0; y < ast->sub_packet_h; y++)
                for (x = 0; x < ast->sub_packet_h/2; x++)
                    if (get_buffer(pb, ast->pkt.data+x*2*ast->audio_framesize+y*ast->coded_framesize, ast->coded_framesize) <= 0)
                        return AVERROR(EIO);
            rm->audio_stream_num = 0;
            rm->audio_pkt_cnt = ast->sub_packet_h * ast->audio_framesize / st->codec->block_align - 1;
            // Release first audio packet
            av_new_packet(pkt, st->codec->block_align);
            memcpy(pkt->data, ast->pkt.data, st->codec->block_align); //FIXME avoid this
            pkt->flags |= PKT_FLAG_KEY; // Mark first packet as keyframe
            pkt->stream_index = 0;
        } else {
            /* just read raw bytes */
            len = RAW_PACKET_SIZE;
            len= av_get_packet(pb, pkt, len);
            pkt->stream_index = 0;
            if (len <= 0) {
                return AVERROR(EIO);
            }
            pkt->size = len;
        }
        rm_ac3_swap_bytes(st, pkt);
    } else {
        int seq=1;
resync:
        len=sync(s, &timestamp, &flags, &i, &pos);
        if(len<0)
            return AVERROR(EIO);
        st = s->streams[i];

        if (ff_rm_parse_packet (s, s->pb, st, st->priv_data, len, pkt,
                                &seq, &flags, &timestamp) < 0)
            goto resync;

        if((flags&2) && (seq&0x7F) == 1)
            av_add_index_entry(st, pos, timestamp, 0, 0, AVINDEX_KEYFRAME);
    }

    return 0;
}
static int rm_read_close(AVFormatContext *s)
{
    int i;

    for (i=0;i<s->nb_streams;i++)
        ff_rm_free_rmstream(s->streams[i]->priv_data);

    return 0;
}
static int rm_probe(AVProbeData *p)
{
    /* check file header */
    if ((p->buf[0] == '.' && p->buf[1] == 'R' &&
         p->buf[2] == 'M' && p->buf[3] == 'F' &&
         p->buf[4] == 0 && p->buf[5] == 0) ||
        (p->buf[0] == '.' && p->buf[1] == 'r' &&
         p->buf[2] == 'a' && p->buf[3] == 0xfd))
        return AVPROBE_SCORE_MAX;
    else
        return 0;
}
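
/* Used by the generic seek code: starting from *ppos, scan forward for the
 * next keyframe of stream_index, adding index entries for keyframes found on
 * the way, and return its timestamp (its position is stored back in *ppos). */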
static int64_t rm_read_dts(AVFormatContext *s, int stream_index,
                           int64_t *ppos, int64_t pos_limit)
{
    RMDemuxContext *rm = s->priv_data;
    int64_t pos, dts;
    int stream_index2, flags, len, h;

    pos = *ppos;

    if(rm->old_format)
        return AV_NOPTS_VALUE;

    url_fseek(s->pb, pos, SEEK_SET);
    rm->remaining_len=0;
    for(;;){
        int seq=1;
        AVStream *st;

        len=sync(s, &dts, &flags, &stream_index2, &pos);
        if(len<0)
            return AV_NOPTS_VALUE;

        st = s->streams[stream_index2];
        if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
            h= get_byte(s->pb); len--;
            if(!(h & 0x40)){
                seq = get_byte(s->pb); len--;
            }
        }

        if((flags&2) && (seq&0x7F) == 1){
//            av_log(s, AV_LOG_DEBUG, "%d %d-%d %"PRId64" %d\n", flags, stream_index2, stream_index, dts, seq);
            av_add_index_entry(st, pos, dts, 0, 0, AVINDEX_KEYFRAME);
            if(stream_index2 == stream_index)
                break;
        }

        url_fskip(s->pb, len);
    }
    *ppos = pos;
    return dts;
}
AVInputFormat rm_demuxer = {
    "rm",
    NULL_IF_CONFIG_SMALL("RealMedia format"),
    sizeof(RMDemuxContext),
    rm_probe,
    rm_read_header,
    rm_read_packet,
    rm_read_close,
    NULL,
    rm_read_dts,
};

AVInputFormat rdt_demuxer = {
    "rdt",
    NULL_IF_CONFIG_SMALL("RDT demuxer"),
    sizeof(RMDemuxContext),
    NULL,
    NULL,
    NULL,
    rm_read_close,
};