  1. /*
  2. * RTMP network protocol
  3. * Copyright (c) 2009 Kostya Shishkov
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * RTMP protocol
  24. */
  25. #include "libavcodec/bytestream.h"
  26. #include "libavutil/avstring.h"
  27. #include "libavutil/intfloat.h"
  28. #include "libavutil/lfg.h"
  29. #include "libavutil/opt.h"
  30. #include "libavutil/sha.h"
  31. #include "avformat.h"
  32. #include "internal.h"
  33. #include "network.h"
  34. #include "flv.h"
  35. #include "rtmp.h"
  36. #include "rtmppkt.h"
  37. #include "url.h"
  38. //#define DEBUG
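/* maximum lengths of the string buffers allocated in rtmp_open() */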
  39. #define APP_MAX_LENGTH 128
  40. #define PLAYPATH_MAX_LENGTH 256
  41. #define TCURL_MAX_LENGTH 512
  42. #define FLASHVER_MAX_LENGTH 64
  43. /** RTMP protocol handler state */
  44. typedef enum {
  45. STATE_START, ///< client has not done anything yet
  46. STATE_HANDSHAKED, ///< client has performed handshake
47. STATE_RELEASING, ///< client releasing stream before publishing it (for output)
  48. STATE_FCPUBLISH, ///< client FCPublishing stream (for output)
  49. STATE_CONNECTING, ///< client connected to server successfully
  50. STATE_READY, ///< client has sent all needed commands and waits for server reply
  51. STATE_PLAYING, ///< client has started receiving multimedia data from server
  52. STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
  53. STATE_STOPPED, ///< the broadcast has been stopped
  54. } ClientState;
  55. /** protocol handler context */
  56. typedef struct RTMPContext {
  57. const AVClass *class;
  58. URLContext* stream; ///< TCP stream used in interactions with RTMP server
  59. RTMPPacket prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
  60. int chunk_size; ///< size of the chunks RTMP packets are divided into
  61. int is_input; ///< input/output flag
  62. char *playpath; ///< stream identifier to play (with possible "mp4:" prefix)
  63. int live; ///< 0: recorded, -1: live, -2: both
  64. char *app; ///< name of application
  65. char *conn; ///< append arbitrary AMF data to the Connect message
  66. ClientState state; ///< current state
  67. int main_channel_id; ///< an additional channel ID which is used for some invocations
  68. uint8_t* flv_data; ///< buffer with data for demuxer
  69. int flv_size; ///< current buffer size
  70. int flv_off; ///< number of bytes read from current buffer
  71. int flv_nb_packets; ///< number of flv packets published
  72. RTMPPacket out_pkt; ///< rtmp packet, created from flv a/v or metadata (for output)
  73. uint32_t client_report_size; ///< number of bytes after which client should report to server
  74. uint32_t bytes_read; ///< number of bytes read from server
  75. uint32_t last_bytes_read; ///< number of bytes read last reported to server
  76. int skip_bytes; ///< number of bytes to skip from the input FLV stream in the next write call
  77. uint8_t flv_header[11]; ///< partial incoming flv packet header
  78. int flv_header_bytes; ///< number of initialized bytes in flv_header
  79. int nb_invokes; ///< keeps track of invoke messages
  80. int create_stream_invoke; ///< invoke id for the create stream command
  81. char* tcurl; ///< url of the target stream
  82. char* flashver; ///< version of the flash plugin
  83. char* swfurl; ///< url of the swf player
  84. int server_bw; ///< server bandwidth
  85. int client_buffer_time; ///< client buffer time in ms
  86. int flush_interval; ///< number of packets flushed in the same request (RTMPT only)
  87. } RTMPContext;
  88. #define PLAYER_KEY_OPEN_PART_LEN 30 ///< length of partial key used for first client digest signing
  89. /** Client key used for digest signing */
  90. static const uint8_t rtmp_player_key[] = {
  91. 'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
  92. 'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',
  93. 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
  94. 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
  95. 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
  96. };
  97. #define SERVER_KEY_OPEN_PART_LEN 36 ///< length of partial key used for first server digest signing
  98. /** Key used for RTMP server digest signing */
  99. static const uint8_t rtmp_server_key[] = {
  100. 'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
  101. 'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
  102. 'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',
  103. 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
  104. 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
  105. 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
  106. };
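/**
* Parse one parameter of the rtmp_conn option and append the corresponding
* AMF-encoded value at the position pointed to by p. The accepted format is
* described in the comment inside the function.
*/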
  107. static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
  108. {
  109. char *field, *value;
  110. char type;
  111. /* The type must be B for Boolean, N for number, S for string, O for
  112. * object, or Z for null. For Booleans the data must be either 0 or 1 for
  113. * FALSE or TRUE, respectively. Likewise for Objects the data must be
  114. * 0 or 1 to end or begin an object, respectively. Data items in subobjects
115. * may be named by prefixing the type with 'N' and specifying the name
116. * before the value (i.e. NB:myFlag:1). This option may be used multiple times
  117. * to construct arbitrary AMF sequences. */
  118. if (param[0] && param[1] == ':') {
  119. type = param[0];
  120. value = param + 2;
  121. } else if (param[0] == 'N' && param[1] && param[2] == ':') {
  122. type = param[1];
  123. field = param + 3;
  124. value = strchr(field, ':');
  125. if (!value)
  126. goto fail;
  127. *value = '\0';
  128. value++;
  129. if (!field || !value)
  130. goto fail;
  131. ff_amf_write_field_name(p, field);
  132. } else {
  133. goto fail;
  134. }
  135. switch (type) {
  136. case 'B':
  137. ff_amf_write_bool(p, value[0] != '0');
  138. break;
  139. case 'S':
  140. ff_amf_write_string(p, value);
  141. break;
  142. case 'N':
  143. ff_amf_write_number(p, strtod(value, NULL));
  144. break;
  145. case 'Z':
  146. ff_amf_write_null(p);
  147. break;
  148. case 'O':
  149. if (value[0] != '0')
  150. ff_amf_write_object_start(p);
  151. else
  152. ff_amf_write_object_end(p);
  153. break;
  154. default:
  155. goto fail;
  156. break;
  157. }
  158. return 0;
  159. fail:
  160. av_log(s, AV_LOG_ERROR, "Invalid AMF parameter: %s\n", param);
  161. return AVERROR(EINVAL);
  162. }
  163. /**
  164. * Generate 'connect' call and send it to the server.
  165. */
  166. static int gen_connect(URLContext *s, RTMPContext *rt)
  167. {
  168. RTMPPacket pkt;
  169. uint8_t *p;
  170. int ret;
  171. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  172. 0, 4096)) < 0)
  173. return ret;
  174. p = pkt.data;
  175. ff_amf_write_string(&p, "connect");
  176. ff_amf_write_number(&p, ++rt->nb_invokes);
  177. ff_amf_write_object_start(&p);
  178. ff_amf_write_field_name(&p, "app");
  179. ff_amf_write_string(&p, rt->app);
  180. if (!rt->is_input) {
  181. ff_amf_write_field_name(&p, "type");
  182. ff_amf_write_string(&p, "nonprivate");
  183. }
  184. ff_amf_write_field_name(&p, "flashVer");
  185. ff_amf_write_string(&p, rt->flashver);
  186. if (rt->swfurl) {
  187. ff_amf_write_field_name(&p, "swfUrl");
  188. ff_amf_write_string(&p, rt->swfurl);
  189. }
  190. ff_amf_write_field_name(&p, "tcUrl");
  191. ff_amf_write_string(&p, rt->tcurl);
  192. if (rt->is_input) {
  193. ff_amf_write_field_name(&p, "fpad");
  194. ff_amf_write_bool(&p, 0);
  195. ff_amf_write_field_name(&p, "capabilities");
  196. ff_amf_write_number(&p, 15.0);
  197. /* Tell the server we support all the audio codecs except
  198. * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
  199. * which are unused in the RTMP protocol implementation. */
  200. ff_amf_write_field_name(&p, "audioCodecs");
  201. ff_amf_write_number(&p, 4071.0);
  202. ff_amf_write_field_name(&p, "videoCodecs");
  203. ff_amf_write_number(&p, 252.0);
  204. ff_amf_write_field_name(&p, "videoFunction");
  205. ff_amf_write_number(&p, 1.0);
  206. }
  207. ff_amf_write_object_end(&p);
  208. if (rt->conn) {
  209. char *param = rt->conn;
  210. // Write arbitrary AMF data to the Connect message.
  211. while (param != NULL) {
  212. char *sep;
  213. param += strspn(param, " ");
  214. if (!*param)
  215. break;
  216. sep = strchr(param, ' ');
  217. if (sep)
  218. *sep = '\0';
  219. if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) {
  220. // Invalid AMF parameter.
  221. ff_rtmp_packet_destroy(&pkt);
  222. return ret;
  223. }
  224. if (sep)
  225. param = sep + 1;
  226. else
  227. break;
  228. }
  229. }
  230. pkt.data_size = p - pkt.data;
  231. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  232. rt->prev_pkt[1]);
  233. ff_rtmp_packet_destroy(&pkt);
  234. return ret;
  235. }
  236. /**
  237. * Generate 'releaseStream' call and send it to the server. It should make
  238. * the server release some channel for media streams.
  239. */
  240. static int gen_release_stream(URLContext *s, RTMPContext *rt)
  241. {
  242. RTMPPacket pkt;
  243. uint8_t *p;
  244. int ret;
  245. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  246. 0, 29 + strlen(rt->playpath))) < 0)
  247. return ret;
  248. av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
  249. p = pkt.data;
  250. ff_amf_write_string(&p, "releaseStream");
  251. ff_amf_write_number(&p, ++rt->nb_invokes);
  252. ff_amf_write_null(&p);
  253. ff_amf_write_string(&p, rt->playpath);
  254. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  255. rt->prev_pkt[1]);
  256. ff_rtmp_packet_destroy(&pkt);
  257. return ret;
  258. }
  259. /**
  260. * Generate 'FCPublish' call and send it to the server. It should make
261. * the server prepare for receiving media streams.
  262. */
  263. static int gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
  264. {
  265. RTMPPacket pkt;
  266. uint8_t *p;
  267. int ret;
  268. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  269. 0, 25 + strlen(rt->playpath))) < 0)
  270. return ret;
  271. av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
  272. p = pkt.data;
  273. ff_amf_write_string(&p, "FCPublish");
  274. ff_amf_write_number(&p, ++rt->nb_invokes);
  275. ff_amf_write_null(&p);
  276. ff_amf_write_string(&p, rt->playpath);
  277. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  278. rt->prev_pkt[1]);
  279. ff_rtmp_packet_destroy(&pkt);
  280. return ret;
  281. }
  282. /**
  283. * Generate 'FCUnpublish' call and send it to the server. It should make
284. * the server destroy the stream.
  285. */
  286. static int gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
  287. {
  288. RTMPPacket pkt;
  289. uint8_t *p;
  290. int ret;
  291. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  292. 0, 27 + strlen(rt->playpath))) < 0)
  293. return ret;
  294. av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
  295. p = pkt.data;
  296. ff_amf_write_string(&p, "FCUnpublish");
  297. ff_amf_write_number(&p, ++rt->nb_invokes);
  298. ff_amf_write_null(&p);
  299. ff_amf_write_string(&p, rt->playpath);
  300. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  301. rt->prev_pkt[1]);
  302. ff_rtmp_packet_destroy(&pkt);
  303. return ret;
  304. }
  305. /**
  306. * Generate 'createStream' call and send it to the server. It should make
  307. * the server allocate some channel for media streams.
  308. */
  309. static int gen_create_stream(URLContext *s, RTMPContext *rt)
  310. {
  311. RTMPPacket pkt;
  312. uint8_t *p;
  313. int ret;
  314. av_log(s, AV_LOG_DEBUG, "Creating stream...\n");
  315. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  316. 0, 25)) < 0)
  317. return ret;
  318. p = pkt.data;
  319. ff_amf_write_string(&p, "createStream");
  320. ff_amf_write_number(&p, ++rt->nb_invokes);
  321. ff_amf_write_null(&p);
  322. rt->create_stream_invoke = rt->nb_invokes;
  323. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  324. rt->prev_pkt[1]);
  325. ff_rtmp_packet_destroy(&pkt);
  326. return ret;
  327. }
  328. /**
  329. * Generate 'deleteStream' call and send it to the server. It should make
  330. * the server remove some channel for media streams.
  331. */
  332. static int gen_delete_stream(URLContext *s, RTMPContext *rt)
  333. {
  334. RTMPPacket pkt;
  335. uint8_t *p;
  336. int ret;
  337. av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");
  338. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  339. 0, 34)) < 0)
  340. return ret;
  341. p = pkt.data;
  342. ff_amf_write_string(&p, "deleteStream");
  343. ff_amf_write_number(&p, ++rt->nb_invokes);
  344. ff_amf_write_null(&p);
  345. ff_amf_write_number(&p, rt->main_channel_id);
  346. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  347. rt->prev_pkt[1]);
  348. ff_rtmp_packet_destroy(&pkt);
  349. return ret;
  350. }
  351. /**
  352. * Generate client buffer time and send it to the server.
  353. */
  354. static int gen_buffer_time(URLContext *s, RTMPContext *rt)
  355. {
  356. RTMPPacket pkt;
  357. uint8_t *p;
  358. int ret;
  359. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
  360. 1, 10)) < 0)
  361. return ret;
  362. p = pkt.data;
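// user control event type 3 (SetBufferLength), followed by the stream ID and the buffer length in ms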
  363. bytestream_put_be16(&p, 3);
  364. bytestream_put_be32(&p, rt->main_channel_id);
  365. bytestream_put_be32(&p, rt->client_buffer_time);
  366. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  367. rt->prev_pkt[1]);
  368. ff_rtmp_packet_destroy(&pkt);
  369. return ret;
  370. }
  371. /**
  372. * Generate 'play' call and send it to the server, then ping the server
  373. * to start actual playing.
  374. */
  375. static int gen_play(URLContext *s, RTMPContext *rt)
  376. {
  377. RTMPPacket pkt;
  378. uint8_t *p;
  379. int ret;
  380. av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
  381. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE,
  382. 0, 29 + strlen(rt->playpath))) < 0)
  383. return ret;
  384. pkt.extra = rt->main_channel_id;
  385. p = pkt.data;
  386. ff_amf_write_string(&p, "play");
  387. ff_amf_write_number(&p, ++rt->nb_invokes);
  388. ff_amf_write_null(&p);
  389. ff_amf_write_string(&p, rt->playpath);
  390. ff_amf_write_number(&p, rt->live);
  391. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  392. rt->prev_pkt[1]);
  393. ff_rtmp_packet_destroy(&pkt);
  394. return ret;
  395. }
  396. /**
  397. * Generate 'publish' call and send it to the server.
  398. */
  399. static int gen_publish(URLContext *s, RTMPContext *rt)
  400. {
  401. RTMPPacket pkt;
  402. uint8_t *p;
  403. int ret;
  404. av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
  405. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
  406. 0, 30 + strlen(rt->playpath))) < 0)
  407. return ret;
  408. pkt.extra = rt->main_channel_id;
  409. p = pkt.data;
  410. ff_amf_write_string(&p, "publish");
  411. ff_amf_write_number(&p, ++rt->nb_invokes);
  412. ff_amf_write_null(&p);
  413. ff_amf_write_string(&p, rt->playpath);
  414. ff_amf_write_string(&p, "live");
  415. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  416. rt->prev_pkt[1]);
  417. ff_rtmp_packet_destroy(&pkt);
  418. return ret;
  419. }
  420. /**
  421. * Generate ping reply and send it to the server.
  422. */
  423. static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
  424. {
  425. RTMPPacket pkt;
  426. uint8_t *p;
  427. int ret;
  428. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
  429. ppkt->timestamp + 1, 6)) < 0)
  430. return ret;
  431. p = pkt.data;
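// user control event type 7 (PingResponse), echoing the timestamp from the server's ping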
  432. bytestream_put_be16(&p, 7);
  433. bytestream_put_be32(&p, AV_RB32(ppkt->data+2));
  434. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  435. rt->prev_pkt[1]);
  436. ff_rtmp_packet_destroy(&pkt);
  437. return ret;
  438. }
  439. /**
  440. * Generate server bandwidth message and send it to the server.
  441. */
  442. static int gen_server_bw(URLContext *s, RTMPContext *rt)
  443. {
  444. RTMPPacket pkt;
  445. uint8_t *p;
  446. int ret;
  447. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW,
  448. 0, 4)) < 0)
  449. return ret;
  450. p = pkt.data;
  451. bytestream_put_be32(&p, rt->server_bw);
  452. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  453. rt->prev_pkt[1]);
  454. ff_rtmp_packet_destroy(&pkt);
  455. return ret;
  456. }
  457. /**
  458. * Generate check bandwidth message and send it to the server.
  459. */
  460. static int gen_check_bw(URLContext *s, RTMPContext *rt)
  461. {
  462. RTMPPacket pkt;
  463. uint8_t *p;
  464. int ret;
  465. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  466. 0, 21)) < 0)
  467. return ret;
  468. p = pkt.data;
  469. ff_amf_write_string(&p, "_checkbw");
  470. ff_amf_write_number(&p, ++rt->nb_invokes);
  471. ff_amf_write_null(&p);
  472. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  473. rt->prev_pkt[1]);
  474. ff_rtmp_packet_destroy(&pkt);
  475. return ret;
  476. }
  477. /**
  478. * Generate report on bytes read so far and send it to the server.
  479. */
  480. static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
  481. {
  482. RTMPPacket pkt;
  483. uint8_t *p;
  484. int ret;
  485. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ,
  486. ts, 4)) < 0)
  487. return ret;
  488. p = pkt.data;
  489. bytestream_put_be32(&p, rt->bytes_read);
  490. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
  491. rt->prev_pkt[1]);
  492. ff_rtmp_packet_destroy(&pkt);
  493. return ret;
  494. }
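/**
* Calculate HMAC-SHA256 digest of a buffer.
*
* @param src    input data
* @param len    input data length, including the skipped gap (if any)
* @param gap    offset of a 32-byte hole excluded from hashing (the place
*               where the digest itself will be stored), or 0 for no gap
* @param key    HMAC key
* @param keylen HMAC key length
* @param dst    buffer receiving the 32-byte digest
* @return 0 on success, a negative AVERROR code on failure
*/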
  495. int ff_rtmp_calc_digest(const uint8_t *src, int len, int gap,
  496. const uint8_t *key, int keylen, uint8_t *dst)
  497. {
  498. struct AVSHA *sha;
  499. uint8_t hmac_buf[64+32] = {0};
  500. int i;
  501. sha = av_mallocz(av_sha_size);
  502. if (!sha)
  503. return AVERROR(ENOMEM);
  504. if (keylen < 64) {
  505. memcpy(hmac_buf, key, keylen);
  506. } else {
  507. av_sha_init(sha, 256);
508. av_sha_update(sha, key, keylen);
  509. av_sha_final(sha, hmac_buf);
  510. }
  511. for (i = 0; i < 64; i++)
  512. hmac_buf[i] ^= HMAC_IPAD_VAL;
  513. av_sha_init(sha, 256);
  514. av_sha_update(sha, hmac_buf, 64);
  515. if (gap <= 0) {
  516. av_sha_update(sha, src, len);
  517. } else { //skip 32 bytes used for storing digest
  518. av_sha_update(sha, src, gap);
  519. av_sha_update(sha, src + gap + 32, len - gap - 32);
  520. }
  521. av_sha_final(sha, hmac_buf + 64);
  522. for (i = 0; i < 64; i++)
  523. hmac_buf[i] ^= HMAC_IPAD_VAL ^ HMAC_OPAD_VAL; //reuse XORed key for opad
  524. av_sha_init(sha, 256);
  525. av_sha_update(sha, hmac_buf, 64+32);
  526. av_sha_final(sha, dst);
  527. av_free(sha);
  528. return 0;
  529. }
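/**
* Calculate the position of the handshake digest: the four offset bytes
* starting at off are summed, reduced modulo mod_val and shifted by add_val.
*/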
  530. int ff_rtmp_calc_digest_pos(const uint8_t *buf, int off, int mod_val,
  531. int add_val)
  532. {
  533. int i, digest_pos = 0;
  534. for (i = 0; i < 4; i++)
  535. digest_pos += buf[i + off];
  536. digest_pos = digest_pos % mod_val + add_val;
  537. return digest_pos;
  538. }
  539. /**
540. * Put HMAC-SHA256 digest of packet data (except for the bytes where this digest
  541. * will be stored) into that packet.
  542. *
  543. * @param buf handshake data (1536 bytes)
  544. * @return offset to the digest inside input data
  545. */
  546. static int rtmp_handshake_imprint_with_digest(uint8_t *buf)
  547. {
  548. int ret, digest_pos;
  549. digest_pos = ff_rtmp_calc_digest_pos(buf, 8, 728, 12);
  550. ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
  551. rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
  552. buf + digest_pos);
  553. if (ret < 0)
  554. return ret;
  555. return digest_pos;
  556. }
  557. /**
  558. * Verify that the received server response has the expected digest value.
  559. *
  560. * @param buf handshake data received from the server (1536 bytes)
  561. * @param off position to search digest offset from
562. * @return digest position if the digest is valid, 0 otherwise
  563. */
  564. static int rtmp_validate_digest(uint8_t *buf, int off)
  565. {
  566. uint8_t digest[32];
  567. int ret, digest_pos;
  568. digest_pos = ff_rtmp_calc_digest_pos(buf, off, 728, off + 4);
  569. ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
  570. rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
  571. digest);
  572. if (ret < 0)
  573. return ret;
  574. if (!memcmp(digest, buf + digest_pos, 32))
  575. return digest_pos;
  576. return 0;
  577. }
  578. /**
  579. * Perform handshake with the server by means of exchanging pseudorandom data
580. * signed with an HMAC-SHA256 digest.
  581. *
  582. * @return 0 if handshake succeeds, negative value otherwise
  583. */
  584. static int rtmp_handshake(URLContext *s, RTMPContext *rt)
  585. {
  586. AVLFG rnd;
  587. uint8_t tosend [RTMP_HANDSHAKE_PACKET_SIZE+1] = {
  588. 3, // unencrypted data
  589. 0, 0, 0, 0, // client uptime
  590. RTMP_CLIENT_VER1,
  591. RTMP_CLIENT_VER2,
  592. RTMP_CLIENT_VER3,
  593. RTMP_CLIENT_VER4,
  594. };
  595. uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
  596. uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
  597. int i;
  598. int server_pos, client_pos;
  599. uint8_t digest[32];
  600. int ret;
  601. av_log(s, AV_LOG_DEBUG, "Handshaking...\n");
  602. av_lfg_init(&rnd, 0xDEADC0DE);
  603. // generate handshake packet - 1536 bytes of pseudorandom data
  604. for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
  605. tosend[i] = av_lfg_get(&rnd) >> 24;
  606. client_pos = rtmp_handshake_imprint_with_digest(tosend + 1);
  607. if (client_pos < 0)
  608. return client_pos;
  609. if ((ret = ffurl_write(rt->stream, tosend,
  610. RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
  611. av_log(s, AV_LOG_ERROR, "Cannot write RTMP handshake request\n");
  612. return ret;
  613. }
  614. if ((ret = ffurl_read_complete(rt->stream, serverdata,
  615. RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
  616. av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
  617. return ret;
  618. }
  619. if ((ret = ffurl_read_complete(rt->stream, clientdata,
  620. RTMP_HANDSHAKE_PACKET_SIZE)) < 0) {
  621. av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
  622. return ret;
  623. }
  624. av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
  625. serverdata[5], serverdata[6], serverdata[7], serverdata[8]);
  626. if (rt->is_input && serverdata[5] >= 3) {
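// the offset bytes locating the server digest may be stored at position 772 or 8 of the handshake data, so try both known layouts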
  627. server_pos = rtmp_validate_digest(serverdata + 1, 772);
  628. if (server_pos < 0)
  629. return server_pos;
  630. if (!server_pos) {
  631. server_pos = rtmp_validate_digest(serverdata + 1, 8);
  632. if (server_pos < 0)
  633. return server_pos;
  634. if (!server_pos) {
635. av_log(s, AV_LOG_ERROR, "Server response validation failed\n");
  636. return AVERROR(EIO);
  637. }
  638. }
  639. ret = ff_rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
  640. rtmp_server_key, sizeof(rtmp_server_key),
  641. digest);
  642. if (ret < 0)
  643. return ret;
  644. ret = ff_rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32,
  645. 0, digest, 32, digest);
  646. if (ret < 0)
  647. return ret;
  648. if (memcmp(digest, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
  649. av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
  650. return AVERROR(EIO);
  651. }
  652. for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
  653. tosend[i] = av_lfg_get(&rnd) >> 24;
  654. ret = ff_rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
  655. rtmp_player_key, sizeof(rtmp_player_key),
  656. digest);
  657. if (ret < 0)
  658. return ret;
  659. ret = ff_rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
  660. digest, 32,
  661. tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);
  662. if (ret < 0)
  663. return ret;
  664. // write reply back to the server
  665. if ((ret = ffurl_write(rt->stream, tosend,
  666. RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
  667. return ret;
  668. } else {
  669. if ((ret = ffurl_write(rt->stream, serverdata + 1,
  670. RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
  671. return ret;
  672. }
  673. return 0;
  674. }
  675. /**
  676. * Parse received packet and possibly perform some action depending on
  677. * the packet contents.
678. * @return 0 for no errors, negative values for serious errors which prevent
679. * further communication, positive values for non-critical errors
  680. */
  681. static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
  682. {
  683. int i, t;
  684. const uint8_t *data_end = pkt->data + pkt->data_size;
  685. int ret;
  686. #ifdef DEBUG
  687. ff_rtmp_packet_dump(s, pkt);
  688. #endif
  689. switch (pkt->type) {
  690. case RTMP_PT_CHUNK_SIZE:
  691. if (pkt->data_size != 4) {
  692. av_log(s, AV_LOG_ERROR,
  693. "Chunk size change packet is not 4 bytes long (%d)\n", pkt->data_size);
  694. return -1;
  695. }
  696. if (!rt->is_input)
  697. if ((ret = ff_rtmp_packet_write(rt->stream, pkt, rt->chunk_size,
  698. rt->prev_pkt[1])) < 0)
  699. return ret;
  700. rt->chunk_size = AV_RB32(pkt->data);
  701. if (rt->chunk_size <= 0) {
  702. av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n", rt->chunk_size);
  703. return -1;
  704. }
  705. av_log(s, AV_LOG_DEBUG, "New chunk size = %d\n", rt->chunk_size);
  706. break;
  707. case RTMP_PT_PING:
  708. t = AV_RB16(pkt->data);
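// event type 6 is a PingRequest, which must be answered with a pong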
  709. if (t == 6)
  710. if ((ret = gen_pong(s, rt, pkt)) < 0)
  711. return ret;
  712. break;
  713. case RTMP_PT_CLIENT_BW:
  714. if (pkt->data_size < 4) {
  715. av_log(s, AV_LOG_ERROR,
  716. "Client bandwidth report packet is less than 4 bytes long (%d)\n",
  717. pkt->data_size);
  718. return -1;
  719. }
  720. av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", AV_RB32(pkt->data));
  721. rt->client_report_size = AV_RB32(pkt->data) >> 1;
  722. break;
  723. case RTMP_PT_SERVER_BW:
  724. rt->server_bw = AV_RB32(pkt->data);
  725. if (rt->server_bw <= 0) {
  726. av_log(s, AV_LOG_ERROR, "Incorrect server bandwidth %d\n", rt->server_bw);
  727. return AVERROR(EINVAL);
  728. }
  729. av_log(s, AV_LOG_DEBUG, "Server bandwidth = %d\n", rt->server_bw);
  730. break;
  731. case RTMP_PT_INVOKE:
  732. //TODO: check for the messages sent for wrong state?
  733. if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
  734. uint8_t tmpstr[256];
  735. if (!ff_amf_get_field_value(pkt->data + 9, data_end,
  736. "description", tmpstr, sizeof(tmpstr)))
  737. av_log(s, AV_LOG_ERROR, "Server error: %s\n",tmpstr);
  738. return -1;
  739. } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
  740. switch (rt->state) {
  741. case STATE_HANDSHAKED:
  742. if (!rt->is_input) {
  743. if ((ret = gen_release_stream(s, rt)) < 0)
  744. return ret;
  745. if ((ret = gen_fcpublish_stream(s, rt)) < 0)
  746. return ret;
  747. rt->state = STATE_RELEASING;
  748. } else {
  749. if ((ret = gen_server_bw(s, rt)) < 0)
  750. return ret;
  751. rt->state = STATE_CONNECTING;
  752. }
  753. if ((ret = gen_create_stream(s, rt)) < 0)
  754. return ret;
  755. break;
  756. case STATE_FCPUBLISH:
  757. rt->state = STATE_CONNECTING;
  758. break;
  759. case STATE_RELEASING:
  760. rt->state = STATE_FCPUBLISH;
  761. /* hack for Wowza Media Server, it does not send result for
  762. * releaseStream and FCPublish calls */
  763. if (!pkt->data[10]) {
  764. int pkt_id = av_int2double(AV_RB64(pkt->data + 11));
  765. if (pkt_id == rt->create_stream_invoke)
  766. rt->state = STATE_CONNECTING;
  767. }
  768. if (rt->state != STATE_CONNECTING)
  769. break;
  770. case STATE_CONNECTING:
  771. //extract a number from the result
  772. if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
  773. av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
  774. } else {
  775. rt->main_channel_id = av_int2double(AV_RB64(pkt->data + 21));
  776. }
  777. if (rt->is_input) {
  778. if ((ret = gen_play(s, rt)) < 0)
  779. return ret;
  780. if ((ret = gen_buffer_time(s, rt)) < 0)
  781. return ret;
  782. } else {
  783. if ((ret = gen_publish(s, rt)) < 0)
  784. return ret;
  785. }
  786. rt->state = STATE_READY;
  787. break;
  788. }
  789. } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
  790. const uint8_t* ptr = pkt->data + 11;
  791. uint8_t tmpstr[256];
  792. for (i = 0; i < 2; i++) {
  793. t = ff_amf_tag_size(ptr, data_end);
  794. if (t < 0)
  795. return 1;
  796. ptr += t;
  797. }
  798. t = ff_amf_get_field_value(ptr, data_end,
  799. "level", tmpstr, sizeof(tmpstr));
  800. if (!t && !strcmp(tmpstr, "error")) {
  801. if (!ff_amf_get_field_value(ptr, data_end,
  802. "description", tmpstr, sizeof(tmpstr)))
  803. av_log(s, AV_LOG_ERROR, "Server error: %s\n",tmpstr);
  804. return -1;
  805. }
  806. t = ff_amf_get_field_value(ptr, data_end,
  807. "code", tmpstr, sizeof(tmpstr));
  808. if (!t && !strcmp(tmpstr, "NetStream.Play.Start")) rt->state = STATE_PLAYING;
  809. if (!t && !strcmp(tmpstr, "NetStream.Play.Stop")) rt->state = STATE_STOPPED;
  810. if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
  811. if (!t && !strcmp(tmpstr, "NetStream.Publish.Start")) rt->state = STATE_PUBLISHING;
  812. } else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
  813. if ((ret = gen_check_bw(s, rt)) < 0)
  814. return ret;
  815. }
  816. break;
  817. case RTMP_PT_VIDEO:
  818. case RTMP_PT_AUDIO:
  819. /* Audio and Video packets are parsed in get_packet() */
  820. break;
  821. default:
  822. av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type);
  823. break;
  824. }
  825. return 0;
  826. }
  827. /**
  828. * Interact with the server by receiving and sending RTMP packets until
  829. * there is some significant data (media data or expected status notification).
  830. *
  831. * @param s reading context
832. * @param for_header non-zero value tells the function to work until it
833. * gets a notification from the server that playing has started;
834. * otherwise the function works until some media data is received (or
835. * an error occurs)
  836. * @return 0 for successful operation, negative value in case of error
  837. */
  838. static int get_packet(URLContext *s, int for_header)
  839. {
  840. RTMPContext *rt = s->priv_data;
  841. int ret;
  842. uint8_t *p;
  843. const uint8_t *next;
  844. uint32_t data_size;
  845. uint32_t ts, cts, pts=0;
  846. if (rt->state == STATE_STOPPED)
  847. return AVERROR_EOF;
  848. for (;;) {
  849. RTMPPacket rpkt = { 0 };
  850. if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
  851. rt->chunk_size, rt->prev_pkt[0])) <= 0) {
  852. if (ret == 0) {
  853. return AVERROR(EAGAIN);
  854. } else {
  855. return AVERROR(EIO);
  856. }
  857. }
  858. rt->bytes_read += ret;
  859. if (rt->bytes_read > rt->last_bytes_read + rt->client_report_size) {
  860. av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
  861. if ((ret = gen_bytes_read(s, rt, rpkt.timestamp + 1)) < 0)
  862. return ret;
  863. rt->last_bytes_read = rt->bytes_read;
  864. }
  865. ret = rtmp_parse_result(s, rt, &rpkt);
  866. if (ret < 0) {//serious error in current packet
  867. ff_rtmp_packet_destroy(&rpkt);
  868. return ret;
  869. }
  870. if (rt->state == STATE_STOPPED) {
  871. ff_rtmp_packet_destroy(&rpkt);
  872. return AVERROR_EOF;
  873. }
  874. if (for_header && (rt->state == STATE_PLAYING || rt->state == STATE_PUBLISHING)) {
  875. ff_rtmp_packet_destroy(&rpkt);
  876. return 0;
  877. }
  878. if (!rpkt.data_size || !rt->is_input) {
  879. ff_rtmp_packet_destroy(&rpkt);
  880. continue;
  881. }
  882. if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
  883. (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
  884. ts = rpkt.timestamp;
  885. // generate packet header and put data into buffer for FLV demuxer
  886. rt->flv_off = 0;
  887. rt->flv_size = rpkt.data_size + 15;
  888. rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);
  889. bytestream_put_byte(&p, rpkt.type);
  890. bytestream_put_be24(&p, rpkt.data_size);
  891. bytestream_put_be24(&p, ts);
  892. bytestream_put_byte(&p, ts >> 24);
  893. bytestream_put_be24(&p, 0);
  894. bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
  895. bytestream_put_be32(&p, 0);
  896. ff_rtmp_packet_destroy(&rpkt);
  897. return 0;
  898. } else if (rpkt.type == RTMP_PT_METADATA) {
  899. // we got raw FLV data, make it available for FLV demuxer
  900. rt->flv_off = 0;
  901. rt->flv_size = rpkt.data_size;
  902. rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
  903. /* rewrite timestamps */
  904. next = rpkt.data;
  905. ts = rpkt.timestamp;
  906. while (next - rpkt.data < rpkt.data_size - 11) {
  907. next++;
  908. data_size = bytestream_get_be24(&next);
  909. p=next;
  910. cts = bytestream_get_be24(&next);
  911. cts |= bytestream_get_byte(&next) << 24;
  912. if (pts==0)
  913. pts=cts;
  914. ts += cts - pts;
  915. pts = cts;
  916. bytestream_put_be24(&p, ts);
  917. bytestream_put_byte(&p, ts >> 24);
  918. next += data_size + 3 + 4;
  919. }
  920. memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
  921. ff_rtmp_packet_destroy(&rpkt);
  922. return 0;
  923. }
  924. ff_rtmp_packet_destroy(&rpkt);
  925. }
  926. }
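/**
* Close the RTMP connection: send FCUnpublish/deleteStream for streams that
* were set up, free the buffered FLV data and close the underlying transport.
*/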
  927. static int rtmp_close(URLContext *h)
  928. {
  929. RTMPContext *rt = h->priv_data;
  930. int ret = 0;
  931. if (!rt->is_input) {
  932. rt->flv_data = NULL;
  933. if (rt->out_pkt.data_size)
  934. ff_rtmp_packet_destroy(&rt->out_pkt);
  935. if (rt->state > STATE_FCPUBLISH)
  936. ret = gen_fcunpublish_stream(h, rt);
  937. }
  938. if (rt->state > STATE_HANDSHAKED)
  939. ret = gen_delete_stream(h, rt);
  940. av_freep(&rt->flv_data);
  941. ffurl_close(rt->stream);
  942. return ret;
  943. }
  944. /**
  945. * Open RTMP connection and verify that the stream can be played.
  946. *
  947. * URL syntax: rtmp://server[:port][/app][/playpath]
948. * where 'app' is the first one or two directories in the path
949. * (e.g. /ondemand/, /flash/live/, etc.)
950. * and 'playpath' is a file name (the rest of the path,
951. * possibly prefixed with "mp4:")
  952. */
  953. static int rtmp_open(URLContext *s, const char *uri, int flags)
  954. {
  955. RTMPContext *rt = s->priv_data;
  956. char proto[8], hostname[256], path[1024], *fname;
  957. char *old_app;
  958. uint8_t buf[2048];
  959. int port;
  960. AVDictionary *opts = NULL;
  961. int ret;
  962. rt->is_input = !(flags & AVIO_FLAG_WRITE);
  963. av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
  964. path, sizeof(path), s->filename);
  965. if (!strcmp(proto, "rtmpt") || !strcmp(proto, "rtmpts")) {
  966. if (!strcmp(proto, "rtmpts"))
  967. av_dict_set(&opts, "ffrtmphttp_tls", "1", 1);
  968. /* open the http tunneling connection */
  969. ff_url_join(buf, sizeof(buf), "ffrtmphttp", NULL, hostname, port, NULL);
  970. } else if (!strcmp(proto, "rtmps")) {
  971. /* open the tls connection */
  972. if (port < 0)
  973. port = RTMPS_DEFAULT_PORT;
  974. ff_url_join(buf, sizeof(buf), "tls", NULL, hostname, port, NULL);
  975. } else {
  976. /* open the tcp connection */
  977. if (port < 0)
  978. port = RTMP_DEFAULT_PORT;
  979. ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
  980. }
  981. if ((ret = ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
  982. &s->interrupt_callback, &opts)) < 0) {
  983. av_log(s , AV_LOG_ERROR, "Cannot open connection %s\n", buf);
  984. goto fail;
  985. }
  986. rt->state = STATE_START;
  987. if ((ret = rtmp_handshake(s, rt)) < 0)
  988. goto fail;
  989. rt->chunk_size = 128;
  990. rt->state = STATE_HANDSHAKED;
  991. // Keep the application name when it has been defined by the user.
  992. old_app = rt->app;
  993. rt->app = av_malloc(APP_MAX_LENGTH);
  994. if (!rt->app) {
  995. ret = AVERROR(ENOMEM);
  996. goto fail;
  997. }
  998. //extract "app" part from path
  999. if (!strncmp(path, "/ondemand/", 10)) {
  1000. fname = path + 10;
  1001. memcpy(rt->app, "ondemand", 9);
  1002. } else {
  1003. char *next = *path ? path + 1 : path;
  1004. char *p = strchr(next, '/');
  1005. if (!p) {
  1006. fname = next;
  1007. rt->app[0] = '\0';
  1008. } else {
  1009. // make sure we do not mismatch a playpath for an application instance
  1010. char *c = strchr(p + 1, ':');
  1011. fname = strchr(p + 1, '/');
  1012. if (!fname || (c && c < fname)) {
  1013. fname = p + 1;
  1014. av_strlcpy(rt->app, path + 1, p - path);
  1015. } else {
  1016. fname++;
  1017. av_strlcpy(rt->app, path + 1, fname - path - 1);
  1018. }
  1019. }
  1020. }
  1021. if (old_app) {
1022. // The application name has been defined by the user, use it instead of the one parsed from the path.
  1023. av_free(rt->app);
  1024. rt->app = old_app;
  1025. }
  1026. if (!rt->playpath) {
  1027. int len = strlen(fname);
  1028. rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
  1029. if (!rt->playpath) {
  1030. ret = AVERROR(ENOMEM);
  1031. goto fail;
  1032. }
  1033. if (!strchr(fname, ':') && len >= 4 &&
  1034. (!strcmp(fname + len - 4, ".f4v") ||
  1035. !strcmp(fname + len - 4, ".mp4"))) {
  1036. memcpy(rt->playpath, "mp4:", 5);
  1037. } else if (len >= 4 && !strcmp(fname + len - 4, ".flv")) {
  1038. fname[len - 4] = '\0';
  1039. } else {
  1040. rt->playpath[0] = 0;
  1041. }
  1042. strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
  1043. }
  1044. if (!rt->tcurl) {
  1045. rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
  1046. if (!rt->tcurl) {
  1047. ret = AVERROR(ENOMEM);
  1048. goto fail;
  1049. }
  1050. ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
  1051. port, "/%s", rt->app);
  1052. }
  1053. if (!rt->flashver) {
  1054. rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
  1055. if (!rt->flashver) {
  1056. ret = AVERROR(ENOMEM);
  1057. goto fail;
  1058. }
  1059. if (rt->is_input) {
  1060. snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
  1061. RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
  1062. RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
  1063. } else {
  1064. snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
  1065. "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
  1066. }
  1067. }
  1068. rt->client_report_size = 1048576;
  1069. rt->bytes_read = 0;
  1070. rt->last_bytes_read = 0;
  1071. rt->server_bw = 2500000;
  1072. av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
  1073. proto, path, rt->app, rt->playpath);
  1074. if ((ret = gen_connect(s, rt)) < 0)
  1075. goto fail;
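// keep reading server replies until playing/publishing has started or an error occurs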
  1076. do {
  1077. ret = get_packet(s, 1);
1078. } while (ret == AVERROR(EAGAIN));
  1079. if (ret < 0)
  1080. goto fail;
  1081. if (rt->is_input) {
  1082. // generate FLV header for demuxer
  1083. rt->flv_size = 13;
  1084. rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
  1085. rt->flv_off = 0;
  1086. memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
  1087. } else {
  1088. rt->flv_size = 0;
  1089. rt->flv_data = NULL;
  1090. rt->flv_off = 0;
  1091. rt->skip_bytes = 13;
  1092. }
  1093. s->max_packet_size = rt->stream->max_packet_size;
  1094. s->is_streamed = 1;
  1095. return 0;
  1096. fail:
  1097. av_dict_free(&opts);
  1098. rtmp_close(s);
  1099. return ret;
  1100. }
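/**
* Serve data to the FLV demuxer from the current FLV buffer, fetching new
* RTMP packets via get_packet() whenever the buffer runs dry.
*/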
  1101. static int rtmp_read(URLContext *s, uint8_t *buf, int size)
  1102. {
  1103. RTMPContext *rt = s->priv_data;
  1104. int orig_size = size;
  1105. int ret;
  1106. while (size > 0) {
  1107. int data_left = rt->flv_size - rt->flv_off;
  1108. if (data_left >= size) {
  1109. memcpy(buf, rt->flv_data + rt->flv_off, size);
  1110. rt->flv_off += size;
  1111. return orig_size;
  1112. }
  1113. if (data_left > 0) {
  1114. memcpy(buf, rt->flv_data + rt->flv_off, data_left);
  1115. buf += data_left;
  1116. size -= data_left;
  1117. rt->flv_off = rt->flv_size;
  1118. return data_left;
  1119. }
  1120. if ((ret = get_packet(s, 0)) < 0)
  1121. return ret;
  1122. }
  1123. return orig_size;
  1124. }
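/**
* Accept FLV data from the muxer, repack it into RTMP packets on the source
* channel and send them, periodically polling the server for incoming
* control messages.
*/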
  1125. static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
  1126. {
  1127. RTMPContext *rt = s->priv_data;
  1128. int size_temp = size;
  1129. int pktsize, pkttype;
  1130. uint32_t ts;
  1131. const uint8_t *buf_temp = buf;
  1132. uint8_t c;
  1133. int ret;
  1134. do {
  1135. if (rt->skip_bytes) {
  1136. int skip = FFMIN(rt->skip_bytes, size_temp);
  1137. buf_temp += skip;
  1138. size_temp -= skip;
  1139. rt->skip_bytes -= skip;
  1140. continue;
  1141. }
  1142. if (rt->flv_header_bytes < 11) {
  1143. const uint8_t *header = rt->flv_header;
  1144. int copy = FFMIN(11 - rt->flv_header_bytes, size_temp);
  1145. bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
  1146. rt->flv_header_bytes += copy;
  1147. size_temp -= copy;
  1148. if (rt->flv_header_bytes < 11)
  1149. break;
  1150. pkttype = bytestream_get_byte(&header);
  1151. pktsize = bytestream_get_be24(&header);
  1152. ts = bytestream_get_be24(&header);
  1153. ts |= bytestream_get_byte(&header) << 24;
  1154. bytestream_get_be24(&header);
  1155. rt->flv_size = pktsize;
1156. // force a 12-byte packet header
  1157. if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
  1158. pkttype == RTMP_PT_NOTIFY) {
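// reserve room for the "@setDataFrame" marker written below: 1 type byte + 2 length bytes + 13 characters = 16 bytes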
  1159. if (pkttype == RTMP_PT_NOTIFY)
  1160. pktsize += 16;
  1161. rt->prev_pkt[1][RTMP_SOURCE_CHANNEL].channel_id = 0;
  1162. }
  1163. //this can be a big packet, it's better to send it right here
  1164. if ((ret = ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL,
  1165. pkttype, ts, pktsize)) < 0)
  1166. return ret;
  1167. rt->out_pkt.extra = rt->main_channel_id;
  1168. rt->flv_data = rt->out_pkt.data;
  1169. if (pkttype == RTMP_PT_NOTIFY)
  1170. ff_amf_write_string(&rt->flv_data, "@setDataFrame");
  1171. }
  1172. if (rt->flv_size - rt->flv_off > size_temp) {
  1173. bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
  1174. rt->flv_off += size_temp;
  1175. size_temp = 0;
  1176. } else {
  1177. bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
  1178. size_temp -= rt->flv_size - rt->flv_off;
  1179. rt->flv_off += rt->flv_size - rt->flv_off;
  1180. }
  1181. if (rt->flv_off == rt->flv_size) {
  1182. rt->skip_bytes = 4;
  1183. if ((ret = ff_rtmp_packet_write(rt->stream, &rt->out_pkt,
  1184. rt->chunk_size, rt->prev_pkt[1])) < 0)
  1185. return ret;
  1186. ff_rtmp_packet_destroy(&rt->out_pkt);
  1187. rt->flv_size = 0;
  1188. rt->flv_off = 0;
  1189. rt->flv_header_bytes = 0;
  1190. rt->flv_nb_packets++;
  1191. }
  1192. } while (buf_temp - buf < size);
  1193. if (rt->flv_nb_packets < rt->flush_interval)
  1194. return size;
  1195. rt->flv_nb_packets = 0;
  1196. /* set stream into nonblocking mode */
  1197. rt->stream->flags |= AVIO_FLAG_NONBLOCK;
  1198. /* try to read one byte from the stream */
  1199. ret = ffurl_read(rt->stream, &c, 1);
  1200. /* switch the stream back into blocking mode */
  1201. rt->stream->flags &= ~AVIO_FLAG_NONBLOCK;
  1202. if (ret == AVERROR(EAGAIN)) {
  1203. /* no incoming data to handle */
  1204. return size;
  1205. } else if (ret < 0) {
  1206. return ret;
  1207. } else if (ret == 1) {
  1208. RTMPPacket rpkt = { 0 };
  1209. if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
  1210. rt->chunk_size,
  1211. rt->prev_pkt[0], c)) <= 0)
  1212. return ret;
  1213. if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
  1214. return ret;
  1215. ff_rtmp_packet_destroy(&rpkt);
  1216. }
  1217. return size;
  1218. }
  1219. #define OFFSET(x) offsetof(RTMPContext, x)
  1220. #define DEC AV_OPT_FLAG_DECODING_PARAM
  1221. #define ENC AV_OPT_FLAG_ENCODING_PARAM
  1222. static const AVOption rtmp_options[] = {
  1223. {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  1224. {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {3000}, 0, INT_MAX, DEC|ENC},
  1225. {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  1226. {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  1227. {"rtmp_flush_interval", "Number of packets flushed in the same request (RTMPT only).", OFFSET(flush_interval), AV_OPT_TYPE_INT, {10}, 0, INT_MAX, ENC},
  1228. {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
  1229. {"any", "both", 0, AV_OPT_TYPE_CONST, {-2}, 0, 0, DEC, "rtmp_live"},
  1230. {"live", "live stream", 0, AV_OPT_TYPE_CONST, {-1}, 0, 0, DEC, "rtmp_live"},
  1231. {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
  1232. {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  1233. {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  1234. {"rtmp_tcurl", "URL of the target stream. Defaults to rtmp://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  1235. { NULL },
  1236. };
  1237. static const AVClass rtmp_class = {
  1238. .class_name = "rtmp",
  1239. .item_name = av_default_item_name,
  1240. .option = rtmp_options,
  1241. .version = LIBAVUTIL_VERSION_INT,
  1242. };
  1243. URLProtocol ff_rtmp_protocol = {
  1244. .name = "rtmp",
  1245. .url_open = rtmp_open,
  1246. .url_read = rtmp_read,
  1247. .url_write = rtmp_write,
  1248. .url_close = rtmp_close,
  1249. .priv_data_size = sizeof(RTMPContext),
  1250. .flags = URL_PROTOCOL_FLAG_NETWORK,
  1251. .priv_data_class= &rtmp_class,
  1252. };
  1253. static const AVClass rtmps_class = {
  1254. .class_name = "rtmps",
  1255. .item_name = av_default_item_name,
  1256. .option = rtmp_options,
  1257. .version = LIBAVUTIL_VERSION_INT,
  1258. };
  1259. URLProtocol ff_rtmps_protocol = {
  1260. .name = "rtmps",
  1261. .url_open = rtmp_open,
  1262. .url_read = rtmp_read,
  1263. .url_write = rtmp_write,
  1264. .url_close = rtmp_close,
  1265. .priv_data_size = sizeof(RTMPContext),
  1266. .flags = URL_PROTOCOL_FLAG_NETWORK,
  1267. .priv_data_class = &rtmps_class,
  1268. };
  1269. static const AVClass rtmpt_class = {
  1270. .class_name = "rtmpt",
  1271. .item_name = av_default_item_name,
  1272. .option = rtmp_options,
  1273. .version = LIBAVUTIL_VERSION_INT,
  1274. };
  1275. URLProtocol ff_rtmpt_protocol = {
  1276. .name = "rtmpt",
  1277. .url_open = rtmp_open,
  1278. .url_read = rtmp_read,
  1279. .url_write = rtmp_write,
  1280. .url_close = rtmp_close,
  1281. .priv_data_size = sizeof(RTMPContext),
  1282. .flags = URL_PROTOCOL_FLAG_NETWORK,
  1283. .priv_data_class = &rtmpt_class,
  1284. };
  1285. static const AVClass rtmpts_class = {
  1286. .class_name = "rtmpts",
  1287. .item_name = av_default_item_name,
  1288. .option = rtmp_options,
  1289. .version = LIBAVUTIL_VERSION_INT,
  1290. };
  1291. URLProtocol ff_rtmpts_protocol = {
  1292. .name = "rtmpts",
  1293. .url_open = rtmp_open,
  1294. .url_read = rtmp_read,
  1295. .url_write = rtmp_write,
  1296. .url_close = rtmp_close,
  1297. .priv_data_size = sizeof(RTMPContext),
  1298. .flags = URL_PROTOCOL_FLAG_NETWORK,
  1299. .priv_data_class = &rtmpts_class,
  1300. };