/*
 * RTMP network protocol
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavformat/rtmpproto.c
 * RTMP protocol
 */

#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "libavutil/lfg.h"
#include "libavutil/sha.h"
#include "avformat.h"
#include "network.h"

#include "flv.h"
#include "rtmp.h"
#include "rtmppkt.h"

/* we can't use av_log() with URLContext yet... */
#if LIBAVFORMAT_VERSION_MAJOR < 53
#define LOG_CONTEXT NULL
#else
#define LOG_CONTEXT s
#endif

/** RTMP protocol handler state */
typedef enum {
    STATE_START,      ///< client has not done anything yet
    STATE_HANDSHAKED, ///< client has performed handshake
    STATE_RELEASING,  ///< client releasing stream before publishing it (for output)
    STATE_FCPUBLISH,  ///< client FCPublishing stream (for output)
    STATE_CONNECTING, ///< client connected to server successfully
    STATE_READY,      ///< client has sent all needed commands and waits for server reply
    STATE_PLAYING,    ///< client has started receiving multimedia data from server
    STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
    STATE_STOPPED,    ///< the broadcast has been stopped
} ClientState;

/** protocol handler context */
typedef struct RTMPContext {
    URLContext*   stream;                     ///< TCP stream used in interactions with RTMP server
    RTMPPacket    prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
    int           chunk_size;                 ///< size of the chunks RTMP packets are divided into
    int           is_input;                   ///< input/output flag
    char          playpath[256];              ///< path to filename to play (with possible "mp4:" prefix)
    char          app[128];                   ///< application
    ClientState   state;                      ///< current state
    int           main_channel_id;            ///< an additional channel ID which is used for some invocations
    uint8_t*      flv_data;                   ///< buffer with data for demuxer
    int           flv_size;                   ///< current buffer size
    int           flv_off;                    ///< number of bytes read from current buffer
    RTMPPacket    out_pkt;                    ///< rtmp packet, created from flv a/v or metadata (for output)
} RTMPContext;

#define PLAYER_KEY_OPEN_PART_LEN 30   ///< length of partial key used for first client digest signing
/** Client key used for digest signing */
static const uint8_t rtmp_player_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',
    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

#define SERVER_KEY_OPEN_PART_LEN 36   ///< length of partial key used for first server digest signing
/** Key used for RTMP server digest signing */
static const uint8_t rtmp_server_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
    'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',
    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
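
/*
 * Note on the command encoding used by the gen_* helpers below: the payload of
 * an RTMP_PT_INVOKE packet is a sequence of AMF0 values, and the ff_amf_write_*
 * helpers emit the standard AMF0 markers: 0x00 + 8-byte big-endian IEEE double
 * for a number, 0x01 + one byte for a boolean, 0x02 + 16-bit big-endian length
 * + bytes for a string, 0x03 for the start of an object (length-prefixed
 * property names followed by values, terminated by 0x00 0x00 0x09) and 0x05
 * for null. A command is therefore: command name (string), transaction ID
 * (number), then command-specific arguments.
 */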

/**
 * Generates 'connect' call and sends it to the server.
 */
static void gen_connect(URLContext *s, RTMPContext *rt, const char *proto,
                        const char *host, int port)
{
    RTMPPacket pkt;
    uint8_t ver[64], *p;
    char tcurl[512];

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 4096);
    p = pkt.data;

    snprintf(tcurl, sizeof(tcurl), "%s://%s:%d/%s", proto, host, port, rt->app);
    ff_amf_write_string(&p, "connect");
    ff_amf_write_number(&p, 1.0);
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "app");
    ff_amf_write_string(&p, rt->app);

    if (rt->is_input) {
        snprintf(ver, sizeof(ver), "%s %d,%d,%d,%d", RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1,
                 RTMP_CLIENT_VER2, RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
    } else {
        snprintf(ver, sizeof(ver), "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
        ff_amf_write_field_name(&p, "type");
        ff_amf_write_string(&p, "nonprivate");
    }
    ff_amf_write_field_name(&p, "flashVer");
    ff_amf_write_string(&p, ver);
    ff_amf_write_field_name(&p, "tcUrl");
    ff_amf_write_string(&p, tcurl);
    if (rt->is_input) {
        ff_amf_write_field_name(&p, "fpad");
        ff_amf_write_bool(&p, 0);
        ff_amf_write_field_name(&p, "capabilities");
        ff_amf_write_number(&p, 15.0);
        ff_amf_write_field_name(&p, "audioCodecs");
        ff_amf_write_number(&p, 1639.0);
        ff_amf_write_field_name(&p, "videoCodecs");
        ff_amf_write_number(&p, 252.0);
        ff_amf_write_field_name(&p, "videoFunction");
        ff_amf_write_number(&p, 1.0);
    }
    ff_amf_write_object_end(&p);

    pkt.data_size = p - pkt.data;

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
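
/*
 * All outgoing packets here and below go through ff_rtmp_packet_write(), which
 * splits the message into chunks of rt->chunk_size bytes (128 until a chunk
 * size change is negotiated) and prepends a chunk header to each one;
 * rt->prev_pkt[1] keeps the previously sent packet per channel so repeated
 * header fields can be compressed away, mirroring how prev_pkt[0] is used on
 * the reading side.
 */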

/**
 * Generates 'releaseStream' call and sends it to the server. It should make
 * the server release some channel for media streams.
 */
static void gen_release_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          29 + strlen(rt->playpath));

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Releasing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "releaseStream");
    ff_amf_write_number(&p, 2.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'FCPublish' call and sends it to the server. It should make
 * the server prepare for receiving media streams.
 */
static void gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          25 + strlen(rt->playpath));

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "FCPublish stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCPublish");
    ff_amf_write_number(&p, 3.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'FCUnpublish' call and sends it to the server. It should make
 * the server destroy the stream.
 */
static void gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          27 + strlen(rt->playpath));

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "UnPublishing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCUnpublish");
    ff_amf_write_number(&p, 5.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'createStream' call and sends it to the server. It should make
 * the server allocate some channel for media streams.
 */
static void gen_create_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Creating stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 25);

    p = pkt.data;
    ff_amf_write_string(&p, "createStream");
    ff_amf_write_number(&p, rt->is_input ? 3.0 : 4.0);
    ff_amf_write_null(&p);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'deleteStream' call and sends it to the server. It should make
 * the server remove some channel for media streams.
 */
static void gen_delete_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Deleting stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 34);

    p = pkt.data;
    ff_amf_write_string(&p, "deleteStream");
    ff_amf_write_number(&p, 0.0);
    ff_amf_write_null(&p);
    ff_amf_write_number(&p, rt->main_channel_id);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
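
/*
 * The "ping" packet appended at the end of gen_play() is an RTMP user control
 * message (RTMP_PT_PING): a 16-bit event type followed by event data. Event
 * type 3 is "SetBufferLength"; its data is the 32-bit stream ID followed by
 * the desired client buffer length in milliseconds (256 ms here).
 */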

/**
 * Generates 'play' call and sends it to the server, then pings the server
 * to start actual playing.
 */
static void gen_play(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE, 0,
                          20 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "play");
    ff_amf_write_number(&p, 0.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    // set client buffer time disguised in ping packet
    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, 1, 10);

    p = pkt.data;
    bytestream_put_be16(&p, 3);
    bytestream_put_be32(&p, 1);
    bytestream_put_be32(&p, 256); //TODO: what is a good value here?

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'publish' call and sends it to the server.
 */
static void gen_publish(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, 0,
                          30 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "publish");
    ff_amf_write_number(&p, 0.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_string(&p, "live");

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
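
/*
 * User control event type 6 is a ping request from the server; its event data
 * is a 32-bit timestamp, read below at offset 2 (right after the 2-byte event
 * type). gen_pong() answers with event type 7 (ping response), echoing that
 * timestamp incremented by one.
 */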

/**
 * Generates ping reply and sends it to the server.
 */
static void gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, ppkt->timestamp + 1, 6);
    p = pkt.data;
    bytestream_put_be16(&p, 7);
    bytestream_put_be32(&p, AV_RB32(ppkt->data+2) + 1);
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

//TODO: Move HMAC code somewhere. Eventually.
#define HMAC_IPAD_VAL 0x36
#define HMAC_OPAD_VAL 0x5C
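
/*
 * rtmp_calc_digest() below is a plain HMAC-SHA256:
 *   HMAC(K, m) = SHA256((K' ^ opad) || SHA256((K' ^ ipad) || m))
 * where K' is the key zero-padded (or pre-hashed, if longer) to the 64-byte
 * SHA-256 block size. The second XOR loop applies IPAD ^ OPAD to the already
 * IPAD-masked buffer, which leaves it masked with OPAD alone.
 */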

/**
 * Calculates HMAC-SHA2 digest for RTMP handshake packets.
 *
 * @param src    input buffer
 * @param len    input buffer length (should be 1536)
 * @param gap    offset in buffer where 32 bytes should not be taken into account
 *               when calculating digest (since it will be used to store that digest)
 * @param key    digest key
 * @param keylen digest key length
 * @param dst    buffer where calculated digest will be stored (32 bytes)
 */
static void rtmp_calc_digest(const uint8_t *src, int len, int gap,
                             const uint8_t *key, int keylen, uint8_t *dst)
{
    struct AVSHA *sha;
    uint8_t hmac_buf[64+32] = {0};
    int i;

    sha = av_mallocz(av_sha_size);

    if (keylen < 64) {
        memcpy(hmac_buf, key, keylen);
    } else {
        av_sha_init(sha, 256);
        av_sha_update(sha, key, keylen);
        av_sha_final(sha, hmac_buf);
    }
    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL;

    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64);
    if (gap <= 0) {
        av_sha_update(sha, src, len);
    } else { //skip 32 bytes used for storing digest
        av_sha_update(sha, src, gap);
        av_sha_update(sha, src + gap + 32, len - gap - 32);
    }
    av_sha_final(sha, hmac_buf + 64);

    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL ^ HMAC_OPAD_VAL; //reuse XORed key for opad
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64+32);
    av_sha_final(sha, dst);

    av_free(sha);
}

/**
 * Puts HMAC-SHA2 digest of packet data (except for the bytes where this digest
 * will be stored) into that packet.
 *
 * @param buf handshake data (1536 bytes)
 * @return offset to the digest inside input data
 */
static int rtmp_handshake_imprint_with_digest(uint8_t *buf)
{
    int i, digest_pos = 0;

    for (i = 8; i < 12; i++)
        digest_pos += buf[i];
    digest_pos = (digest_pos % 728) + 12;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
                     buf + digest_pos);

    return digest_pos;
}

/**
 * Verifies that the received server response has the expected digest value.
 *
 * @param buf handshake data received from the server (1536 bytes)
 * @param off position to search digest offset from
 * @return digest position if the digest is valid, 0 otherwise
 */
static int rtmp_validate_digest(uint8_t *buf, int off)
{
    int i, digest_pos = 0;
    uint8_t digest[32];

    for (i = 0; i < 4; i++)
        digest_pos += buf[i + off];
    digest_pos = (digest_pos % 728) + off + 4;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
                     digest);
    if (!memcmp(digest, buf + digest_pos, 32))
        return digest_pos;
    return 0;
}
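
/*
 * Handshake layout as implemented here: the client sends C0 (one version byte,
 * 3 for an unencrypted session) immediately followed by C1, a 1536-byte block
 * made of a 4-byte uptime, a 4-byte client version and pseudorandom filler,
 * with a 32-byte HMAC digest imprinted at an offset derived from bytes 8..11.
 * The server replies with S0+S1 (validated below with the server key, looking
 * at offset 772 first, then 8) plus a 1536-byte S2 whose trailing 32 bytes are
 * an HMAC keyed off the client's own digest; the client verifies that and
 * sends back a C2 block signed the same way with the server's digest.
 */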

/**
 * Performs handshake with the server by means of exchanging pseudorandom data
 * signed with HMAC-SHA2 digest.
 *
 * @return 0 if handshake succeeds, negative value otherwise
 */
static int rtmp_handshake(URLContext *s, RTMPContext *rt)
{
    AVLFG rnd;
    uint8_t tosend[RTMP_HANDSHAKE_PACKET_SIZE+1] = {
        3,          // unencrypted data
        0, 0, 0, 0, // client uptime
        RTMP_CLIENT_VER1,
        RTMP_CLIENT_VER2,
        RTMP_CLIENT_VER3,
        RTMP_CLIENT_VER4,
    };
    uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
    uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
    int i;
    int server_pos, client_pos;
    uint8_t digest[32];

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Handshaking...\n");

    av_lfg_init(&rnd, 0xDEADC0DE);
    // generate handshake packet - 1536 bytes of pseudorandom data
    for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
        tosend[i] = av_lfg_get(&rnd) >> 24;
    client_pos = rtmp_handshake_imprint_with_digest(tosend + 1);

    url_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    i = url_read_complete(rt->stream, serverdata, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE + 1) {
        av_log(LOG_CONTEXT, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }
    i = url_read_complete(rt->stream, clientdata, RTMP_HANDSHAKE_PACKET_SIZE);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE) {
        av_log(LOG_CONTEXT, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
           serverdata[5], serverdata[6], serverdata[7], serverdata[8]);

    if (rt->is_input) {
        server_pos = rtmp_validate_digest(serverdata + 1, 772);
        if (!server_pos) {
            server_pos = rtmp_validate_digest(serverdata + 1, 8);
            if (!server_pos) {
                av_log(LOG_CONTEXT, AV_LOG_ERROR, "Server response validating failed\n");
                return -1;
            }
        }

        rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
                         rtmp_server_key, sizeof(rtmp_server_key),
                         digest);
        rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE-32, 0,
                         digest, 32,
                         digest);
        if (memcmp(digest, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
            av_log(LOG_CONTEXT, AV_LOG_ERROR, "Signature mismatch\n");
            return -1;
        }

        for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
            tosend[i] = av_lfg_get(&rnd) >> 24;
        rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
                         rtmp_player_key, sizeof(rtmp_player_key),
                         digest);
        rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                         digest, 32,
                         tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);

        // write reply back to the server
        url_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE);
    } else {
        url_write(rt->stream, serverdata+1, RTMP_HANDSHAKE_PACKET_SIZE);
    }

    return 0;
}
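
/*
 * The memcmp() checks against "\002\000\006_error" and friends in
 * rtmp_parse_result() match the raw AMF0 encoding of the command name at the
 * start of the invoke payload: 0x02 (string marker), a 16-bit big-endian
 * length, then the name itself ("_error", "_result", "onStatus").
 */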

/**
 * Parses received packet and may perform some action depending on
 * the packet contents.
 * @return 0 for no errors, negative values for serious errors which prevent
 *         further communications, positive values for uncritical errors
 */
static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
{
    int i, t;
    const uint8_t *data_end = pkt->data + pkt->data_size;

    switch (pkt->type) {
    case RTMP_PT_CHUNK_SIZE:
        if (pkt->data_size != 4) {
            av_log(LOG_CONTEXT, AV_LOG_ERROR,
                   "Chunk size change packet is not 4 bytes long (%d)\n", pkt->data_size);
            return -1;
        }
        if (!rt->is_input)
            ff_rtmp_packet_write(rt->stream, pkt, rt->chunk_size, rt->prev_pkt[1]);
        rt->chunk_size = AV_RB32(pkt->data);
        if (rt->chunk_size <= 0) {
            av_log(LOG_CONTEXT, AV_LOG_ERROR, "Incorrect chunk size %d\n", rt->chunk_size);
            return -1;
        }
        av_log(LOG_CONTEXT, AV_LOG_DEBUG, "New chunk size = %d\n", rt->chunk_size);
        break;
    case RTMP_PT_PING:
        t = AV_RB16(pkt->data);
        if (t == 6)
            gen_pong(s, rt, pkt);
        break;
    case RTMP_PT_INVOKE:
        //TODO: check for the messages sent for wrong state?
        if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
            uint8_t tmpstr[256];

            if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                        "description", tmpstr, sizeof(tmpstr)))
                av_log(LOG_CONTEXT, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
            return -1;
        } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
            switch (rt->state) {
            case STATE_HANDSHAKED:
                if (!rt->is_input) {
                    gen_release_stream(s, rt);
                    gen_fcpublish_stream(s, rt);
                    rt->state = STATE_RELEASING;
                } else {
                    rt->state = STATE_CONNECTING;
                }
                gen_create_stream(s, rt);
                break;
            case STATE_FCPUBLISH:
                rt->state = STATE_CONNECTING;
                break;
            case STATE_RELEASING:
                rt->state = STATE_FCPUBLISH;
                /* hack for Wowza Media Server, it does not send result for
                 * releaseStream and FCPublish calls */
                if (!pkt->data[10]) {
                    int pkt_id = (int) av_int2dbl(AV_RB64(pkt->data + 11));
                    if (pkt_id == 4)
                        rt->state = STATE_CONNECTING;
                }
                if (rt->state != STATE_CONNECTING)
                    break;
            case STATE_CONNECTING:
                //extract a number from the result
                if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
                    av_log(LOG_CONTEXT, AV_LOG_WARNING, "Unexpected reply on connect()\n");
                } else {
                    rt->main_channel_id = (int) av_int2dbl(AV_RB64(pkt->data + 21));
                }
                if (rt->is_input) {
                    gen_play(s, rt);
                } else {
                    gen_publish(s, rt);
                }
                rt->state = STATE_READY;
                break;
            }
        } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
            const uint8_t* ptr = pkt->data + 11;
            uint8_t tmpstr[256];

            for (i = 0; i < 2; i++) {
                t = ff_amf_tag_size(ptr, data_end);
                if (t < 0)
                    return 1;
                ptr += t;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "level", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "error")) {
                if (!ff_amf_get_field_value(ptr, data_end,
                                            "description", tmpstr, sizeof(tmpstr)))
                    av_log(LOG_CONTEXT, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
                return -1;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "code", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "NetStream.Play.Start"))           rt->state = STATE_PLAYING;
            if (!t && !strcmp(tmpstr, "NetStream.Play.Stop"))            rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Publish.Start"))        rt->state = STATE_PUBLISHING;
        }
        break;
    }
    return 0;
}
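
/*
 * get_packet() below hands media to the lavf FLV demuxer by wrapping each RTMP
 * audio/video/notify payload in an FLV tag: an 11-byte header (tag type,
 * 24-bit payload size, 24-bit timestamp plus 8-bit extended-timestamp byte,
 * 24-bit stream ID set to 0), the payload itself, and a trailing 32-bit
 * PreviousTagSize field, written as 0 here.
 */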

/**
 * Interacts with the server by receiving and sending RTMP packets until
 * there is some significant data (media data or expected status notification).
 *
 * @param s          reading context
 * @param for_header non-zero value tells function to work until it
 *                   gets notification from the server that playing has been started,
 *                   otherwise function will work until some media data is received (or
 *                   an error happens)
 * @return 0 for successful operation, negative value in case of error
 */
static int get_packet(URLContext *s, int for_header)
{
    RTMPContext *rt = s->priv_data;
    int ret;

    if (rt->state == STATE_STOPPED)
        return AVERROR_EOF;

    for (;;) {
        RTMPPacket rpkt;
        if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
                                       rt->chunk_size, rt->prev_pkt[0])) != 0) {
            if (ret > 0) {
                return AVERROR(EAGAIN);
            } else {
                return AVERROR(EIO);
            }
        }

        ret = rtmp_parse_result(s, rt, &rpkt);
        if (ret < 0) { //serious error in current packet
            ff_rtmp_packet_destroy(&rpkt);
            return -1;
        }
        if (rt->state == STATE_STOPPED) {
            ff_rtmp_packet_destroy(&rpkt);
            return AVERROR_EOF;
        }
        if (for_header && (rt->state == STATE_PLAYING || rt->state == STATE_PUBLISHING)) {
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        if (!rpkt.data_size || !rt->is_input) {
            ff_rtmp_packet_destroy(&rpkt);
            continue;
        }
        if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
           (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
            uint8_t *p;
            uint32_t ts = rpkt.timestamp;

            // generate packet header and put data into buffer for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size + 15;
            rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);
            bytestream_put_byte(&p, rpkt.type);
            bytestream_put_be24(&p, rpkt.data_size);
            bytestream_put_be24(&p, ts);
            bytestream_put_byte(&p, ts >> 24);
            bytestream_put_be24(&p, 0);
            bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
            bytestream_put_be32(&p, 0);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        } else if (rpkt.type == RTMP_PT_METADATA) {
            // we got raw FLV data, make it available for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size;
            rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
            memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        ff_rtmp_packet_destroy(&rpkt);
    }

    return 0;
}

static int rtmp_close(URLContext *h)
{
    RTMPContext *rt = h->priv_data;

    if (!rt->is_input) {
        rt->flv_data = NULL;
        if (rt->out_pkt.data_size)
            ff_rtmp_packet_destroy(&rt->out_pkt);
        if (rt->state > STATE_FCPUBLISH)
            gen_fcunpublish_stream(h, rt);
    }
    if (rt->state > STATE_HANDSHAKED)
        gen_delete_stream(h, rt);

    av_freep(&rt->flv_data);
    url_close(rt->stream);
    av_free(rt);
    return 0;
}
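
/*
 * The 13-byte FLV header handed to the demuxer at the end of rtmp_open() is
 * "FLV", version 1, a flags byte of 5 (audio and video present), the 32-bit
 * header size 9, followed by the initial 32-bit PreviousTagSize of 0.
 */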

/**
 * Opens RTMP connection and verifies that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath]
 *             where 'app' is first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    RTMPContext *rt;
    char proto[8], hostname[256], path[1024], *fname;
    uint8_t buf[2048];
    int port;
    int ret;

    rt = av_mallocz(sizeof(RTMPContext));
    if (!rt)
        return AVERROR(ENOMEM);
    s->priv_data = rt;
    rt->is_input = !(flags & URL_WRONLY);

    url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
              path, sizeof(path), s->filename);

    if (port < 0)
        port = RTMP_DEFAULT_PORT;
    snprintf(buf, sizeof(buf), "tcp://%s:%d", hostname, port);

    if (url_open(&rt->stream, buf, URL_RDWR) < 0) {
        av_log(LOG_CONTEXT, AV_LOG_ERROR, "Cannot open connection %s\n", buf);
        goto fail;
    }

    rt->state = STATE_START;
    if (rtmp_handshake(s, rt))
        return -1;

    rt->chunk_size = 128;
    rt->state = STATE_HANDSHAKED;
    //extract "app" part from path
    if (!strncmp(path, "/ondemand/", 10)) {
        fname = path + 10;
        memcpy(rt->app, "ondemand", 9);
    } else {
        char *p = strchr(path + 1, '/');
        if (!p) {
            fname = path + 1;
            rt->app[0] = '\0';
        } else {
            char *c = strchr(p + 1, ':');
            fname = strchr(p + 1, '/');
            if (!fname || c < fname) {
                fname = p + 1;
                av_strlcpy(rt->app, path + 1, p - path);
            } else {
                fname++;
                av_strlcpy(rt->app, path + 1, fname - path - 1);
            }
        }
    }
    if (!strchr(fname, ':') &&
        (!strcmp(fname + strlen(fname) - 4, ".f4v") ||
         !strcmp(fname + strlen(fname) - 4, ".mp4"))) {
        memcpy(rt->playpath, "mp4:", 5);
    } else {
        rt->playpath[0] = 0;
    }
    strncat(rt->playpath, fname, sizeof(rt->playpath) - 5);

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
           proto, path, rt->app, rt->playpath);
    gen_connect(s, rt, proto, hostname, port);

    do {
        ret = get_packet(s, 1);
    } while (ret == AVERROR(EAGAIN));
    if (ret < 0)
        goto fail;

    if (rt->is_input) {
        // generate FLV header for demuxer
        rt->flv_size = 13;
        rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
        rt->flv_off  = 0;
        memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
    } else {
        rt->flv_size = 0;
        rt->flv_data = NULL;
        rt->flv_off  = 0;
    }

    s->max_packet_size = url_get_max_packet_size(rt->stream);
    s->is_streamed = 1;
    return 0;

fail:
    rtmp_close(s);
    return AVERROR(EIO);
}

static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int orig_size = size;
    int ret;

    while (size > 0) {
        int data_left = rt->flv_size - rt->flv_off;

        if (data_left >= size) {
            memcpy(buf, rt->flv_data + rt->flv_off, size);
            rt->flv_off += size;
            return orig_size;
        }
        if (data_left > 0) {
            memcpy(buf, rt->flv_data + rt->flv_off, data_left);
            buf  += data_left;
            size -= data_left;
            rt->flv_off = rt->flv_size;
        }
        if ((ret = get_packet(s, 0)) < 0)
            return ret;
    }
    return orig_size;
}
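
/*
 * rtmp_write() receives a live FLV stream from the muxer: it skips the 9-byte
 * FLV header and the 4-byte PreviousTagSize0 when present, then parses each
 * 11-byte FLV tag header and repackages the tag body as an RTMP packet on the
 * source channel. onMetaData notify tags are additionally prefixed with the
 * AMF string "@setDataFrame" (1-byte marker + 2-byte length + 13 characters,
 * hence the extra 16 bytes added to the packet size for RTMP_PT_NOTIFY).
 */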

static int rtmp_write(URLContext *h, uint8_t *buf, int size)
{
    RTMPContext *rt = h->priv_data;
    int size_temp = size;
    int pktsize, pkttype;
    uint32_t ts;
    const uint8_t *buf_temp = buf;

    if (size < 11) {
        av_log(LOG_CONTEXT, AV_LOG_DEBUG, "FLV packet too small %d\n", size);
        return 0;
    }

    do {
        if (!rt->flv_off) {
            //skip flv header
            if (buf_temp[0] == 'F' && buf_temp[1] == 'L' && buf_temp[2] == 'V') {
                buf_temp  += 9 + 4;
                size_temp -= 9 + 4;
            }

            pkttype = bytestream_get_byte(&buf_temp);
            pktsize = bytestream_get_be24(&buf_temp);
            ts  = bytestream_get_be24(&buf_temp);
            ts |= bytestream_get_byte(&buf_temp) << 24;
            bytestream_get_be24(&buf_temp);
            size_temp -= 11;
            rt->flv_size = pktsize;

            //force 12bytes header
            if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
                pkttype == RTMP_PT_NOTIFY) {
                if (pkttype == RTMP_PT_NOTIFY)
                    pktsize += 16;
                rt->prev_pkt[1][RTMP_SOURCE_CHANNEL].channel_id = 0;
            }

            //this can be a big packet, it's better to send it right here
            ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL, pkttype, ts, pktsize);
            rt->out_pkt.extra = rt->main_channel_id;
            rt->flv_data = rt->out_pkt.data;

            if (pkttype == RTMP_PT_NOTIFY)
                ff_amf_write_string(&rt->flv_data, "@setDataFrame");
        }

        if (rt->flv_size - rt->flv_off > size_temp) {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
            rt->flv_off += size_temp;
        } else {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
            rt->flv_off += rt->flv_size - rt->flv_off;
        }

        if (rt->flv_off == rt->flv_size) {
            bytestream_get_be32(&buf_temp);

            ff_rtmp_packet_write(rt->stream, &rt->out_pkt, rt->chunk_size, rt->prev_pkt[1]);
            ff_rtmp_packet_destroy(&rt->out_pkt);
            rt->flv_size = 0;
            rt->flv_off = 0;
        }
    } while (buf_temp - buf < size_temp);
    return size;
}

URLProtocol rtmp_protocol = {
    "rtmp",
    rtmp_open,
    rtmp_read,
    rtmp_write,
    NULL, /* seek */
    rtmp_close,
};