/*
 * RTMP network protocol
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file libavformat/rtmpproto.c
 * RTMP protocol
 */

#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "libavutil/lfg.h"
#include "libavutil/sha.h"
#include "avformat.h"

#include "network.h"

#include "flv.h"
#include "rtmp.h"
#include "rtmppkt.h"

/* we can't use av_log() with URLContext yet... */
#if LIBAVFORMAT_VERSION_MAJOR < 53
#define LOG_CONTEXT NULL
#else
#define LOG_CONTEXT s
#endif

//#define DEBUG

/** RTMP protocol handler state */
typedef enum {
    STATE_START,      ///< client has not done anything yet
    STATE_HANDSHAKED, ///< client has performed handshake
    STATE_RELEASING,  ///< client releasing stream before publishing it (for output)
    STATE_FCPUBLISH,  ///< client FCPublishing stream (for output)
    STATE_CONNECTING, ///< client connected to server successfully
    STATE_READY,      ///< client has sent all needed commands and waits for server reply
    STATE_PLAYING,    ///< client has started receiving multimedia data from server
    STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
    STATE_STOPPED,    ///< the broadcast has been stopped
} ClientState;

/** protocol handler context */
typedef struct RTMPContext {
    URLContext*   stream;                     ///< TCP stream used in interactions with RTMP server
    RTMPPacket    prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
    int           chunk_size;                 ///< size of the chunks RTMP packets are divided into
    int           is_input;                   ///< input/output flag
    char          playpath[256];              ///< path to filename to play (with possible "mp4:" prefix)
    char          app[128];                   ///< application
    ClientState   state;                      ///< current state
    int           main_channel_id;            ///< an additional channel ID which is used for some invocations
    uint8_t*      flv_data;                   ///< buffer with data for demuxer
    int           flv_size;                   ///< current buffer size
    int           flv_off;                    ///< number of bytes read from current buffer
    RTMPPacket    out_pkt;                    ///< rtmp packet, created from flv a/v or metadata (for output)
} RTMPContext;

#define PLAYER_KEY_OPEN_PART_LEN 30   ///< length of partial key used for first client digest signing
/** Client key used for digest signing */
static const uint8_t rtmp_player_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',
    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

#define SERVER_KEY_OPEN_PART_LEN 36   ///< length of partial key used for first server digest signing
/** Key used for RTMP server digest signing */
static const uint8_t rtmp_server_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
    'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',
    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
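
/*
 * The gen_* helpers below build AMF0-encoded invoke commands. In AMF0 a
 * string is serialized as the marker byte 0x02 followed by a big-endian
 * 16-bit length and the string bytes, and a number as the marker 0x00
 * followed by a 64-bit big-endian IEEE-754 double; this is why server
 * replies can later be matched against raw byte sequences such as
 * "\002\000\007_result" in rtmp_parse_result().
 */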

/**
 * Generates 'connect' call and sends it to the server.
 */
static void gen_connect(URLContext *s, RTMPContext *rt, const char *proto,
                        const char *host, int port)
{
    RTMPPacket pkt;
    uint8_t ver[64], *p;
    char tcurl[512];

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 4096);
    p = pkt.data;

    snprintf(tcurl, sizeof(tcurl), "%s://%s:%d/%s", proto, host, port, rt->app);
    ff_amf_write_string(&p, "connect");
    ff_amf_write_number(&p, 1.0);
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "app");
    ff_amf_write_string(&p, rt->app);

    if (rt->is_input) {
        snprintf(ver, sizeof(ver), "%s %d,%d,%d,%d", RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1,
                 RTMP_CLIENT_VER2, RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
    } else {
        snprintf(ver, sizeof(ver), "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
        ff_amf_write_field_name(&p, "type");
        ff_amf_write_string(&p, "nonprivate");
    }
    ff_amf_write_field_name(&p, "flashVer");
    ff_amf_write_string(&p, ver);
    ff_amf_write_field_name(&p, "tcUrl");
    ff_amf_write_string(&p, tcurl);
    if (rt->is_input) {
        ff_amf_write_field_name(&p, "fpad");
        ff_amf_write_bool(&p, 0);
        ff_amf_write_field_name(&p, "capabilities");
        ff_amf_write_number(&p, 15.0);
        ff_amf_write_field_name(&p, "audioCodecs");
        ff_amf_write_number(&p, 1639.0);
        ff_amf_write_field_name(&p, "videoCodecs");
        ff_amf_write_number(&p, 252.0);
        ff_amf_write_field_name(&p, "videoFunction");
        ff_amf_write_number(&p, 1.0);
    }
    ff_amf_write_object_end(&p);

    pkt.data_size = p - pkt.data;

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
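
/*
 * The number written right after each command name (1.0 for connect, 2.0 for
 * releaseStream and so on) is the AMF invoke transaction ID. The server
 * echoes it in the corresponding "_result" reply, which is how
 * rtmp_parse_result() can recognise e.g. the createStream result
 * (transaction ID 4 in the output case) among the pending calls.
 */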

/**
 * Generates 'releaseStream' call and sends it to the server. It should make
 * the server release some channel for media streams.
 */
static void gen_release_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          29 + strlen(rt->playpath));

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Releasing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "releaseStream");
    ff_amf_write_number(&p, 2.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'FCPublish' call and sends it to the server. It should make
 * the server prepare for receiving media streams.
 */
static void gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          25 + strlen(rt->playpath));

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "FCPublish stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCPublish");
    ff_amf_write_number(&p, 3.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'FCUnpublish' call and sends it to the server. It should make
 * the server destroy the stream.
 */
static void gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          27 + strlen(rt->playpath));

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "UnPublishing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCUnpublish");
    ff_amf_write_number(&p, 5.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'createStream' call and sends it to the server. It should make
 * the server allocate some channel for media streams.
 */
static void gen_create_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Creating stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 25);

    p = pkt.data;
    ff_amf_write_string(&p, "createStream");
    ff_amf_write_number(&p, rt->is_input ? 3.0 : 4.0);
    ff_amf_write_null(&p);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'deleteStream' call and sends it to the server. It should make
 * the server remove some channel for media streams.
 */
static void gen_delete_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Deleting stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 34);

    p = pkt.data;
    ff_amf_write_string(&p, "deleteStream");
    ff_amf_write_number(&p, 0.0);
    ff_amf_write_null(&p);
    ff_amf_write_number(&p, rt->main_channel_id);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'play' call and sends it to the server, then pings the server
 * to start actual playing.
 */
static void gen_play(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE, 0,
                          20 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "play");
    ff_amf_write_number(&p, 0.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    // set client buffer time disguised in ping packet
    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, 1, 10);

    p = pkt.data;
    bytestream_put_be16(&p, 3);
    bytestream_put_be32(&p, 1);
    bytestream_put_be32(&p, 256); //TODO: what is a good value here?

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates 'publish' call and sends it to the server.
 */
static void gen_publish(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, 0,
                          30 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "publish");
    ff_amf_write_number(&p, 0.0);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_string(&p, "live");

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generates ping reply and sends it to the server.
 */
static void gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, ppkt->timestamp + 1, 6);
    p = pkt.data;
    bytestream_put_be16(&p, 7);
    bytestream_put_be32(&p, AV_RB32(ppkt->data+2) + 1);
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
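
/*
 * Ping (user control) message types used in this file: 3 = set client buffer
 * time (sent in gen_play()), 6 = ping request from the server,
 * 7 = pong reply to such a request (sent in gen_pong()).
 */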

//TODO: Move HMAC code somewhere. Eventually.
#define HMAC_IPAD_VAL 0x36
#define HMAC_OPAD_VAL 0x5C
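
/*
 * rtmp_calc_digest() implements the standard HMAC construction (RFC 2104)
 * over SHA-256 with its 64-byte block size:
 * HMAC(key, msg) = H((key ^ opad) || H((key ^ ipad) || msg)).
 */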

/**
 * Calculates HMAC-SHA256 digest for RTMP handshake packets.
 *
 * @param src    input buffer
 * @param len    input buffer length (should be 1536)
 * @param gap    offset in buffer where 32 bytes should not be taken into account
 *               when calculating digest (since it will be used to store that digest)
 * @param key    digest key
 * @param keylen digest key length
 * @param dst    buffer where calculated digest will be stored (32 bytes)
 */
static void rtmp_calc_digest(const uint8_t *src, int len, int gap,
                             const uint8_t *key, int keylen, uint8_t *dst)
{
    struct AVSHA *sha;
    uint8_t hmac_buf[64+32] = {0};
    int i;

    sha = av_mallocz(av_sha_size);

    if (keylen < 64) {
        memcpy(hmac_buf, key, keylen);
    } else {
        av_sha_init(sha, 256);
        av_sha_update(sha, key, keylen);
        av_sha_final(sha, hmac_buf);
    }
    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL;

    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64);
    if (gap <= 0) {
        av_sha_update(sha, src, len);
    } else { //skip 32 bytes used for storing digest
        av_sha_update(sha, src, gap);
        av_sha_update(sha, src + gap + 32, len - gap - 32);
    }
    av_sha_final(sha, hmac_buf + 64);

    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL ^ HMAC_OPAD_VAL; //reuse XORed key for opad
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64+32);
    av_sha_final(sha, dst);

    av_free(sha);
}

/**
 * Puts HMAC-SHA256 digest of packet data (except for the bytes where this
 * digest will be stored) into that packet.
 *
 * @param buf handshake data (1536 bytes)
 * @return offset to the digest inside input data
 */
static int rtmp_handshake_imprint_with_digest(uint8_t *buf)
{
    int i, digest_pos = 0;

    /* the digest offset is derived from bytes 8-11 of the packet:
     * (sum of those bytes % 728) + 12, so the 32-byte digest always fits
     * inside the first 764-byte block that follows the 8-byte header */
    for (i = 8; i < 12; i++)
        digest_pos += buf[i];
    digest_pos = (digest_pos % 728) + 12;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
                     buf + digest_pos);
    return digest_pos;
}

/**
 * Verifies that the received server response has the expected digest value.
 *
 * @param buf handshake data received from the server (1536 bytes)
 * @param off position to search digest offset from
 * @return digest position if the digest is valid, 0 otherwise
 */
static int rtmp_validate_digest(uint8_t *buf, int off)
{
    int i, digest_pos = 0;
    uint8_t digest[32];

    for (i = 0; i < 4; i++)
        digest_pos += buf[i + off];
    digest_pos = (digest_pos % 728) + off + 4;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
                     digest);
    if (!memcmp(digest, buf + digest_pos, 32))
        return digest_pos;
    return 0;
}
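
/*
 * Handshake overview: the client sends C0 (one protocol version byte) and C1
 * (1536 bytes: uptime, client version and random data with an embedded
 * digest), the server answers with S0+S1 and S2, and the client finishes
 * with C2, which signs the server's random data with a key derived from the
 * digest found in S1.
 */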

/**
 * Performs handshake with the server by means of exchanging pseudorandom data
 * signed with HMAC-SHA256 digest.
 *
 * @return 0 if handshake succeeds, negative value otherwise
 */
static int rtmp_handshake(URLContext *s, RTMPContext *rt)
{
    AVLFG rnd;
    uint8_t tosend    [RTMP_HANDSHAKE_PACKET_SIZE+1] = {
        3,                // unencrypted data
        0, 0, 0, 0,       // client uptime
        RTMP_CLIENT_VER1,
        RTMP_CLIENT_VER2,
        RTMP_CLIENT_VER3,
        RTMP_CLIENT_VER4,
    };
    uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
    uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
    int i;
    int server_pos, client_pos;
    uint8_t digest[32];

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Handshaking...\n");

    av_lfg_init(&rnd, 0xDEADC0DE);
    // generate handshake packet - 1536 bytes of pseudorandom data
    for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
        tosend[i] = av_lfg_get(&rnd) >> 24;
    client_pos = rtmp_handshake_imprint_with_digest(tosend + 1);

    url_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    i = url_read_complete(rt->stream, serverdata, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE + 1) {
        av_log(LOG_CONTEXT, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }
    i = url_read_complete(rt->stream, clientdata, RTMP_HANDSHAKE_PACKET_SIZE);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE) {
        av_log(LOG_CONTEXT, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
           serverdata[5], serverdata[6], serverdata[7], serverdata[8]);

    if (rt->is_input) {
        server_pos = rtmp_validate_digest(serverdata + 1, 772);
        if (!server_pos) {
            server_pos = rtmp_validate_digest(serverdata + 1, 8);
            if (!server_pos) {
                av_log(LOG_CONTEXT, AV_LOG_ERROR, "Server response validating failed\n");
                return -1;
            }
        }

        rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
                         rtmp_server_key, sizeof(rtmp_server_key),
                         digest);
        rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE-32, 0,
                         digest, 32,
                         digest);
        if (memcmp(digest, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
            av_log(LOG_CONTEXT, AV_LOG_ERROR, "Signature mismatch\n");
            return -1;
        }

        for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
            tosend[i] = av_lfg_get(&rnd) >> 24;
        rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
                         rtmp_player_key, sizeof(rtmp_player_key),
                         digest);
        rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                         digest, 32,
                         tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);

        // write reply back to the server
        url_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE);
    } else {
        // for output simply echo S1 back to the server as C2
        url_write(rt->stream, serverdata+1, RTMP_HANDSHAKE_PACKET_SIZE);
    }
    return 0;
}

/**
 * Parses received packet and may perform some action depending on
 * the packet contents.
 * @return 0 for no errors, negative values for serious errors which prevent
 *         further communications, positive values for uncritical errors
 */
static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
{
    int i, t;
    const uint8_t *data_end = pkt->data + pkt->data_size;

#ifdef DEBUG
    ff_rtmp_packet_dump(LOG_CONTEXT, pkt);
#endif

    switch (pkt->type) {
    case RTMP_PT_CHUNK_SIZE:
        if (pkt->data_size != 4) {
            av_log(LOG_CONTEXT, AV_LOG_ERROR,
                   "Chunk size change packet is not 4 bytes long (%d)\n", pkt->data_size);
            return -1;
        }
        if (!rt->is_input)
            ff_rtmp_packet_write(rt->stream, pkt, rt->chunk_size, rt->prev_pkt[1]);
        rt->chunk_size = AV_RB32(pkt->data);
        if (rt->chunk_size <= 0) {
            av_log(LOG_CONTEXT, AV_LOG_ERROR, "Incorrect chunk size %d\n", rt->chunk_size);
            return -1;
        }
        av_log(LOG_CONTEXT, AV_LOG_DEBUG, "New chunk size = %d\n", rt->chunk_size);
        break;
    case RTMP_PT_PING:
        t = AV_RB16(pkt->data);
        if (t == 6)
            gen_pong(s, rt, pkt);
        break;
    case RTMP_PT_INVOKE:
        //TODO: check for the messages sent for wrong state?
        if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
            uint8_t tmpstr[256];

            if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                        "description", tmpstr, sizeof(tmpstr)))
                av_log(LOG_CONTEXT, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
            return -1;
        } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
            switch (rt->state) {
            case STATE_HANDSHAKED:
                if (!rt->is_input) {
                    gen_release_stream(s, rt);
                    gen_fcpublish_stream(s, rt);
                    rt->state = STATE_RELEASING;
                } else {
                    rt->state = STATE_CONNECTING;
                }
                gen_create_stream(s, rt);
                break;
            case STATE_FCPUBLISH:
                rt->state = STATE_CONNECTING;
                break;
            case STATE_RELEASING:
                rt->state = STATE_FCPUBLISH;
                /* hack for Wowza Media Server, it does not send result for
                 * releaseStream and FCPublish calls */
                if (!pkt->data[10]) {
                    int pkt_id = (int) av_int2dbl(AV_RB64(pkt->data + 11));
                    if (pkt_id == 4)
                        rt->state = STATE_CONNECTING;
                }
                if (rt->state != STATE_CONNECTING)
                    break;
                /* fall through */
            case STATE_CONNECTING:
                //extract a number from the result
                if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
                    av_log(LOG_CONTEXT, AV_LOG_WARNING, "Unexpected reply on connect()\n");
                } else {
                    rt->main_channel_id = (int) av_int2dbl(AV_RB64(pkt->data + 21));
                }
                if (rt->is_input) {
                    gen_play(s, rt);
                } else {
                    gen_publish(s, rt);
                }
                rt->state = STATE_READY;
                break;
            }
        } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
            const uint8_t* ptr = pkt->data + 11;
            uint8_t tmpstr[256];

            for (i = 0; i < 2; i++) {
                t = ff_amf_tag_size(ptr, data_end);
                if (t < 0)
                    return 1;
                ptr += t;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "level", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "error")) {
                if (!ff_amf_get_field_value(ptr, data_end,
                                            "description", tmpstr, sizeof(tmpstr)))
                    av_log(LOG_CONTEXT, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
                return -1;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "code", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "NetStream.Play.Start"))           rt->state = STATE_PLAYING;
            if (!t && !strcmp(tmpstr, "NetStream.Play.Stop"))            rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Publish.Start"))        rt->state = STATE_PUBLISHING;
        }
        break;
    }
    return 0;
}
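
/*
 * Data exchanged with the FLV (de)muxer uses the standard FLV tag layout:
 * an 11-byte header (tag type, 24-bit data size, 24-bit timestamp plus an
 * extended-timestamp byte, 24-bit stream id) followed by the payload and a
 * 32-bit PreviousTagSize field. get_packet() wraps incoming RTMP packets
 * into this form for the demuxer and rtmp_write() strips it again on output.
 */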

/**
 * Interacts with the server by receiving and sending RTMP packets until
 * there is some significant data (media data or expected status notification).
 *
 * @param s          reading context
 * @param for_header non-zero value tells function to work until it
 *                   gets notification from the server that playing has been started,
 *                   otherwise function will work until some media data is received (or
 *                   an error happens)
 * @return 0 for successful operation, negative value in case of error
 */
static int get_packet(URLContext *s, int for_header)
{
    RTMPContext *rt = s->priv_data;
    int ret;

    if (rt->state == STATE_STOPPED)
        return AVERROR_EOF;

    for (;;) {
        RTMPPacket rpkt;
        if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
                                       rt->chunk_size, rt->prev_pkt[0])) != 0) {
            if (ret > 0) {
                return AVERROR(EAGAIN);
            } else {
                return AVERROR(EIO);
            }
        }

        ret = rtmp_parse_result(s, rt, &rpkt);
        if (ret < 0) { //serious error in current packet
            ff_rtmp_packet_destroy(&rpkt);
            return -1;
        }
        if (rt->state == STATE_STOPPED) {
            ff_rtmp_packet_destroy(&rpkt);
            return AVERROR_EOF;
        }
        if (for_header && (rt->state == STATE_PLAYING || rt->state == STATE_PUBLISHING)) {
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        if (!rpkt.data_size || !rt->is_input) {
            ff_rtmp_packet_destroy(&rpkt);
            continue;
        }
        if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
           (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
            uint8_t *p;
            uint32_t ts = rpkt.timestamp;

            // generate packet header and put data into buffer for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size + 15;
            rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);
            bytestream_put_byte(&p, rpkt.type);
            bytestream_put_be24(&p, rpkt.data_size);
            bytestream_put_be24(&p, ts);
            bytestream_put_byte(&p, ts >> 24);
            bytestream_put_be24(&p, 0);
            bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
            bytestream_put_be32(&p, 0);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        } else if (rpkt.type == RTMP_PT_METADATA) {
            // we got raw FLV data, make it available for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size;
            rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
            memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        ff_rtmp_packet_destroy(&rpkt);
    }
    return 0;
}

static int rtmp_close(URLContext *h)
{
    RTMPContext *rt = h->priv_data;

    if (!rt->is_input) {
        rt->flv_data = NULL;
        if (rt->out_pkt.data_size)
            ff_rtmp_packet_destroy(&rt->out_pkt);
        if (rt->state > STATE_FCPUBLISH)
            gen_fcunpublish_stream(h, rt);
    }
    if (rt->state > STATE_HANDSHAKED)
        gen_delete_stream(h, rt);

    av_freep(&rt->flv_data);
    url_close(rt->stream);
    av_free(rt);
    return 0;
}

/**
 * Opens RTMP connection and verifies that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath]
 *             where 'app' is the first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    RTMPContext *rt;
    char proto[8], hostname[256], path[1024], *fname;
    uint8_t buf[2048];
    int port;
    int ret;

    rt = av_mallocz(sizeof(RTMPContext));
    if (!rt)
        return AVERROR(ENOMEM);
    s->priv_data = rt;
    rt->is_input = !(flags & URL_WRONLY);

    url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
              path, sizeof(path), s->filename);

    if (port < 0)
        port = RTMP_DEFAULT_PORT;
    snprintf(buf, sizeof(buf), "tcp://%s:%d", hostname, port);

    if (url_open(&rt->stream, buf, URL_RDWR) < 0) {
        av_log(LOG_CONTEXT, AV_LOG_ERROR, "Cannot open connection %s\n", buf);
        goto fail;
    }

    rt->state = STATE_START;
    if (rtmp_handshake(s, rt))
        goto fail;

    rt->chunk_size = 128;
    rt->state = STATE_HANDSHAKED;
    //extract "app" part from path
    if (!strncmp(path, "/ondemand/", 10)) {
        fname = path + 10;
        memcpy(rt->app, "ondemand", 9);
    } else {
        char *p = strchr(path + 1, '/');
        if (!p) {
            fname = path + 1;
            rt->app[0] = '\0';
        } else {
            char *c = strchr(p + 1, ':');
            fname = strchr(p + 1, '/');
            if (!fname || c < fname) {
                fname = p + 1;
                av_strlcpy(rt->app, path + 1, p - path);
            } else {
                fname++;
                av_strlcpy(rt->app, path + 1, fname - path - 1);
            }
        }
    }
    if (!strchr(fname, ':') &&
        (!strcmp(fname + strlen(fname) - 4, ".f4v") ||
         !strcmp(fname + strlen(fname) - 4, ".mp4"))) {
        memcpy(rt->playpath, "mp4:", 5);
    } else {
        rt->playpath[0] = 0;
    }
    strncat(rt->playpath, fname, sizeof(rt->playpath) - 5);

    av_log(LOG_CONTEXT, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
           proto, path, rt->app, rt->playpath);
    gen_connect(s, rt, proto, hostname, port);

    do {
        ret = get_packet(s, 1);
    } while (ret == AVERROR(EAGAIN));
    if (ret < 0)
        goto fail;

    if (rt->is_input) {
        // generate FLV header for demuxer:
        // "FLV", version 1, audio+video flags, 9-byte header, zero PreviousTagSize
        rt->flv_size = 13;
        rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
        rt->flv_off  = 0;
        memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
    } else {
        rt->flv_size = 0;
        rt->flv_data = NULL;
        rt->flv_off  = 0;
    }

    s->max_packet_size = url_get_max_packet_size(rt->stream);
    s->is_streamed = 1;
    return 0;

fail:
    rtmp_close(s);
    return AVERROR(EIO);
}

static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int orig_size = size;
    int ret;

    while (size > 0) {
        int data_left = rt->flv_size - rt->flv_off;

        if (data_left >= size) {
            memcpy(buf, rt->flv_data + rt->flv_off, size);
            rt->flv_off += size;
            return orig_size;
        }
        if (data_left > 0) {
            memcpy(buf, rt->flv_data + rt->flv_off, data_left);
            buf  += data_left;
            size -= data_left;
            rt->flv_off = rt->flv_size;
        }
        if ((ret = get_packet(s, 0)) < 0)
            return ret;
    }
    return orig_size;
}

static int rtmp_write(URLContext *h, uint8_t *buf, int size)
{
    RTMPContext *rt = h->priv_data;
    int size_temp = size;
    int pktsize, pkttype;
    uint32_t ts;
    const uint8_t *buf_temp = buf;

    if (size < 11) {
        av_log(LOG_CONTEXT, AV_LOG_DEBUG, "FLV packet too small %d\n", size);
        return 0;
    }

    do {
        if (!rt->flv_off) {
            //skip flv header
            if (buf_temp[0] == 'F' && buf_temp[1] == 'L' && buf_temp[2] == 'V') {
                buf_temp  += 9 + 4;
                size_temp -= 9 + 4;
            }

            //read FLV tag header
            pkttype = bytestream_get_byte(&buf_temp);
            pktsize = bytestream_get_be24(&buf_temp);
            ts  = bytestream_get_be24(&buf_temp);
            ts |= bytestream_get_byte(&buf_temp) << 24;
            bytestream_get_be24(&buf_temp);
            size_temp -= 11;
            rt->flv_size = pktsize;

            //force 12-byte header
            if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
                pkttype == RTMP_PT_NOTIFY) {
                if (pkttype == RTMP_PT_NOTIFY)
                    pktsize += 16; // room for the "@setDataFrame" AMF string
                rt->prev_pkt[1][RTMP_SOURCE_CHANNEL].channel_id = 0;
            }

            //this can be a big packet, it's better to send it right here
            ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL, pkttype, ts, pktsize);
            rt->out_pkt.extra = rt->main_channel_id;
            rt->flv_data = rt->out_pkt.data;

            if (pkttype == RTMP_PT_NOTIFY)
                ff_amf_write_string(&rt->flv_data, "@setDataFrame");
        }

        if (rt->flv_size - rt->flv_off > size_temp) {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
            rt->flv_off += size_temp;
        } else {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
            rt->flv_off += rt->flv_size - rt->flv_off;
        }

        if (rt->flv_off == rt->flv_size) {
            bytestream_get_be32(&buf_temp); // skip the FLV PreviousTagSize field

            ff_rtmp_packet_write(rt->stream, &rt->out_pkt, rt->chunk_size, rt->prev_pkt[1]);
            ff_rtmp_packet_destroy(&rt->out_pkt);
            rt->flv_size = 0;
            rt->flv_off = 0;
        }
    } while (buf_temp - buf < size_temp);
    return size;
}

URLProtocol rtmp_protocol = {
    "rtmp",
    rtmp_open,
    rtmp_read,
    rtmp_write,
    NULL, /* seek */
    rtmp_close,
};