/*
 * RTMP network protocol
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RTMP protocol
 */

#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/lfg.h"
#include "libavutil/opt.h"
#include "libavutil/sha.h"
#include "avformat.h"
#include "internal.h"
#include "network.h"
#include "flv.h"
#include "rtmp.h"
#include "rtmppkt.h"
#include "url.h"

//#define DEBUG

#define APP_MAX_LENGTH      128
#define PLAYPATH_MAX_LENGTH 256
#define TCURL_MAX_LENGTH    512
#define FLASHVER_MAX_LENGTH 64

/** RTMP protocol handler state */
typedef enum {
    STATE_START,      ///< client has not done anything yet
    STATE_HANDSHAKED, ///< client has performed handshake
    STATE_RELEASING,  ///< client releasing stream before publishing it (for output)
    STATE_FCPUBLISH,  ///< client FCPublishing stream (for output)
    STATE_CONNECTING, ///< client connected to server successfully
    STATE_READY,      ///< client has sent all needed commands and waits for server reply
    STATE_PLAYING,    ///< client has started receiving multimedia data from server
    STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
    STATE_STOPPED,    ///< the broadcast has been stopped
} ClientState;

/** protocol handler context */
typedef struct RTMPContext {
    const AVClass *class;
    URLContext*   stream;                     ///< TCP stream used in interactions with RTMP server
    RTMPPacket    prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
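    /* prev_pkt[0] keeps history for packets read from the server,
     * prev_pkt[1] for packets written to it */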
    int           chunk_size;                 ///< size of the chunks RTMP packets are divided into
    int           is_input;                   ///< input/output flag
    char          *playpath;                  ///< stream identifier to play (with possible "mp4:" prefix)
    int           live;                       ///< 0: recorded, -1: live, -2: both
    char          *app;                       ///< name of application
    ClientState   state;                      ///< current state
    int           main_channel_id;            ///< an additional channel ID which is used for some invocations
    uint8_t*      flv_data;                   ///< buffer with data for demuxer
    int           flv_size;                   ///< current buffer size
    int           flv_off;                    ///< number of bytes read from current buffer
    RTMPPacket    out_pkt;                    ///< rtmp packet, created from flv a/v or metadata (for output)
    uint32_t      client_report_size;         ///< number of bytes after which client should report to server
    uint32_t      bytes_read;                 ///< number of bytes read from server
    uint32_t      last_bytes_read;            ///< number of bytes read last reported to server
    int           skip_bytes;                 ///< number of bytes to skip from the input FLV stream in the next write call
    uint8_t       flv_header[11];             ///< partial incoming flv packet header
    int           flv_header_bytes;           ///< number of initialized bytes in flv_header
    int           nb_invokes;                 ///< keeps track of invoke messages
    int           create_stream_invoke;       ///< invoke id for the create stream command
    char*         tcurl;                      ///< url of the target stream
    char*         flashver;                   ///< version of the flash plugin
    char*         swfurl;                     ///< url of the swf player
} RTMPContext;

#define PLAYER_KEY_OPEN_PART_LEN 30   ///< length of partial key used for first client digest signing
/** Client key used for digest signing */
static const uint8_t rtmp_player_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

#define SERVER_KEY_OPEN_PART_LEN 36   ///< length of partial key used for first server digest signing
/** Key used for RTMP server digest signing */
static const uint8_t rtmp_server_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
    'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

/**
 * Generate 'connect' call and send it to the server.
 */
static void gen_connect(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 4096);
    p = pkt.data;

    ff_amf_write_string(&p, "connect");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "app");
    ff_amf_write_string(&p, rt->app);

    if (!rt->is_input) {
        ff_amf_write_field_name(&p, "type");
        ff_amf_write_string(&p, "nonprivate");
    }
    ff_amf_write_field_name(&p, "flashVer");
    ff_amf_write_string(&p, rt->flashver);

    if (rt->swfurl) {
        ff_amf_write_field_name(&p, "swfUrl");
        ff_amf_write_string(&p, rt->swfurl);
    }

    ff_amf_write_field_name(&p, "tcUrl");
    ff_amf_write_string(&p, rt->tcurl);
    if (rt->is_input) {
        ff_amf_write_field_name(&p, "fpad");
        ff_amf_write_bool(&p, 0);
        ff_amf_write_field_name(&p, "capabilities");
        ff_amf_write_number(&p, 15.0);

        /* Tell the server we support all the audio codecs except
         * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
         * which are unused in the RTMP protocol implementation. */
        ff_amf_write_field_name(&p, "audioCodecs");
        ff_amf_write_number(&p, 4071.0);
        ff_amf_write_field_name(&p, "videoCodecs");
        ff_amf_write_number(&p, 252.0);
        ff_amf_write_field_name(&p, "videoFunction");
        ff_amf_write_number(&p, 1.0);
    }
    ff_amf_write_object_end(&p);

    pkt.data_size = p - pkt.data;

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate 'releaseStream' call and send it to the server. It should make
 * the server release some channel for media streams.
 */
static void gen_release_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          29 + strlen(rt->playpath));

    av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "releaseStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate 'FCPublish' call and send it to the server. It should make
 * the server prepare for receiving media streams.
 */
static void gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          25 + strlen(rt->playpath));

    av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCPublish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate 'FCUnpublish' call and send it to the server. It should make
 * the server destroy the stream.
 */
static void gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          27 + strlen(rt->playpath));

    av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCUnpublish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate 'createStream' call and send it to the server. It should make
 * the server allocate some channel for media streams.
 */
static void gen_create_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Creating stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 25);

    p = pkt.data;
    ff_amf_write_string(&p, "createStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    rt->create_stream_invoke = rt->nb_invokes;

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate 'deleteStream' call and send it to the server. It should make
 * the server remove some channel for media streams.
 */
static void gen_delete_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 34);

    p = pkt.data;
    ff_amf_write_string(&p, "deleteStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_number(&p, rt->main_channel_id);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate 'play' call and send it to the server, then ping the server
 * to start actual playing.
 */
static void gen_play(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE, 0,
                          29 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "play");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_number(&p, rt->live);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    // set client buffer time disguised in ping packet
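    // (user control event type 3 = SetBufferLength: a 4-byte stream ID
    //  followed by the requested buffer length in milliseconds)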
    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, 1, 10);

    p = pkt.data;
    bytestream_put_be16(&p, 3);
    bytestream_put_be32(&p, 1);
    bytestream_put_be32(&p, 256); //TODO: what is a good value here?

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate 'publish' call and send it to the server.
 */
static void gen_publish(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, 0,
                          30 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "publish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_string(&p, "live");

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate ping reply and send it to the server.
 */
static void gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, ppkt->timestamp + 1, 6);
    p = pkt.data;
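    // user control event type 7 = ping response; echo back the 4-byte value
    // carried by the server's ping request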
    bytestream_put_be16(&p, 7);
    bytestream_put_be32(&p, AV_RB32(ppkt->data + 2));
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate server bandwidth message and send it to the server.
 */
static void gen_server_bw(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW, 0, 4);
    p = pkt.data;
    bytestream_put_be32(&p, 2500000);
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate check bandwidth message and send it to the server.
 */
static void gen_check_bw(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 21);

    p = pkt.data;
    ff_amf_write_string(&p, "_checkbw");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

/**
 * Generate report on bytes read so far and send it to the server.
 */
static void gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ, ts, 4);
    p = pkt.data;
    bytestream_put_be32(&p, rt->bytes_read);
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}

//TODO: Move HMAC code somewhere. Eventually.
#define HMAC_IPAD_VAL 0x36
#define HMAC_OPAD_VAL 0x5C

/**
 * Calculate HMAC-SHA2 digest for RTMP handshake packets.
 *
 * @param src    input buffer
 * @param len    input buffer length (should be 1536)
 * @param gap    offset in buffer where 32 bytes should not be taken into account
 *               when calculating digest (since it will be used to store that digest)
 * @param key    digest key
 * @param keylen digest key length
 * @param dst    buffer where calculated digest will be stored (32 bytes)
 */
static void rtmp_calc_digest(const uint8_t *src, int len, int gap,
                             const uint8_t *key, int keylen, uint8_t *dst)
{
    struct AVSHA *sha;
    uint8_t hmac_buf[64 + 32] = { 0 };
    int i;
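    /* Standard HMAC construction H((K ^ opad) || H((K ^ ipad) || msg)) with
     * SHA-256 as the hash; the 32 bytes at 'gap' are excluded from the message
     * because the digest itself will be stored there. */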
    sha = av_mallocz(av_sha_size);

    if (keylen < 64) {
        memcpy(hmac_buf, key, keylen);
    } else {
        av_sha_init(sha, 256);
        av_sha_update(sha, key, keylen);
        av_sha_final(sha, hmac_buf);
    }
    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL;

    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64);
    if (gap <= 0) {
        av_sha_update(sha, src, len);
    } else { //skip 32 bytes used for storing digest
        av_sha_update(sha, src, gap);
        av_sha_update(sha, src + gap + 32, len - gap - 32);
    }
    av_sha_final(sha, hmac_buf + 64);

    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL ^ HMAC_OPAD_VAL; //reuse XORed key for opad
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64 + 32);
    av_sha_final(sha, dst);

    av_free(sha);
}

/**
 * Put HMAC-SHA2 digest of packet data (except for the bytes where this digest
 * will be stored) into that packet.
 *
 * @param buf handshake data (1536 bytes)
 * @return offset to the digest inside input data
 */
static int rtmp_handshake_imprint_with_digest(uint8_t *buf)
{
    int i, digest_pos = 0;
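    /* The digest offset is derived from the four bytes at positions 8-11:
     * their sum modulo 728, plus 12, places the 32-byte digest somewhere in
     * the first block of pseudorandom data. */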
    for (i = 8; i < 12; i++)
        digest_pos += buf[i];
    digest_pos = (digest_pos % 728) + 12;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
                     buf + digest_pos);

    return digest_pos;
}

/**
 * Verify that the received server response has the expected digest value.
 *
 * @param buf handshake data received from the server (1536 bytes)
 * @param off position to search digest offset from
 * @return 0 if digest is valid, digest position otherwise
 */
static int rtmp_validate_digest(uint8_t *buf, int off)
{
    int i, digest_pos = 0;
    uint8_t digest[32];
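    /* Mirror of the client-side scheme: the four bytes at 'off' give the
     * digest offset (sum % 728 + off + 4); recompute the digest with the
     * server key and compare it with what the server sent. */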
    for (i = 0; i < 4; i++)
        digest_pos += buf[i + off];
    digest_pos = (digest_pos % 728) + off + 4;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
                     digest);
    if (!memcmp(digest, buf + digest_pos, 32))
        return digest_pos;
    return 0;
}

/**
 * Perform handshake with the server by means of exchanging pseudorandom data
 * signed with HMAC-SHA2 digest.
 *
 * @return 0 if handshake succeeds, negative value otherwise
 */
static int rtmp_handshake(URLContext *s, RTMPContext *rt)
{
    AVLFG rnd;
    uint8_t tosend    [RTMP_HANDSHAKE_PACKET_SIZE+1] = {
        3,                // unencrypted data
        0, 0, 0, 0,       // client uptime
        RTMP_CLIENT_VER1,
        RTMP_CLIENT_VER2,
        RTMP_CLIENT_VER3,
        RTMP_CLIENT_VER4,
    };
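    /* tosend holds C0 + C1: one version byte (3 = plain RTMP), then the
     * 1536-byte handshake payload (4 bytes uptime, 4 bytes client version,
     * and pseudorandom data with the client digest imprinted into it). */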
    uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
    uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
    int i;
    int server_pos, client_pos;
    uint8_t digest[32];

    av_log(s, AV_LOG_DEBUG, "Handshaking...\n");

    av_lfg_init(&rnd, 0xDEADC0DE);
    // generate handshake packet - 1536 bytes of pseudorandom data
    for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
        tosend[i] = av_lfg_get(&rnd) >> 24;
    client_pos = rtmp_handshake_imprint_with_digest(tosend + 1);

    ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    i = ffurl_read_complete(rt->stream, serverdata, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE + 1) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }
    i = ffurl_read_complete(rt->stream, clientdata, RTMP_HANDSHAKE_PACKET_SIZE);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }

    av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
           serverdata[5], serverdata[6], serverdata[7], serverdata[8]);

    if (rt->is_input && serverdata[5] >= 3) {
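        /* Servers advertising a Flash Player 9-style version place their
         * digest either in the second half of the payload (offset base 772)
         * or in the first half (offset base 8); accept whichever validates. */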
        server_pos = rtmp_validate_digest(serverdata + 1, 772);
        if (!server_pos) {
            server_pos = rtmp_validate_digest(serverdata + 1, 8);
            if (!server_pos) {
                av_log(s, AV_LOG_ERROR, "Server response validating failed\n");
                return -1;
            }
        }

        rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
                         rtmp_server_key, sizeof(rtmp_server_key),
                         digest);
        rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                         digest, 32,
                         digest);
        if (memcmp(digest, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
            av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
            return -1;
        }

        for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
            tosend[i] = av_lfg_get(&rnd) >> 24;
        rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
                         rtmp_player_key, sizeof(rtmp_player_key),
                         digest);
        rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                         digest, 32,
                         tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);

        // write reply back to the server
        ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE);
    } else {
        ffurl_write(rt->stream, serverdata + 1, RTMP_HANDSHAKE_PACKET_SIZE);
    }

    return 0;
}

/**
 * Parse received packet and possibly perform some action depending on
 * the packet contents.
 * @return 0 for no errors, negative values for serious errors which prevent
 *         further communications, positive values for uncritical errors
 */
static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
{
    int i, t;
    const uint8_t *data_end = pkt->data + pkt->data_size;

#ifdef DEBUG
    ff_rtmp_packet_dump(s, pkt);
#endif

    switch (pkt->type) {
    case RTMP_PT_CHUNK_SIZE:
        if (pkt->data_size != 4) {
            av_log(s, AV_LOG_ERROR,
                   "Chunk size change packet is not 4 bytes long (%d)\n", pkt->data_size);
            return -1;
        }
        if (!rt->is_input)
            ff_rtmp_packet_write(rt->stream, pkt, rt->chunk_size, rt->prev_pkt[1]);
        rt->chunk_size = AV_RB32(pkt->data);
        if (rt->chunk_size <= 0) {
            av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n", rt->chunk_size);
            return -1;
        }
        av_log(s, AV_LOG_DEBUG, "New chunk size = %d\n", rt->chunk_size);
        break;
    case RTMP_PT_PING:
        t = AV_RB16(pkt->data);
        if (t == 6)
            gen_pong(s, rt, pkt);
        break;
    case RTMP_PT_CLIENT_BW:
        if (pkt->data_size < 4) {
            av_log(s, AV_LOG_ERROR,
                   "Client bandwidth report packet is less than 4 bytes long (%d)\n",
                   pkt->data_size);
            return -1;
        }
        av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", AV_RB32(pkt->data));
        rt->client_report_size = AV_RB32(pkt->data) >> 1;
        break;
    case RTMP_PT_INVOKE:
        //TODO: check for the messages sent for wrong state?
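        /* The invoke body starts with the command name as an AMF string:
         * a 0x02 type marker, a 16-bit big-endian length, then the characters,
         * e.g. "\002\000\006_error" is the 9-byte encoding of "_error". */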
        if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
            uint8_t tmpstr[256];

            if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                        "description", tmpstr, sizeof(tmpstr)))
                av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
            return -1;
        } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
            switch (rt->state) {
            case STATE_HANDSHAKED:
                if (!rt->is_input) {
                    gen_release_stream(s, rt);
                    gen_fcpublish_stream(s, rt);
                    rt->state = STATE_RELEASING;
                } else {
                    gen_server_bw(s, rt);
                    rt->state = STATE_CONNECTING;
                }
                gen_create_stream(s, rt);
                break;
            case STATE_FCPUBLISH:
                rt->state = STATE_CONNECTING;
                break;
            case STATE_RELEASING:
                rt->state = STATE_FCPUBLISH;
                /* hack for Wowza Media Server, it does not send result for
                 * releaseStream and FCPublish calls */
                if (!pkt->data[10]) {
                    int pkt_id = av_int2double(AV_RB64(pkt->data + 11));
                    if (pkt_id == rt->create_stream_invoke)
                        rt->state = STATE_CONNECTING;
                }
                if (rt->state != STATE_CONNECTING)
                    break;
                /* fall through */
            case STATE_CONNECTING:
                //extract a number from the result
                if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
                    av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
                } else {
                    rt->main_channel_id = av_int2double(AV_RB64(pkt->data + 21));
                }
                if (rt->is_input) {
                    gen_play(s, rt);
                } else {
                    gen_publish(s, rt);
                }
                rt->state = STATE_READY;
                break;
            }
        } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
            const uint8_t* ptr = pkt->data + 11;
            uint8_t tmpstr[256];

            for (i = 0; i < 2; i++) {
                t = ff_amf_tag_size(ptr, data_end);
                if (t < 0)
                    return 1;
                ptr += t;
            }

            t = ff_amf_get_field_value(ptr, data_end,
                                       "level", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "error")) {
                if (!ff_amf_get_field_value(ptr, data_end,
                                            "description", tmpstr, sizeof(tmpstr)))
                    av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
                return -1;
            }

            t = ff_amf_get_field_value(ptr, data_end,
                                       "code", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "NetStream.Play.Start"))           rt->state = STATE_PLAYING;
            if (!t && !strcmp(tmpstr, "NetStream.Play.Stop"))            rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Publish.Start"))        rt->state = STATE_PUBLISHING;
        } else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
            gen_check_bw(s, rt);
        }
        break;
    }
    return 0;
}

/**
 * Interact with the server by receiving and sending RTMP packets until
 * there is some significant data (media data or expected status notification).
 *
 * @param s          reading context
 * @param for_header non-zero value tells function to work until it
 *                   gets notification from the server that playing has been started,
 *                   otherwise function will work until some media data is received (or
 *                   an error happens)
 * @return 0 for successful operation, negative value in case of error
 */
static int get_packet(URLContext *s, int for_header)
{
    RTMPContext *rt = s->priv_data;
    int ret;
    uint8_t *p;
    const uint8_t *next;
    uint32_t data_size;
    uint32_t ts, cts, pts = 0;

    if (rt->state == STATE_STOPPED)
        return AVERROR_EOF;

    for (;;) {
        RTMPPacket rpkt = { 0 };
        if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
                                       rt->chunk_size, rt->prev_pkt[0])) <= 0) {
            if (ret == 0) {
                return AVERROR(EAGAIN);
            } else {
                return AVERROR(EIO);
            }
        }
        rt->bytes_read += ret;
        if (rt->bytes_read > rt->last_bytes_read + rt->client_report_size) {
            av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
            gen_bytes_read(s, rt, rpkt.timestamp + 1);
            rt->last_bytes_read = rt->bytes_read;
        }

        ret = rtmp_parse_result(s, rt, &rpkt);
        if (ret < 0) { //serious error in current packet
            ff_rtmp_packet_destroy(&rpkt);
            return -1;
        }
        if (rt->state == STATE_STOPPED) {
            ff_rtmp_packet_destroy(&rpkt);
            return AVERROR_EOF;
        }
        if (for_header && (rt->state == STATE_PLAYING || rt->state == STATE_PUBLISHING)) {
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        if (!rpkt.data_size || !rt->is_input) {
            ff_rtmp_packet_destroy(&rpkt);
            continue;
        }
        if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
           (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
            ts = rpkt.timestamp;

            // generate packet header and put data into buffer for FLV demuxer
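            // (an FLV tag is written by hand: 1 byte type, 3 bytes data size,
            //  3+1 bytes timestamp, 3 bytes stream ID, the payload, and a
            //  4-byte "previous tag size" trailer, i.e. data_size + 15 in total)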
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size + 15;
            rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);
            bytestream_put_byte(&p, rpkt.type);
            bytestream_put_be24(&p, rpkt.data_size);
            bytestream_put_be24(&p, ts);
            bytestream_put_byte(&p, ts >> 24);
            bytestream_put_be24(&p, 0);
            bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
            bytestream_put_be32(&p, 0);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        } else if (rpkt.type == RTMP_PT_METADATA) {
            // we got raw FLV data, make it available for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size;
            rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);

            /* rewrite timestamps */
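            /* The packet already contains complete FLV tags; walk them
             * (type, 3-byte size, 3+1-byte timestamp, 3-byte stream ID,
             * payload, 4-byte previous tag size) and rebase each tag's
             * timestamp onto the RTMP packet timestamp. */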
            next = rpkt.data;
            ts = rpkt.timestamp;

            while (next - rpkt.data < rpkt.data_size - 11) {
                next++;
                data_size = bytestream_get_be24(&next);
                p = next;
                cts  = bytestream_get_be24(&next);
                cts |= bytestream_get_byte(&next) << 24;
                if (pts == 0)
                    pts = cts;
                ts += cts - pts;
                pts = cts;
                bytestream_put_be24(&p, ts);
                bytestream_put_byte(&p, ts >> 24);
                next += data_size + 3 + 4;
            }

            memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        ff_rtmp_packet_destroy(&rpkt);
    }
}

static int rtmp_close(URLContext *h)
{
    RTMPContext *rt = h->priv_data;

    if (!rt->is_input) {
        rt->flv_data = NULL;
        if (rt->out_pkt.data_size)
            ff_rtmp_packet_destroy(&rt->out_pkt);
        if (rt->state > STATE_FCPUBLISH)
            gen_fcunpublish_stream(h, rt);
    }
    if (rt->state > STATE_HANDSHAKED)
        gen_delete_stream(h, rt);

    av_freep(&rt->flv_data);
    ffurl_close(rt->stream);
    return 0;
}

/**
 * Open RTMP connection and verify that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath]
 *             where 'app' is first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    RTMPContext *rt = s->priv_data;
    char proto[8], hostname[256], path[1024], *fname;
    char *old_app;
    uint8_t buf[2048];
    int port;
    int ret;

    rt->is_input = !(flags & AVIO_FLAG_WRITE);

    av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
                 path, sizeof(path), s->filename);

    if (port < 0)
        port = RTMP_DEFAULT_PORT;
    ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);

    if (ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
                   &s->interrupt_callback, NULL) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot open connection %s\n", buf);
        goto fail;
    }

    rt->state = STATE_START;
    if (rtmp_handshake(s, rt))
        goto fail;

    rt->chunk_size = 128;
    rt->state = STATE_HANDSHAKED;

    // Keep the application name when it has been defined by the user.
    old_app = rt->app;

    rt->app = av_malloc(APP_MAX_LENGTH);
    if (!rt->app) {
        rtmp_close(s);
        return AVERROR(ENOMEM);
    }

    //extract "app" part from path
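    // (e.g. for a path of "/live/mystream" the app becomes "live" and fname
    //  points at "mystream"; a "/ondemand/" prefix is special-cased below)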
    if (!strncmp(path, "/ondemand/", 10)) {
        fname = path + 10;
        memcpy(rt->app, "ondemand", 9);
    } else {
        char *p = strchr(path + 1, '/');
        if (!p) {
            fname = path + 1;
            rt->app[0] = '\0';
        } else {
            char *c = strchr(p + 1, ':');
            fname = strchr(p + 1, '/');
            if (!fname || c < fname) {
                fname = p + 1;
                av_strlcpy(rt->app, path + 1, p - path);
            } else {
                fname++;
                av_strlcpy(rt->app, path + 1, fname - path - 1);
            }
        }
    }

    if (old_app) {
        // The name of application has been defined by the user, override it.
        av_free(rt->app);
        rt->app = old_app;
    }

    if (!rt->playpath) {
        rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
        if (!rt->playpath) {
            rtmp_close(s);
            return AVERROR(ENOMEM);
        }

        if (!strchr(fname, ':') &&
            (!strcmp(fname + strlen(fname) - 4, ".f4v") ||
             !strcmp(fname + strlen(fname) - 4, ".mp4"))) {
            memcpy(rt->playpath, "mp4:", 5);
        } else {
            rt->playpath[0] = 0;
        }
        strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
    }

    if (!rt->tcurl) {
        rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
        ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
                    port, "/%s", rt->app);
    }

    if (!rt->flashver) {
        rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
        if (rt->is_input) {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
                     RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
                     RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
        } else {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
                     "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
        }
    }

    rt->client_report_size = 1048576;
    rt->bytes_read = 0;
    rt->last_bytes_read = 0;

    av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
           proto, path, rt->app, rt->playpath);

    gen_connect(s, rt);

    do {
        ret = get_packet(s, 1);
    } while (ret == AVERROR(EAGAIN));
    if (ret < 0)
        goto fail;

    if (rt->is_input) {
        // generate FLV header for demuxer
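        // ("FLV" signature, version 1, flags 5 = audio + video present,
        //  9-byte header size, then the 4-byte PreviousTagSize0 of 0,
        //  13 bytes in total)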
        rt->flv_size = 13;
        rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
        rt->flv_off  = 0;
        memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
    } else {
        rt->flv_size = 0;
        rt->flv_data = NULL;
        rt->flv_off  = 0;
        rt->skip_bytes = 13;
    }

    s->max_packet_size = rt->stream->max_packet_size;
    s->is_streamed = 1;
    return 0;

fail:
    rtmp_close(s);
    return AVERROR(EIO);
}

static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int orig_size = size;
    int ret;

    while (size > 0) {
        int data_left = rt->flv_size - rt->flv_off;

        if (data_left >= size) {
            memcpy(buf, rt->flv_data + rt->flv_off, size);
            rt->flv_off += size;
            return orig_size;
        }
        if (data_left > 0) {
            memcpy(buf, rt->flv_data + rt->flv_off, data_left);
            buf  += data_left;
            size -= data_left;
            rt->flv_off = rt->flv_size;
            return data_left;
        }
        if ((ret = get_packet(s, 0)) < 0)
            return ret;
    }
    return orig_size;
}

static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int size_temp = size;
    int pktsize, pkttype;
    uint32_t ts;
    const uint8_t *buf_temp = buf;

    do {
        if (rt->skip_bytes) {
            int skip = FFMIN(rt->skip_bytes, size_temp);
            buf_temp       += skip;
            size_temp      -= skip;
            rt->skip_bytes -= skip;
            continue;
        }

        if (rt->flv_header_bytes < 11) {
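            /* Accumulate the 11-byte FLV tag header coming from the muxer
             * (1 byte type, 3 bytes data size, 3+1 bytes timestamp, 3 bytes
             * stream ID) and repackage the tag as an RTMP packet. */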
            const uint8_t *header = rt->flv_header;
            int copy = FFMIN(11 - rt->flv_header_bytes, size_temp);

            bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
            rt->flv_header_bytes += copy;
            size_temp            -= copy;
            if (rt->flv_header_bytes < 11)
                break;

            pkttype = bytestream_get_byte(&header);
            pktsize = bytestream_get_be24(&header);
            ts  = bytestream_get_be24(&header);
            ts |= bytestream_get_byte(&header) << 24;
            bytestream_get_be24(&header);
            rt->flv_size = pktsize;

            //force 12bytes header
            if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
                pkttype == RTMP_PT_NOTIFY) {
                if (pkttype == RTMP_PT_NOTIFY)
                    pktsize += 16;
                rt->prev_pkt[1][RTMP_SOURCE_CHANNEL].channel_id = 0;
            }

            //this can be a big packet, it's better to send it right here
            ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL, pkttype, ts, pktsize);
            rt->out_pkt.extra = rt->main_channel_id;
            rt->flv_data = rt->out_pkt.data;

            if (pkttype == RTMP_PT_NOTIFY)
                ff_amf_write_string(&rt->flv_data, "@setDataFrame");
        }

        if (rt->flv_size - rt->flv_off > size_temp) {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
            rt->flv_off += size_temp;
            size_temp = 0;
        } else {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
            size_temp   -= rt->flv_size - rt->flv_off;
            rt->flv_off += rt->flv_size - rt->flv_off;
        }

        if (rt->flv_off == rt->flv_size) {
            rt->skip_bytes = 4;

            ff_rtmp_packet_write(rt->stream, &rt->out_pkt, rt->chunk_size, rt->prev_pkt[1]);
            ff_rtmp_packet_destroy(&rt->out_pkt);
            rt->flv_size = 0;
            rt->flv_off = 0;
            rt->flv_header_bytes = 0;
        }
    } while (buf_temp - buf < size);
    return size;
}

#define OFFSET(x) offsetof(RTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM

static const AVOption rtmp_options[] = {
    {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
    {"any", "both", 0, AV_OPT_TYPE_CONST, {-2}, 0, 0, DEC, "rtmp_live"},
    {"live", "live stream", 0, AV_OPT_TYPE_CONST, {-1}, 0, 0, DEC, "rtmp_live"},
    {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_tcurl", "URL of the target stream. Defaults to rtmp://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    { NULL },
};

static const AVClass rtmp_class = {
    .class_name = "rtmp",
    .item_name  = av_default_item_name,
    .option     = rtmp_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

URLProtocol ff_rtmp_protocol = {
    .name            = "rtmp",
    .url_open        = rtmp_open,
    .url_read        = rtmp_read,
    .url_write       = rtmp_write,
    .url_close       = rtmp_close,
    .priv_data_size  = sizeof(RTMPContext),
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
    .priv_data_class = &rtmp_class,
};