/*
 * RTMP network protocol
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RTMP protocol
 */

#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/lfg.h"
#include "libavutil/opt.h"
#include "libavutil/sha.h"
#include "avformat.h"
#include "internal.h"

#include "network.h"

#include "flv.h"
#include "rtmp.h"
#include "rtmppkt.h"
#include "url.h"

//#define DEBUG

#define APP_MAX_LENGTH      128
#define PLAYPATH_MAX_LENGTH 256
#define TCURL_MAX_LENGTH    512
#define FLASHVER_MAX_LENGTH 64
/** RTMP protocol handler state */
typedef enum {
    STATE_START,      ///< client has not done anything yet
    STATE_HANDSHAKED, ///< client has performed handshake
    STATE_RELEASING,  ///< client releasing stream before publishing it (for output)
    STATE_FCPUBLISH,  ///< client FCPublishing stream (for output)
    STATE_CONNECTING, ///< client connected to server successfully
    STATE_READY,      ///< client has sent all needed commands and waits for server reply
    STATE_PLAYING,    ///< client has started receiving multimedia data from server
    STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
    STATE_STOPPED,    ///< the broadcast has been stopped
} ClientState;
/** protocol handler context */
typedef struct RTMPContext {
    const AVClass *class;
    URLContext *stream;                   ///< TCP stream used in interactions with RTMP server
    RTMPPacket prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
    int chunk_size;                       ///< size of the chunks RTMP packets are divided into
    int is_input;                         ///< input/output flag
    char *playpath;                       ///< stream identifier to play (with possible "mp4:" prefix)
    int live;                             ///< 0: recorded, -1: live, -2: both
    char *app;                            ///< name of application
    char *conn;                           ///< append arbitrary AMF data to the Connect message
    ClientState state;                    ///< current state
    int main_channel_id;                  ///< an additional channel ID which is used for some invocations
    uint8_t *flv_data;                    ///< buffer with data for demuxer
    int flv_size;                         ///< current buffer size
    int flv_off;                          ///< number of bytes read from current buffer
    RTMPPacket out_pkt;                   ///< rtmp packet, created from flv a/v or metadata (for output)
    uint32_t client_report_size;          ///< number of bytes after which client should report to server
    uint32_t bytes_read;                  ///< number of bytes read from server
    uint32_t last_bytes_read;             ///< number of bytes read last reported to server
    int skip_bytes;                       ///< number of bytes to skip from the input FLV stream in the next write call
    uint8_t flv_header[11];               ///< partial incoming flv packet header
    int flv_header_bytes;                 ///< number of initialized bytes in flv_header
    int nb_invokes;                       ///< keeps track of invoke messages
    int create_stream_invoke;             ///< invoke id for the create stream command
    char *tcurl;                          ///< url of the target stream
    char *flashver;                       ///< version of the flash plugin
    char *swfurl;                         ///< url of the swf player
    int server_bw;                        ///< server bandwidth
    int client_buffer_time;               ///< client buffer time in ms
} RTMPContext;

#define PLAYER_KEY_OPEN_PART_LEN 30   ///< length of partial key used for first client digest signing
/** Client key used for digest signing */
static const uint8_t rtmp_player_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

#define SERVER_KEY_OPEN_PART_LEN 36   ///< length of partial key used for first server digest signing
/** Key used for RTMP server digest signing */
static const uint8_t rtmp_server_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
    'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
{
    char *field, *value;
    char type;
    /* The type must be B for Boolean, N for number, S for string, O for
     * object, or Z for null. For Booleans the data must be either 0 or 1 for
     * FALSE or TRUE, respectively. Likewise for Objects the data must be
     * 0 or 1 to end or begin an object, respectively. Data items in subobjects
     * may be named by prefixing the type with 'N' and specifying the name
     * before the value (i.e. NB:myFlag:1). This option may be used multiple
     * times to construct arbitrary AMF sequences. */
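    /* Illustrative example (values made up): the option string
     * "B:1 S:hello O:1 NN:code:1.23 NS:flag:ok O:0" appends a boolean, a
     * string, and an object containing a named number and a named string
     * to the Connect message. */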
    if (param[0] && param[1] == ':') {
        type  = param[0];
        value = param + 2;
    } else if (param[0] == 'N' && param[1] && param[2] == ':') {
        type  = param[1];
        field = param + 3;
        value = strchr(field, ':');
        if (!value)
            goto fail;
        *value = '\0';
        value++;

        if (!field || !value)
            goto fail;

        ff_amf_write_field_name(p, field);
    } else {
        goto fail;
    }

    switch (type) {
    case 'B':
        ff_amf_write_bool(p, value[0] != '0');
        break;
    case 'S':
        ff_amf_write_string(p, value);
        break;
    case 'N':
        ff_amf_write_number(p, strtod(value, NULL));
        break;
    case 'Z':
        ff_amf_write_null(p);
        break;
    case 'O':
        if (value[0] != '0')
            ff_amf_write_object_start(p);
        else
            ff_amf_write_object_end(p);
        break;
    default:
        goto fail;
        break;
    }

    return 0;

fail:
    av_log(s, AV_LOG_ERROR, "Invalid AMF parameter: %s\n", param);
    return AVERROR(EINVAL);
}
/**
 * Generate 'connect' call and send it to the server.
 */
static int gen_connect(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 4096)) < 0)
        return ret;

    p = pkt.data;

    ff_amf_write_string(&p, "connect");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "app");
    ff_amf_write_string(&p, rt->app);

    if (!rt->is_input) {
        ff_amf_write_field_name(&p, "type");
        ff_amf_write_string(&p, "nonprivate");
    }
    ff_amf_write_field_name(&p, "flashVer");
    ff_amf_write_string(&p, rt->flashver);

    if (rt->swfurl) {
        ff_amf_write_field_name(&p, "swfUrl");
        ff_amf_write_string(&p, rt->swfurl);
    }

    ff_amf_write_field_name(&p, "tcUrl");
    ff_amf_write_string(&p, rt->tcurl);
    if (rt->is_input) {
        ff_amf_write_field_name(&p, "fpad");
        ff_amf_write_bool(&p, 0);
        ff_amf_write_field_name(&p, "capabilities");
        ff_amf_write_number(&p, 15.0);

        /* Tell the server we support all the audio codecs except
         * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
         * which are unused in the RTMP protocol implementation. */
        ff_amf_write_field_name(&p, "audioCodecs");
        ff_amf_write_number(&p, 4071.0);
        ff_amf_write_field_name(&p, "videoCodecs");
        ff_amf_write_number(&p, 252.0);
        ff_amf_write_field_name(&p, "videoFunction");
        ff_amf_write_number(&p, 1.0);
    }
    ff_amf_write_object_end(&p);

    if (rt->conn) {
        char *param = rt->conn;

        // Write arbitrary AMF data to the Connect message.
        while (param != NULL) {
            char *sep;
            param += strspn(param, " ");
            if (!*param)
                break;
            sep = strchr(param, ' ');
            if (sep)
                *sep = '\0';
            if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) {
                // Invalid AMF parameter.
                ff_rtmp_packet_destroy(&pkt);
                return ret;
            }

            if (sep)
                param = sep + 1;
            else
                break;
        }
    }

    pkt.data_size = p - pkt.data;

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate 'releaseStream' call and send it to the server. It should make
 * the server release some channel for media streams.
 */
static int gen_release_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 29 + strlen(rt->playpath))) < 0)
        return ret;

    av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "releaseStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}
/**
 * Generate 'FCPublish' call and send it to the server. It should make
 * the server prepare for receiving media streams.
 */
static int gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 25 + strlen(rt->playpath))) < 0)
        return ret;

    av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCPublish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}
/**
 * Generate 'FCUnpublish' call and send it to the server. It should make
 * the server destroy the stream.
 */
static int gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 27 + strlen(rt->playpath))) < 0)
        return ret;

    av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCUnpublish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}
/**
 * Generate 'createStream' call and send it to the server. It should make
 * the server allocate some channel for media streams.
 */
static int gen_create_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Creating stream...\n");

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 25)) < 0)
        return ret;

    p = pkt.data;
    ff_amf_write_string(&p, "createStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    rt->create_stream_invoke = rt->nb_invokes;

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate 'deleteStream' call and send it to the server. It should make
 * the server remove some channel for media streams.
 */
static int gen_delete_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 34)) < 0)
        return ret;

    p = pkt.data;
    ff_amf_write_string(&p, "deleteStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_number(&p, rt->main_channel_id);

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate client buffer time and send it to the server.
 */
static int gen_buffer_time(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
                                     1, 10)) < 0)
        return ret;

    p = pkt.data;
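    // User control event type 3 (SetBufferLength): a 4-byte stream ID
    // followed by the desired buffer length in milliseconds.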
    bytestream_put_be16(&p, 3);
    bytestream_put_be32(&p, rt->main_channel_id);
    bytestream_put_be32(&p, rt->client_buffer_time);

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate 'play' call and send it to the server, then ping the server
 * to start actual playing.
 */
static int gen_play(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE,
                                     0, 29 + strlen(rt->playpath))) < 0)
        return ret;

    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "play");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_number(&p, rt->live);

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate 'publish' call and send it to the server.
 */
static int gen_publish(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
                                     0, 30 + strlen(rt->playpath))) < 0)
        return ret;

    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "publish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_string(&p, "live");

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate ping reply and send it to the server.
 */
static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
                                     ppkt->timestamp + 1, 6)) < 0)
        return ret;

    p = pkt.data;
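    // User control event type 7 (PingResponse): echo back the 4-byte
    // timestamp carried in the server's ping request.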
    bytestream_put_be16(&p, 7);
    bytestream_put_be32(&p, AV_RB32(ppkt->data+2));
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate server bandwidth message and send it to the server.
 */
static int gen_server_bw(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW,
                                     0, 4)) < 0)
        return ret;

    p = pkt.data;
    bytestream_put_be32(&p, rt->server_bw);
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate check bandwidth message and send it to the server.
 */
static int gen_check_bw(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
                                     0, 21)) < 0)
        return ret;

    p = pkt.data;
    ff_amf_write_string(&p, "_checkbw");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);

    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

/**
 * Generate report on bytes read so far and send it to the server.
 */
static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
{
    RTMPPacket pkt;
    uint8_t *p;
    int ret;

    if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ,
                                     ts, 4)) < 0)
        return ret;

    p = pkt.data;
    bytestream_put_be32(&p, rt->bytes_read);
    ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size,
                               rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    return ret;
}

//TODO: Move HMAC code somewhere. Eventually.
#define HMAC_IPAD_VAL 0x36
#define HMAC_OPAD_VAL 0x5C
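/* For reference, the function below implements the standard HMAC construction
 * (RFC 2104) over SHA-256: HMAC(K, m) = H((K xor opad) || H((K xor ipad) || m)),
 * with the key zero-padded to the 64-byte SHA-256 block size. */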
/**
 * Calculate HMAC-SHA2 digest for RTMP handshake packets.
 *
 * @param src    input buffer
 * @param len    input buffer length (should be 1536)
 * @param gap    offset in buffer where 32 bytes should not be taken into account
 *               when calculating digest (since it will be used to store that digest)
 * @param key    digest key
 * @param keylen digest key length
 * @param dst    buffer where calculated digest will be stored (32 bytes)
 */
static int rtmp_calc_digest(const uint8_t *src, int len, int gap,
                            const uint8_t *key, int keylen, uint8_t *dst)
{
    struct AVSHA *sha;
    uint8_t hmac_buf[64+32] = {0};
    int i;

    sha = av_mallocz(av_sha_size);
    if (!sha)
        return AVERROR(ENOMEM);

    if (keylen < 64) {
        memcpy(hmac_buf, key, keylen);
    } else {
        av_sha_init(sha, 256);
        av_sha_update(sha, key, keylen);
        av_sha_final(sha, hmac_buf);
    }
    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL;

    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64);
    if (gap <= 0) {
        av_sha_update(sha, src, len);
    } else { //skip 32 bytes used for storing digest
        av_sha_update(sha, src, gap);
        av_sha_update(sha, src + gap + 32, len - gap - 32);
    }
    av_sha_final(sha, hmac_buf + 64);

    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL ^ HMAC_OPAD_VAL; //reuse XORed key for opad
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64+32);
    av_sha_final(sha, dst);

    av_free(sha);
    return 0;
}

/**
 * Put HMAC-SHA2 digest of packet data (except for the bytes where this digest
 * will be stored) into that packet.
 *
 * @param buf handshake data (1536 bytes)
 * @return offset to the digest inside input data
 */
static int rtmp_handshake_imprint_with_digest(uint8_t *buf)
{
    int i, digest_pos = 0;
    int ret;
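    /* In the C1 handshake packet the 32-byte digest sits inside a 764-byte
     * digest block starting at offset 8: the sum of the four bytes at
     * offsets 8-11, taken modulo 728 (764 - 32 - 4), gives the digest
     * position within that block, hence the "+ 12" below. */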
    for (i = 8; i < 12; i++)
        digest_pos += buf[i];
    digest_pos = (digest_pos % 728) + 12;

    ret = rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                           rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
                           buf + digest_pos);
    if (ret < 0)
        return ret;

    return digest_pos;
}

/**
 * Verify that the received server response has the expected digest value.
 *
 * @param buf handshake data received from the server (1536 bytes)
 * @param off position to search digest offset from
 * @return 0 if digest is valid, digest position otherwise
 */
static int rtmp_validate_digest(uint8_t *buf, int off)
{
    int i, digest_pos = 0;
    uint8_t digest[32];
    int ret;

    for (i = 0; i < 4; i++)
        digest_pos += buf[i + off];
    digest_pos = (digest_pos % 728) + off + 4;

    ret = rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                           rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
                           digest);
    if (ret < 0)
        return ret;

    if (!memcmp(digest, buf + digest_pos, 32))
        return digest_pos;
    return 0;
}

/**
 * Perform handshake with the server by means of exchanging pseudorandom data
 * signed with HMAC-SHA2 digest.
 *
 * @return 0 if handshake succeeds, negative value otherwise
 */
static int rtmp_handshake(URLContext *s, RTMPContext *rt)
{
    AVLFG rnd;
    uint8_t tosend[RTMP_HANDSHAKE_PACKET_SIZE+1] = {
        3,                // unencrypted data
        0, 0, 0, 0,       // client uptime
        RTMP_CLIENT_VER1,
        RTMP_CLIENT_VER2,
        RTMP_CLIENT_VER3,
        RTMP_CLIENT_VER4,
    };
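    /* The buffer above combines C0 (a single version byte, 3 = plain RTMP)
     * with C1 (1536 bytes: 4 bytes client uptime, 4 bytes client version,
     * and 1528 bytes filled with pseudorandom data below). */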
    uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
    uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
    int i;
    int server_pos, client_pos;
    uint8_t digest[32];
    int ret;

    av_log(s, AV_LOG_DEBUG, "Handshaking...\n");

    av_lfg_init(&rnd, 0xDEADC0DE);
    // generate handshake packet - 1536 bytes of pseudorandom data
    for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
        tosend[i] = av_lfg_get(&rnd) >> 24;
    client_pos = rtmp_handshake_imprint_with_digest(tosend + 1);
    if (client_pos < 0)
        return client_pos;

    if ((ret = ffurl_write(rt->stream, tosend,
                           RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot write RTMP handshake request\n");
        return ret;
    }

    if ((ret = ffurl_read_complete(rt->stream, serverdata,
                                   RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return ret;
    }

    if ((ret = ffurl_read_complete(rt->stream, clientdata,
                                   RTMP_HANDSHAKE_PACKET_SIZE)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return ret;
    }

    av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
           serverdata[5], serverdata[6], serverdata[7], serverdata[8]);

    if (rt->is_input && serverdata[5] >= 3) {
        server_pos = rtmp_validate_digest(serverdata + 1, 772);
        if (server_pos < 0)
            return server_pos;

        if (!server_pos) {
            server_pos = rtmp_validate_digest(serverdata + 1, 8);
            if (server_pos < 0)
                return server_pos;

            if (!server_pos) {
                av_log(s, AV_LOG_ERROR, "Server response validation failed\n");
                return AVERROR(EIO);
            }
        }
        }

        ret = rtmp_calc_digest(tosend + 1 + client_pos, 32, 0, rtmp_server_key,
                               sizeof(rtmp_server_key), digest);
        if (ret < 0)
            return ret;

        ret = rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                               digest, 32, digest);
        if (ret < 0)
            return ret;

        if (memcmp(digest, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
            av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
            return AVERROR(EIO);
        }

        for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
            tosend[i] = av_lfg_get(&rnd) >> 24;
        ret = rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
                               rtmp_player_key, sizeof(rtmp_player_key),
                               digest);
        if (ret < 0)
            return ret;

        ret = rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                               digest, 32,
                               tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);
        if (ret < 0)
            return ret;

        // write reply back to the server
        if ((ret = ffurl_write(rt->stream, tosend,
                               RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
            return ret;
    } else {
        if ((ret = ffurl_write(rt->stream, serverdata + 1,
                               RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
            return ret;
    }

    return 0;
}

/**
 * Parse received packet and possibly perform some action depending on
 * the packet contents.
 * @return 0 for no errors, negative values for serious errors which prevent
 *         further communications, positive values for uncritical errors
 */
static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
{
    int i, t;
    const uint8_t *data_end = pkt->data + pkt->data_size;
    int ret;

#ifdef DEBUG
    ff_rtmp_packet_dump(s, pkt);
#endif

    switch (pkt->type) {
    case RTMP_PT_CHUNK_SIZE:
        if (pkt->data_size != 4) {
            av_log(s, AV_LOG_ERROR,
                   "Chunk size change packet is not 4 bytes long (%d)\n", pkt->data_size);
            return -1;
        }
        if (!rt->is_input)
            if ((ret = ff_rtmp_packet_write(rt->stream, pkt, rt->chunk_size,
                                            rt->prev_pkt[1])) < 0)
                return ret;
        rt->chunk_size = AV_RB32(pkt->data);
        if (rt->chunk_size <= 0) {
            av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n", rt->chunk_size);
            return -1;
        }
        av_log(s, AV_LOG_DEBUG, "New chunk size = %d\n", rt->chunk_size);
        break;
    case RTMP_PT_PING:
        t = AV_RB16(pkt->data);
        if (t == 6)
            if ((ret = gen_pong(s, rt, pkt)) < 0)
                return ret;
        break;
    case RTMP_PT_CLIENT_BW:
        if (pkt->data_size < 4) {
            av_log(s, AV_LOG_ERROR,
                   "Client bandwidth report packet is less than 4 bytes long (%d)\n",
                   pkt->data_size);
            return -1;
        }
        av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", AV_RB32(pkt->data));
        rt->client_report_size = AV_RB32(pkt->data) >> 1;
        break;
    case RTMP_PT_SERVER_BW:
        rt->server_bw = AV_RB32(pkt->data);
        if (rt->server_bw <= 0) {
            av_log(s, AV_LOG_ERROR, "Incorrect server bandwidth %d\n", rt->server_bw);
            return AVERROR(EINVAL);
        }
        av_log(s, AV_LOG_DEBUG, "Server bandwidth = %d\n", rt->server_bw);
        break;
    case RTMP_PT_INVOKE:
        //TODO: check for the messages sent for wrong state?
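        /* AMF0 encodes a string as a 0x02 type marker followed by a
         * big-endian 16-bit length and the string bytes, which is what the
         * literals compared against below spell out. */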
        if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
            uint8_t tmpstr[256];

            if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                        "description", tmpstr, sizeof(tmpstr)))
                av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
            return -1;
        } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
            switch (rt->state) {
            case STATE_HANDSHAKED:
                if (!rt->is_input) {
                    if ((ret = gen_release_stream(s, rt)) < 0)
                        return ret;
                    if ((ret = gen_fcpublish_stream(s, rt)) < 0)
                        return ret;
                    rt->state = STATE_RELEASING;
                } else {
                    if ((ret = gen_server_bw(s, rt)) < 0)
                        return ret;
                    rt->state = STATE_CONNECTING;
                }
                if ((ret = gen_create_stream(s, rt)) < 0)
                    return ret;
                break;
            case STATE_FCPUBLISH:
                rt->state = STATE_CONNECTING;
                break;
            case STATE_RELEASING:
                rt->state = STATE_FCPUBLISH;
                /* hack for Wowza Media Server, it does not send result for
                 * releaseStream and FCPublish calls */
                if (!pkt->data[10]) {
                    int pkt_id = av_int2double(AV_RB64(pkt->data + 11));
                    if (pkt_id == rt->create_stream_invoke)
                        rt->state = STATE_CONNECTING;
                }
                if (rt->state != STATE_CONNECTING)
                    break;
            case STATE_CONNECTING:
                //extract a number from the result
                if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
                    av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
                } else {
                    rt->main_channel_id = av_int2double(AV_RB64(pkt->data + 21));
                }
                if (rt->is_input) {
                    if ((ret = gen_play(s, rt)) < 0)
                        return ret;
                    if ((ret = gen_buffer_time(s, rt)) < 0)
                        return ret;
                } else {
                    if ((ret = gen_publish(s, rt)) < 0)
                        return ret;
                }
                rt->state = STATE_READY;
                break;
            }
        } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
            const uint8_t *ptr = pkt->data + 11;
            uint8_t tmpstr[256];

            for (i = 0; i < 2; i++) {
                t = ff_amf_tag_size(ptr, data_end);
                if (t < 0)
                    return 1;
                ptr += t;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "level", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "error")) {
                if (!ff_amf_get_field_value(ptr, data_end,
                                            "description", tmpstr, sizeof(tmpstr)))
                    av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
                return -1;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "code", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "NetStream.Play.Start"))           rt->state = STATE_PLAYING;
            if (!t && !strcmp(tmpstr, "NetStream.Play.Stop"))            rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Publish.Start"))        rt->state = STATE_PUBLISHING;
        } else if (!memcmp(pkt->data, "\002\000\010onBWDone", 11)) {
            if ((ret = gen_check_bw(s, rt)) < 0)
                return ret;
        }
        break;
    default:
        av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type);
        break;
    }
    return 0;
}

/**
 * Interact with the server by receiving and sending RTMP packets until
 * there is some significant data (media data or expected status notification).
 *
 * @param s          reading context
 * @param for_header non-zero value tells function to work until it
 *                   gets notification from the server that playing has been started,
 *                   otherwise function will work until some media data is received (or
 *                   an error happens)
 * @return 0 for successful operation, negative value in case of error
 */
static int get_packet(URLContext *s, int for_header)
{
    RTMPContext *rt = s->priv_data;
    int ret;
    uint8_t *p;
    const uint8_t *next;
    uint32_t data_size;
    uint32_t ts, cts, pts = 0;

    if (rt->state == STATE_STOPPED)
        return AVERROR_EOF;

    for (;;) {
        RTMPPacket rpkt = { 0 };
        if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
                                       rt->chunk_size, rt->prev_pkt[0])) <= 0) {
            if (ret == 0) {
                return AVERROR(EAGAIN);
            } else {
                return AVERROR(EIO);
            }
        }
        rt->bytes_read += ret;
        if (rt->bytes_read - rt->last_bytes_read > rt->client_report_size) {
            av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
            if ((ret = gen_bytes_read(s, rt, rpkt.timestamp + 1)) < 0)
                return ret;
            rt->last_bytes_read = rt->bytes_read;
        }

        ret = rtmp_parse_result(s, rt, &rpkt);
        if (ret < 0) { //serious error in current packet
            ff_rtmp_packet_destroy(&rpkt);
            return ret;
        }
        if (rt->state == STATE_STOPPED) {
            ff_rtmp_packet_destroy(&rpkt);
            return AVERROR_EOF;
        }
        if (for_header && (rt->state == STATE_PLAYING || rt->state == STATE_PUBLISHING)) {
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        if (!rpkt.data_size || !rt->is_input) {
            ff_rtmp_packet_destroy(&rpkt);
            continue;
        }
        if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
           (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
            ts = rpkt.timestamp;

            // generate packet header and put data into buffer for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size + 15;
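            /* The 15 extra bytes are the 11-byte FLV tag header written in
             * front of the payload plus the 4-byte PreviousTagSize field
             * appended after it. */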
            rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);

            bytestream_put_byte(&p, rpkt.type);
            bytestream_put_be24(&p, rpkt.data_size);
            bytestream_put_be24(&p, ts);
            bytestream_put_byte(&p, ts >> 24);
            bytestream_put_be24(&p, 0);
            bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
            bytestream_put_be32(&p, 0);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        } else if (rpkt.type == RTMP_PT_METADATA) {
            // we got raw FLV data, make it available for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size;
            rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
            /* rewrite timestamps */
            next = rpkt.data;
            ts   = rpkt.timestamp;
            while (next - rpkt.data < rpkt.data_size - 11) {
                next++;
                data_size = bytestream_get_be24(&next);
                p = next;
                cts  = bytestream_get_be24(&next);
                cts |= bytestream_get_byte(&next) << 24;
                if (pts == 0)
                    pts = cts;
                ts += cts - pts;
                pts = cts;
                bytestream_put_be24(&p, ts);
                bytestream_put_byte(&p, ts >> 24);
                next += data_size + 3 + 4;
            }
            memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        ff_rtmp_packet_destroy(&rpkt);
    }
}

static int rtmp_close(URLContext *h)
{
    RTMPContext *rt = h->priv_data;
    int ret = 0;

    if (!rt->is_input) {
        rt->flv_data = NULL;
        if (rt->out_pkt.data_size)
            ff_rtmp_packet_destroy(&rt->out_pkt);
        if (rt->state > STATE_FCPUBLISH)
            ret = gen_fcunpublish_stream(h, rt);
    }
    if (rt->state > STATE_HANDSHAKED)
        ret = gen_delete_stream(h, rt);

    av_freep(&rt->flv_data);
    ffurl_close(rt->stream);
    return ret;
}

/**
 * Open RTMP connection and verify that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath]
 *             where 'app' is first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             may be prefixed with "mp4:")
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    RTMPContext *rt = s->priv_data;
    char proto[8], hostname[256], path[1024], *fname;
    char *old_app;
    uint8_t buf[2048];
    int port;
    int ret;

    rt->is_input = !(flags & AVIO_FLAG_WRITE);

    av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
                 path, sizeof(path), s->filename);

    if (port < 0)
        port = RTMP_DEFAULT_PORT;
    ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);

    if ((ret = ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
                          &s->interrupt_callback, NULL)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot open connection %s\n", buf);
        goto fail;
    }

    rt->state = STATE_START;
    if ((ret = rtmp_handshake(s, rt)) < 0)
        goto fail;

    rt->chunk_size = 128;
    rt->state = STATE_HANDSHAKED;

    // Keep the application name when it has been defined by the user.
    old_app = rt->app;

    rt->app = av_malloc(APP_MAX_LENGTH);
    if (!rt->app) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    //extract "app" part from path
    if (!strncmp(path, "/ondemand/", 10)) {
        fname = path + 10;
        memcpy(rt->app, "ondemand", 9);
    } else {
        char *next = *path ? path + 1 : path;
        char *p = strchr(next, '/');
        if (!p) {
            fname = next;
            rt->app[0] = '\0';
        } else {
            // make sure we do not mismatch a playpath for an application instance
            char *c = strchr(p + 1, ':');
            fname = strchr(p + 1, '/');
            if (!fname || (c && c < fname)) {
                fname = p + 1;
                av_strlcpy(rt->app, path + 1, p - path);
            } else {
                fname++;
                av_strlcpy(rt->app, path + 1, fname - path - 1);
            }
        }
    }

    if (old_app) {
        // The name of application has been defined by the user, override it.
        av_free(rt->app);
        rt->app = old_app;
    }

    if (!rt->playpath) {
        int len = strlen(fname);

        rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
        if (!rt->playpath) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        if (!strchr(fname, ':') && len >= 4 &&
            (!strcmp(fname + len - 4, ".f4v") ||
             !strcmp(fname + len - 4, ".mp4"))) {
            memcpy(rt->playpath, "mp4:", 5);
        } else if (len >= 4 && !strcmp(fname + len - 4, ".flv")) {
            fname[len - 4] = '\0';
        } else {
            rt->playpath[0] = 0;
        }
        strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
    }

    if (!rt->tcurl) {
        rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
        if (!rt->tcurl) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
                    port, "/%s", rt->app);
    }

    if (!rt->flashver) {
        rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
        if (!rt->flashver) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (rt->is_input) {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
                     RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
                     RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
        } else {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
                     "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
        }
    }

    rt->client_report_size = 1048576;
    rt->bytes_read = 0;
    rt->last_bytes_read = 0;
    rt->server_bw = 2500000;

    av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
           proto, path, rt->app, rt->playpath);
    if ((ret = gen_connect(s, rt)) < 0)
        goto fail;
    do {
        ret = get_packet(s, 1);
    } while (ret == AVERROR(EAGAIN));
    if (ret < 0)
        goto fail;

    if (rt->is_input) {
        // generate FLV header for demuxer
        rt->flv_size = 13;
        rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
        rt->flv_off  = 0;
        memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
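        /* These 13 bytes are the standard FLV file header: "FLV" signature,
         * version 1, flags 5 (audio and video present), a 4-byte header size
         * of 9, followed by the 4-byte PreviousTagSize0 field set to 0. */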
    } else {
        rt->flv_size = 0;
        rt->flv_data = NULL;
        rt->flv_off  = 0;
        rt->skip_bytes = 13;
    }

    s->max_packet_size = rt->stream->max_packet_size;
    s->is_streamed = 1;
    return 0;

fail:
    rtmp_close(s);
    return ret;
}

static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int orig_size = size;
    int ret;

    while (size > 0) {
        int data_left = rt->flv_size - rt->flv_off;

        if (data_left >= size) {
            memcpy(buf, rt->flv_data + rt->flv_off, size);
            rt->flv_off += size;
            return orig_size;
        }
        if (data_left > 0) {
            memcpy(buf, rt->flv_data + rt->flv_off, data_left);
            buf  += data_left;
            size -= data_left;
            rt->flv_off = rt->flv_size;
            return data_left;
        }
        if ((ret = get_packet(s, 0)) < 0)
            return ret;
    }
    return orig_size;
}

static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int size_temp = size;
    int pktsize, pkttype;
    uint32_t ts;
    const uint8_t *buf_temp = buf;
    uint8_t c;
    int ret;

    do {
        if (rt->skip_bytes) {
            int skip = FFMIN(rt->skip_bytes, size_temp);
            buf_temp       += skip;
            size_temp      -= skip;
            rt->skip_bytes -= skip;
            continue;
        }
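        /* The 11 bytes gathered below form the FLV tag header produced by the
         * muxer: 1 byte tag type, 3 bytes data size, 3 bytes timestamp plus
         * 1 byte of timestamp extension, and a 3-byte stream ID. */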
        if (rt->flv_header_bytes < 11) {
            const uint8_t *header = rt->flv_header;
            int copy = FFMIN(11 - rt->flv_header_bytes, size_temp);

            bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
            rt->flv_header_bytes += copy;
            size_temp            -= copy;
            if (rt->flv_header_bytes < 11)
                break;

            pkttype = bytestream_get_byte(&header);
            pktsize = bytestream_get_be24(&header);
            ts  = bytestream_get_be24(&header);
            ts |= bytestream_get_byte(&header) << 24;
            bytestream_get_be24(&header);
            rt->flv_size = pktsize;

            //force a 12-byte header
            if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
                pkttype == RTMP_PT_NOTIFY) {
                if (pkttype == RTMP_PT_NOTIFY)
                    pktsize += 16;
                rt->prev_pkt[1][RTMP_SOURCE_CHANNEL].channel_id = 0;
            }
            //this can be a big packet, it's better to send it right here
            if ((ret = ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL,
                                             pkttype, ts, pktsize)) < 0)
                return ret;

            rt->out_pkt.extra = rt->main_channel_id;
            rt->flv_data = rt->out_pkt.data;

            if (pkttype == RTMP_PT_NOTIFY)
                ff_amf_write_string(&rt->flv_data, "@setDataFrame");
        }

        if (rt->flv_size - rt->flv_off > size_temp) {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
            rt->flv_off += size_temp;
            size_temp = 0;
        } else {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
            size_temp   -= rt->flv_size - rt->flv_off;
            rt->flv_off += rt->flv_size - rt->flv_off;
        }

        if (rt->flv_off == rt->flv_size) {
            rt->skip_bytes = 4;

            if ((ret = ff_rtmp_packet_write(rt->stream, &rt->out_pkt,
                                            rt->chunk_size, rt->prev_pkt[1])) < 0)
                return ret;
            ff_rtmp_packet_destroy(&rt->out_pkt);
            rt->flv_size = 0;
            rt->flv_off = 0;
            rt->flv_header_bytes = 0;
        }
    } while (buf_temp - buf < size);

    /* set stream into nonblocking mode */
    rt->stream->flags |= AVIO_FLAG_NONBLOCK;

    /* try to read one byte from the stream */
    ret = ffurl_read(rt->stream, &c, 1);

    /* switch the stream back into blocking mode */
    rt->stream->flags &= ~AVIO_FLAG_NONBLOCK;

    if (ret == AVERROR(EAGAIN)) {
        /* no incoming data to handle */
        return size;
    } else if (ret < 0) {
        return ret;
    } else if (ret == 1) {
        RTMPPacket rpkt = { 0 };

        if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
                                                rt->chunk_size,
                                                rt->prev_pkt[0], c)) <= 0)
            return ret;

        if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
            return ret;

        ff_rtmp_packet_destroy(&rpkt);
    }

    return size;
}

#define OFFSET(x) offsetof(RTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM

static const AVOption rtmp_options[] = {
    {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {3000}, 0, INT_MAX, DEC|ENC},
    {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
    {"any", "both", 0, AV_OPT_TYPE_CONST, {-2}, 0, 0, DEC, "rtmp_live"},
    {"live", "live stream", 0, AV_OPT_TYPE_CONST, {-1}, 0, 0, DEC, "rtmp_live"},
    {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_tcurl", "URL of the target stream. Defaults to rtmp://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    { NULL },
};

static const AVClass rtmp_class = {
    .class_name = "rtmp",
    .item_name  = av_default_item_name,
    .option     = rtmp_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

URLProtocol ff_rtmp_protocol = {
    .name            = "rtmp",
    .url_open        = rtmp_open,
    .url_read        = rtmp_read,
    .url_write       = rtmp_write,
    .url_close       = rtmp_close,
    .priv_data_size  = sizeof(RTMPContext),
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
    .priv_data_class = &rtmp_class,
};