You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

3180 lines
106KB

  1. /*
  2. * RTMP network protocol
  3. * Copyright (c) 2009 Konstantin Shishkov
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * RTMP protocol
  24. */
  25. #include "libavcodec/bytestream.h"
  26. #include "libavutil/avstring.h"
  27. #include "libavutil/base64.h"
  28. #include "libavutil/hmac.h"
  29. #include "libavutil/intfloat.h"
  30. #include "libavutil/lfg.h"
  31. #include "libavutil/md5.h"
  32. #include "libavutil/opt.h"
  33. #include "libavutil/random_seed.h"
  34. #include "avformat.h"
  35. #include "internal.h"
  36. #include "network.h"
  37. #include "flv.h"
  38. #include "rtmp.h"
  39. #include "rtmpcrypt.h"
  40. #include "rtmppkt.h"
  41. #include "url.h"
  42. #if CONFIG_ZLIB
  43. #include <zlib.h>
  44. #endif
  45. #define APP_MAX_LENGTH 1024
  46. #define PLAYPATH_MAX_LENGTH 512
  47. #define TCURL_MAX_LENGTH 1024
  48. #define FLASHVER_MAX_LENGTH 64
  49. #define RTMP_PKTDATA_DEFAULT_SIZE 4096
  50. #define RTMP_HEADER 11
  51. /** RTMP protocol handler state */
  52. typedef enum {
  53. STATE_START, ///< client has not done anything yet
  54. STATE_HANDSHAKED, ///< client has performed handshake
  55. STATE_FCPUBLISH, ///< client FCPublishing stream (for output)
  56. STATE_PLAYING, ///< client has started receiving multimedia data from server
  57. STATE_SEEKING, ///< client has started the seek operation. Back on STATE_PLAYING when the time comes
  58. STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
  59. STATE_RECEIVING, ///< received a publish command (for input)
  60. STATE_SENDING, ///< received a play command (for output)
  61. STATE_STOPPED, ///< the broadcast has been stopped
  62. } ClientState;
  63. typedef struct TrackedMethod {
  64. char *name;
  65. int id;
  66. } TrackedMethod;
  67. /** protocol handler context */
  68. typedef struct RTMPContext {
  69. const AVClass *class;
  70. URLContext* stream; ///< TCP stream used in interactions with RTMP server
  71. RTMPPacket *prev_pkt[2]; ///< packet history used when reading and sending packets ([0] for reading, [1] for writing)
  72. int nb_prev_pkt[2]; ///< number of elements in prev_pkt
  73. int in_chunk_size; ///< size of the chunks incoming RTMP packets are divided into
  74. int out_chunk_size; ///< size of the chunks outgoing RTMP packets are divided into
  75. int is_input; ///< input/output flag
  76. char *playpath; ///< stream identifier to play (with possible "mp4:" prefix)
  77. int live; ///< 0: recorded, -1: live, -2: both
  78. char *app; ///< name of application
  79. char *conn; ///< append arbitrary AMF data to the Connect message
  80. ClientState state; ///< current state
  81. int stream_id; ///< ID assigned by the server for the stream
  82. uint8_t* flv_data; ///< buffer with data for demuxer
  83. int flv_size; ///< current buffer size
  84. int flv_off; ///< number of bytes read from current buffer
  85. int flv_nb_packets; ///< number of flv packets published
  86. RTMPPacket out_pkt; ///< rtmp packet, created from flv a/v or metadata (for output)
  87. uint32_t receive_report_size; ///< number of bytes after which we should report the number of received bytes to the peer
  88. uint64_t bytes_read; ///< number of bytes read from server
  89. uint64_t last_bytes_read; ///< number of bytes read last reported to server
  90. uint32_t last_timestamp; ///< last timestamp received in a packet
  91. int skip_bytes; ///< number of bytes to skip from the input FLV stream in the next write call
  92. int has_audio; ///< presence of audio data
  93. int has_video; ///< presence of video data
  94. int received_metadata; ///< Indicates if we have received metadata about the streams
  95. uint8_t flv_header[RTMP_HEADER]; ///< partial incoming flv packet header
  96. int flv_header_bytes; ///< number of initialized bytes in flv_header
  97. int nb_invokes; ///< keeps track of invoke messages
  98. char* tcurl; ///< url of the target stream
  99. char* flashver; ///< version of the flash plugin
  100. char* swfhash; ///< SHA256 hash of the decompressed SWF file (32 bytes)
  101. int swfhash_len; ///< length of the SHA256 hash
  102. int swfsize; ///< size of the decompressed SWF file
  103. char* swfurl; ///< url of the swf player
  104. char* swfverify; ///< URL to player swf file, compute hash/size automatically
  105. char swfverification[42]; ///< hash of the SWF verification
  106. char* pageurl; ///< url of the web page
  107. char* subscribe; ///< name of live stream to subscribe
  108. int max_sent_unacked; ///< max unacked sent bytes
  109. int client_buffer_time; ///< client buffer time in ms
  110. int flush_interval; ///< number of packets flushed in the same request (RTMPT only)
  111. int encrypted; ///< use an encrypted connection (RTMPE only)
  112. TrackedMethod*tracked_methods; ///< tracked methods buffer
  113. int nb_tracked_methods; ///< number of tracked methods
  114. int tracked_methods_size; ///< size of the tracked methods buffer
  115. int listen; ///< listen mode flag
  116. int listen_timeout; ///< listen timeout to wait for new connections
  117. int nb_streamid; ///< The next stream id to return on createStream calls
  118. double duration; ///< Duration of the stream in seconds as returned by the server (only valid if non-zero)
  119. char username[50];
  120. char password[50];
  121. char auth_params[500];
  122. int do_reconnect;
  123. int auth_tried;
  124. } RTMPContext;
  125. #define PLAYER_KEY_OPEN_PART_LEN 30 ///< length of partial key used for first client digest signing
  126. /** Client key used for digest signing */
  127. static const uint8_t rtmp_player_key[] = {
  128. 'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
  129. 'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',
  130. 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
  131. 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
  132. 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
  133. };
  134. #define SERVER_KEY_OPEN_PART_LEN 36 ///< length of partial key used for first server digest signing
  135. /** Key used for RTMP server digest signing */
  136. static const uint8_t rtmp_server_key[] = {
  137. 'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
  138. 'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
  139. 'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',
  140. 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
  141. 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
  142. 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
  143. };
  144. static int handle_chunk_size(URLContext *s, RTMPPacket *pkt);
  145. static int handle_window_ack_size(URLContext *s, RTMPPacket *pkt);
  146. static int handle_set_peer_bw(URLContext *s, RTMPPacket *pkt);
  147. static int add_tracked_method(RTMPContext *rt, const char *name, int id)
  148. {
  149. int err;
  150. if (rt->nb_tracked_methods + 1 > rt->tracked_methods_size) {
  151. rt->tracked_methods_size = (rt->nb_tracked_methods + 1) * 2;
  152. if ((err = av_reallocp(&rt->tracked_methods, rt->tracked_methods_size *
  153. sizeof(*rt->tracked_methods))) < 0) {
  154. rt->nb_tracked_methods = 0;
  155. rt->tracked_methods_size = 0;
  156. return err;
  157. }
  158. }
  159. rt->tracked_methods[rt->nb_tracked_methods].name = av_strdup(name);
  160. if (!rt->tracked_methods[rt->nb_tracked_methods].name)
  161. return AVERROR(ENOMEM);
  162. rt->tracked_methods[rt->nb_tracked_methods].id = id;
  163. rt->nb_tracked_methods++;
  164. return 0;
  165. }
  166. static void del_tracked_method(RTMPContext *rt, int index)
  167. {
  168. memmove(&rt->tracked_methods[index], &rt->tracked_methods[index + 1],
  169. sizeof(*rt->tracked_methods) * (rt->nb_tracked_methods - index - 1));
  170. rt->nb_tracked_methods--;
  171. }
  172. static int find_tracked_method(URLContext *s, RTMPPacket *pkt, int offset,
  173. char **tracked_method)
  174. {
  175. RTMPContext *rt = s->priv_data;
  176. GetByteContext gbc;
  177. double pkt_id;
  178. int ret;
  179. int i;
  180. bytestream2_init(&gbc, pkt->data + offset, pkt->size - offset);
  181. if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
  182. return ret;
  183. for (i = 0; i < rt->nb_tracked_methods; i++) {
  184. if (rt->tracked_methods[i].id != pkt_id)
  185. continue;
  186. *tracked_method = rt->tracked_methods[i].name;
  187. del_tracked_method(rt, i);
  188. break;
  189. }
  190. return 0;
  191. }
  192. static void free_tracked_methods(RTMPContext *rt)
  193. {
  194. int i;
  195. for (i = 0; i < rt->nb_tracked_methods; i ++)
  196. av_freep(&rt->tracked_methods[i].name);
  197. av_freep(&rt->tracked_methods);
  198. rt->tracked_methods_size = 0;
  199. rt->nb_tracked_methods = 0;
  200. }
  201. static int rtmp_send_packet(RTMPContext *rt, RTMPPacket *pkt, int track)
  202. {
  203. int ret;
  204. if (pkt->type == RTMP_PT_INVOKE && track) {
  205. GetByteContext gbc;
  206. char name[128];
  207. double pkt_id;
  208. int len;
  209. bytestream2_init(&gbc, pkt->data, pkt->size);
  210. if ((ret = ff_amf_read_string(&gbc, name, sizeof(name), &len)) < 0)
  211. goto fail;
  212. if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
  213. goto fail;
  214. if ((ret = add_tracked_method(rt, name, pkt_id)) < 0)
  215. goto fail;
  216. }
  217. ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
  218. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  219. fail:
  220. ff_rtmp_packet_destroy(pkt);
  221. return ret;
  222. }
  223. static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
  224. {
  225. char *field, *value;
  226. char type;
  227. /* The type must be B for Boolean, N for number, S for string, O for
  228. * object, or Z for null. For Booleans the data must be either 0 or 1 for
  229. * FALSE or TRUE, respectively. Likewise for Objects the data must be
  230. * 0 or 1 to end or begin an object, respectively. Data items in subobjects
  231. * may be named, by prefixing the type with 'N' and specifying the name
  232. * before the value (ie. NB:myFlag:1). This option may be used multiple times
  233. * to construct arbitrary AMF sequences. */
  234. if (param[0] && param[1] == ':') {
  235. type = param[0];
  236. value = param + 2;
  237. } else if (param[0] == 'N' && param[1] && param[2] == ':') {
  238. type = param[1];
  239. field = param + 3;
  240. value = strchr(field, ':');
  241. if (!value)
  242. goto fail;
  243. *value = '\0';
  244. value++;
  245. ff_amf_write_field_name(p, field);
  246. } else {
  247. goto fail;
  248. }
  249. switch (type) {
  250. case 'B':
  251. ff_amf_write_bool(p, value[0] != '0');
  252. break;
  253. case 'S':
  254. ff_amf_write_string(p, value);
  255. break;
  256. case 'N':
  257. ff_amf_write_number(p, strtod(value, NULL));
  258. break;
  259. case 'Z':
  260. ff_amf_write_null(p);
  261. break;
  262. case 'O':
  263. if (value[0] != '0')
  264. ff_amf_write_object_start(p);
  265. else
  266. ff_amf_write_object_end(p);
  267. break;
  268. default:
  269. goto fail;
  270. break;
  271. }
  272. return 0;
  273. fail:
  274. av_log(s, AV_LOG_ERROR, "Invalid AMF parameter: %s\n", param);
  275. return AVERROR(EINVAL);
  276. }
  277. /**
  278. * Generate 'connect' call and send it to the server.
  279. */
  280. static int gen_connect(URLContext *s, RTMPContext *rt)
  281. {
  282. RTMPPacket pkt;
  283. uint8_t *p;
  284. int ret;
  285. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  286. 0, 4096 + APP_MAX_LENGTH)) < 0)
  287. return ret;
  288. p = pkt.data;
  289. ff_amf_write_string(&p, "connect");
  290. ff_amf_write_number(&p, ++rt->nb_invokes);
  291. ff_amf_write_object_start(&p);
  292. ff_amf_write_field_name(&p, "app");
  293. ff_amf_write_string2(&p, rt->app, rt->auth_params);
  294. if (!rt->is_input) {
  295. ff_amf_write_field_name(&p, "type");
  296. ff_amf_write_string(&p, "nonprivate");
  297. }
  298. ff_amf_write_field_name(&p, "flashVer");
  299. ff_amf_write_string(&p, rt->flashver);
  300. if (rt->swfurl || rt->swfverify) {
  301. ff_amf_write_field_name(&p, "swfUrl");
  302. if (rt->swfurl)
  303. ff_amf_write_string(&p, rt->swfurl);
  304. else
  305. ff_amf_write_string(&p, rt->swfverify);
  306. }
  307. ff_amf_write_field_name(&p, "tcUrl");
  308. ff_amf_write_string2(&p, rt->tcurl, rt->auth_params);
  309. if (rt->is_input) {
  310. ff_amf_write_field_name(&p, "fpad");
  311. ff_amf_write_bool(&p, 0);
  312. ff_amf_write_field_name(&p, "capabilities");
  313. ff_amf_write_number(&p, 15.0);
  314. /* Tell the server we support all the audio codecs except
  315. * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
  316. * which are unused in the RTMP protocol implementation. */
  317. ff_amf_write_field_name(&p, "audioCodecs");
  318. ff_amf_write_number(&p, 4071.0);
  319. ff_amf_write_field_name(&p, "videoCodecs");
  320. ff_amf_write_number(&p, 252.0);
  321. ff_amf_write_field_name(&p, "videoFunction");
  322. ff_amf_write_number(&p, 1.0);
  323. if (rt->pageurl) {
  324. ff_amf_write_field_name(&p, "pageUrl");
  325. ff_amf_write_string(&p, rt->pageurl);
  326. }
  327. }
  328. ff_amf_write_object_end(&p);
  329. if (rt->conn) {
  330. char *param = rt->conn;
  331. // Write arbitrary AMF data to the Connect message.
  332. while (param) {
  333. char *sep;
  334. param += strspn(param, " ");
  335. if (!*param)
  336. break;
  337. sep = strchr(param, ' ');
  338. if (sep)
  339. *sep = '\0';
  340. if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) {
  341. // Invalid AMF parameter.
  342. ff_rtmp_packet_destroy(&pkt);
  343. return ret;
  344. }
  345. if (sep)
  346. param = sep + 1;
  347. else
  348. break;
  349. }
  350. }
  351. pkt.size = p - pkt.data;
  352. return rtmp_send_packet(rt, &pkt, 1);
  353. }
  354. #define RTMP_CTRL_ABORT_MESSAGE (2)
  355. static int read_connect(URLContext *s, RTMPContext *rt)
  356. {
  357. RTMPPacket pkt = { 0 };
  358. uint8_t *p;
  359. const uint8_t *cp;
  360. int ret;
  361. char command[64];
  362. int stringlen;
  363. double seqnum;
  364. uint8_t tmpstr[256];
  365. GetByteContext gbc;
  366. // handle RTMP Protocol Control Messages
  367. for (;;) {
  368. if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
  369. &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
  370. return ret;
  371. #ifdef DEBUG
  372. ff_rtmp_packet_dump(s, &pkt);
  373. #endif
  374. if (pkt.type == RTMP_PT_CHUNK_SIZE) {
  375. if ((ret = handle_chunk_size(s, &pkt)) < 0) {
  376. ff_rtmp_packet_destroy(&pkt);
  377. return ret;
  378. }
  379. } else if (pkt.type == RTMP_CTRL_ABORT_MESSAGE) {
  380. av_log(s, AV_LOG_ERROR, "received abort message\n");
  381. ff_rtmp_packet_destroy(&pkt);
  382. return AVERROR_UNKNOWN;
  383. } else if (pkt.type == RTMP_PT_BYTES_READ) {
  384. av_log(s, AV_LOG_TRACE, "received acknowledgement\n");
  385. } else if (pkt.type == RTMP_PT_WINDOW_ACK_SIZE) {
  386. if ((ret = handle_window_ack_size(s, &pkt)) < 0) {
  387. ff_rtmp_packet_destroy(&pkt);
  388. return ret;
  389. }
  390. } else if (pkt.type == RTMP_PT_SET_PEER_BW) {
  391. if ((ret = handle_set_peer_bw(s, &pkt)) < 0) {
  392. ff_rtmp_packet_destroy(&pkt);
  393. return ret;
  394. }
  395. } else if (pkt.type == RTMP_PT_INVOKE) {
  396. // received RTMP Command Message
  397. break;
  398. } else {
  399. av_log(s, AV_LOG_ERROR, "Unknown control message type (%d)\n", pkt.type);
  400. }
  401. ff_rtmp_packet_destroy(&pkt);
  402. }
  403. cp = pkt.data;
  404. bytestream2_init(&gbc, cp, pkt.size);
  405. if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
  406. av_log(s, AV_LOG_ERROR, "Unable to read command string\n");
  407. ff_rtmp_packet_destroy(&pkt);
  408. return AVERROR_INVALIDDATA;
  409. }
  410. if (strcmp(command, "connect")) {
  411. av_log(s, AV_LOG_ERROR, "Expecting connect, got %s\n", command);
  412. ff_rtmp_packet_destroy(&pkt);
  413. return AVERROR_INVALIDDATA;
  414. }
  415. ret = ff_amf_read_number(&gbc, &seqnum);
  416. if (ret)
  417. av_log(s, AV_LOG_WARNING, "SeqNum not found\n");
  418. /* Here one could parse an AMF Object with data as flashVers and others. */
  419. ret = ff_amf_get_field_value(gbc.buffer,
  420. gbc.buffer + bytestream2_get_bytes_left(&gbc),
  421. "app", tmpstr, sizeof(tmpstr));
  422. if (ret)
  423. av_log(s, AV_LOG_WARNING, "App field not found in connect\n");
  424. if (!ret && strcmp(tmpstr, rt->app))
  425. av_log(s, AV_LOG_WARNING, "App field don't match up: %s <-> %s\n",
  426. tmpstr, rt->app);
  427. ff_rtmp_packet_destroy(&pkt);
  428. // Send Window Acknowledgement Size (as defined in specification)
  429. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  430. RTMP_PT_WINDOW_ACK_SIZE, 0, 4)) < 0)
  431. return ret;
  432. p = pkt.data;
  433. // Inform the peer about how often we want acknowledgements about what
  434. // we send. (We don't check for the acknowledgements currently.)
  435. bytestream_put_be32(&p, rt->max_sent_unacked);
  436. pkt.size = p - pkt.data;
  437. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  438. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  439. ff_rtmp_packet_destroy(&pkt);
  440. if (ret < 0)
  441. return ret;
  442. // Set Peer Bandwidth
  443. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  444. RTMP_PT_SET_PEER_BW, 0, 5)) < 0)
  445. return ret;
  446. p = pkt.data;
  447. // Tell the peer to only send this many bytes unless it gets acknowledgements.
  448. // This could be any arbitrary value we want here.
  449. bytestream_put_be32(&p, rt->max_sent_unacked);
  450. bytestream_put_byte(&p, 2); // dynamic
  451. pkt.size = p - pkt.data;
  452. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  453. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  454. ff_rtmp_packet_destroy(&pkt);
  455. if (ret < 0)
  456. return ret;
  457. // User control
  458. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  459. RTMP_PT_USER_CONTROL, 0, 6)) < 0)
  460. return ret;
  461. p = pkt.data;
  462. bytestream_put_be16(&p, 0); // 0 -> Stream Begin
  463. bytestream_put_be32(&p, 0); // Stream 0
  464. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  465. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  466. ff_rtmp_packet_destroy(&pkt);
  467. if (ret < 0)
  468. return ret;
  469. // Chunk size
  470. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  471. RTMP_PT_CHUNK_SIZE, 0, 4)) < 0)
  472. return ret;
  473. p = pkt.data;
  474. bytestream_put_be32(&p, rt->out_chunk_size);
  475. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  476. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  477. ff_rtmp_packet_destroy(&pkt);
  478. if (ret < 0)
  479. return ret;
  480. // Send _result NetConnection.Connect.Success to connect
  481. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
  482. RTMP_PT_INVOKE, 0,
  483. RTMP_PKTDATA_DEFAULT_SIZE)) < 0)
  484. return ret;
  485. p = pkt.data;
  486. ff_amf_write_string(&p, "_result");
  487. ff_amf_write_number(&p, seqnum);
  488. ff_amf_write_object_start(&p);
  489. ff_amf_write_field_name(&p, "fmsVer");
  490. ff_amf_write_string(&p, "FMS/3,0,1,123");
  491. ff_amf_write_field_name(&p, "capabilities");
  492. ff_amf_write_number(&p, 31);
  493. ff_amf_write_object_end(&p);
  494. ff_amf_write_object_start(&p);
  495. ff_amf_write_field_name(&p, "level");
  496. ff_amf_write_string(&p, "status");
  497. ff_amf_write_field_name(&p, "code");
  498. ff_amf_write_string(&p, "NetConnection.Connect.Success");
  499. ff_amf_write_field_name(&p, "description");
  500. ff_amf_write_string(&p, "Connection succeeded.");
  501. ff_amf_write_field_name(&p, "objectEncoding");
  502. ff_amf_write_number(&p, 0);
  503. ff_amf_write_object_end(&p);
  504. pkt.size = p - pkt.data;
  505. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  506. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  507. ff_rtmp_packet_destroy(&pkt);
  508. if (ret < 0)
  509. return ret;
  510. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
  511. RTMP_PT_INVOKE, 0, 30)) < 0)
  512. return ret;
  513. p = pkt.data;
  514. ff_amf_write_string(&p, "onBWDone");
  515. ff_amf_write_number(&p, 0);
  516. ff_amf_write_null(&p);
  517. ff_amf_write_number(&p, 8192);
  518. pkt.size = p - pkt.data;
  519. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  520. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  521. ff_rtmp_packet_destroy(&pkt);
  522. return ret;
  523. }
  524. /**
  525. * Generate 'releaseStream' call and send it to the server. It should make
  526. * the server release some channel for media streams.
  527. */
  528. static int gen_release_stream(URLContext *s, RTMPContext *rt)
  529. {
  530. RTMPPacket pkt;
  531. uint8_t *p;
  532. int ret;
  533. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  534. 0, 29 + strlen(rt->playpath))) < 0)
  535. return ret;
  536. av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
  537. p = pkt.data;
  538. ff_amf_write_string(&p, "releaseStream");
  539. ff_amf_write_number(&p, ++rt->nb_invokes);
  540. ff_amf_write_null(&p);
  541. ff_amf_write_string(&p, rt->playpath);
  542. return rtmp_send_packet(rt, &pkt, 1);
  543. }
  544. /**
  545. * Generate 'FCPublish' call and send it to the server. It should make
  546. * the server prepare for receiving media streams.
  547. */
  548. static int gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
  549. {
  550. RTMPPacket pkt;
  551. uint8_t *p;
  552. int ret;
  553. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  554. 0, 25 + strlen(rt->playpath))) < 0)
  555. return ret;
  556. av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
  557. p = pkt.data;
  558. ff_amf_write_string(&p, "FCPublish");
  559. ff_amf_write_number(&p, ++rt->nb_invokes);
  560. ff_amf_write_null(&p);
  561. ff_amf_write_string(&p, rt->playpath);
  562. return rtmp_send_packet(rt, &pkt, 1);
  563. }
  564. /**
  565. * Generate 'FCUnpublish' call and send it to the server. It should make
  566. * the server destroy stream.
  567. */
  568. static int gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
  569. {
  570. RTMPPacket pkt;
  571. uint8_t *p;
  572. int ret;
  573. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  574. 0, 27 + strlen(rt->playpath))) < 0)
  575. return ret;
  576. av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
  577. p = pkt.data;
  578. ff_amf_write_string(&p, "FCUnpublish");
  579. ff_amf_write_number(&p, ++rt->nb_invokes);
  580. ff_amf_write_null(&p);
  581. ff_amf_write_string(&p, rt->playpath);
  582. return rtmp_send_packet(rt, &pkt, 0);
  583. }
  584. /**
  585. * Generate 'createStream' call and send it to the server. It should make
  586. * the server allocate some channel for media streams.
  587. */
  588. static int gen_create_stream(URLContext *s, RTMPContext *rt)
  589. {
  590. RTMPPacket pkt;
  591. uint8_t *p;
  592. int ret;
  593. av_log(s, AV_LOG_DEBUG, "Creating stream...\n");
  594. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  595. 0, 25)) < 0)
  596. return ret;
  597. p = pkt.data;
  598. ff_amf_write_string(&p, "createStream");
  599. ff_amf_write_number(&p, ++rt->nb_invokes);
  600. ff_amf_write_null(&p);
  601. return rtmp_send_packet(rt, &pkt, 1);
  602. }
  603. /**
  604. * Generate 'deleteStream' call and send it to the server. It should make
  605. * the server remove some channel for media streams.
  606. */
  607. static int gen_delete_stream(URLContext *s, RTMPContext *rt)
  608. {
  609. RTMPPacket pkt;
  610. uint8_t *p;
  611. int ret;
  612. av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");
  613. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  614. 0, 34)) < 0)
  615. return ret;
  616. p = pkt.data;
  617. ff_amf_write_string(&p, "deleteStream");
  618. ff_amf_write_number(&p, ++rt->nb_invokes);
  619. ff_amf_write_null(&p);
  620. ff_amf_write_number(&p, rt->stream_id);
  621. return rtmp_send_packet(rt, &pkt, 0);
  622. }
  623. /**
  624. * Generate 'getStreamLength' call and send it to the server. If the server
  625. * knows the duration of the selected stream, it will reply with the duration
  626. * in seconds.
  627. */
  628. static int gen_get_stream_length(URLContext *s, RTMPContext *rt)
  629. {
  630. RTMPPacket pkt;
  631. uint8_t *p;
  632. int ret;
  633. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
  634. 0, 31 + strlen(rt->playpath))) < 0)
  635. return ret;
  636. p = pkt.data;
  637. ff_amf_write_string(&p, "getStreamLength");
  638. ff_amf_write_number(&p, ++rt->nb_invokes);
  639. ff_amf_write_null(&p);
  640. ff_amf_write_string(&p, rt->playpath);
  641. return rtmp_send_packet(rt, &pkt, 1);
  642. }
  643. /**
  644. * Generate client buffer time and send it to the server.
  645. */
  646. static int gen_buffer_time(URLContext *s, RTMPContext *rt)
  647. {
  648. RTMPPacket pkt;
  649. uint8_t *p;
  650. int ret;
  651. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_USER_CONTROL,
  652. 1, 10)) < 0)
  653. return ret;
  654. p = pkt.data;
  655. bytestream_put_be16(&p, 3); // SetBuffer Length
  656. bytestream_put_be32(&p, rt->stream_id);
  657. bytestream_put_be32(&p, rt->client_buffer_time);
  658. return rtmp_send_packet(rt, &pkt, 0);
  659. }
  660. /**
  661. * Generate 'play' call and send it to the server, then ping the server
  662. * to start actual playing.
  663. */
  664. static int gen_play(URLContext *s, RTMPContext *rt)
  665. {
  666. RTMPPacket pkt;
  667. uint8_t *p;
  668. int ret;
  669. av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
  670. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
  671. 0, 29 + strlen(rt->playpath))) < 0)
  672. return ret;
  673. pkt.extra = rt->stream_id;
  674. p = pkt.data;
  675. ff_amf_write_string(&p, "play");
  676. ff_amf_write_number(&p, ++rt->nb_invokes);
  677. ff_amf_write_null(&p);
  678. ff_amf_write_string(&p, rt->playpath);
  679. ff_amf_write_number(&p, rt->live * 1000);
  680. return rtmp_send_packet(rt, &pkt, 1);
  681. }
  682. static int gen_seek(URLContext *s, RTMPContext *rt, int64_t timestamp)
  683. {
  684. RTMPPacket pkt;
  685. uint8_t *p;
  686. int ret;
  687. av_log(s, AV_LOG_DEBUG, "Sending seek command for timestamp %"PRId64"\n",
  688. timestamp);
  689. if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 26)) < 0)
  690. return ret;
  691. pkt.extra = rt->stream_id;
  692. p = pkt.data;
  693. ff_amf_write_string(&p, "seek");
  694. ff_amf_write_number(&p, 0); //no tracking back responses
  695. ff_amf_write_null(&p); //as usual, the first null param
  696. ff_amf_write_number(&p, timestamp); //where we want to jump
  697. return rtmp_send_packet(rt, &pkt, 1);
  698. }
  699. /**
  700. * Generate a pause packet that either pauses or unpauses the current stream.
  701. */
  702. static int gen_pause(URLContext *s, RTMPContext *rt, int pause, uint32_t timestamp)
  703. {
  704. RTMPPacket pkt;
  705. uint8_t *p;
  706. int ret;
  707. av_log(s, AV_LOG_DEBUG, "Sending pause command for timestamp %d\n",
  708. timestamp);
  709. if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 29)) < 0)
  710. return ret;
  711. pkt.extra = rt->stream_id;
  712. p = pkt.data;
  713. ff_amf_write_string(&p, "pause");
  714. ff_amf_write_number(&p, 0); //no tracking back responses
  715. ff_amf_write_null(&p); //as usual, the first null param
  716. ff_amf_write_bool(&p, pause); // pause or unpause
  717. ff_amf_write_number(&p, timestamp); //where we pause the stream
  718. return rtmp_send_packet(rt, &pkt, 1);
  719. }
  720. /**
  721. * Generate 'publish' call and send it to the server.
  722. */
  723. static int gen_publish(URLContext *s, RTMPContext *rt)
  724. {
  725. RTMPPacket pkt;
  726. uint8_t *p;
  727. int ret;
  728. av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
  729. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
  730. 0, 30 + strlen(rt->playpath))) < 0)
  731. return ret;
  732. pkt.extra = rt->stream_id;
  733. p = pkt.data;
  734. ff_amf_write_string(&p, "publish");
  735. ff_amf_write_number(&p, ++rt->nb_invokes);
  736. ff_amf_write_null(&p);
  737. ff_amf_write_string(&p, rt->playpath);
  738. ff_amf_write_string(&p, "live");
  739. return rtmp_send_packet(rt, &pkt, 1);
  740. }
  741. /**
  742. * Generate ping reply and send it to the server.
  743. */
  744. static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
  745. {
  746. RTMPPacket pkt;
  747. uint8_t *p;
  748. int ret;
  749. if (ppkt->size < 6) {
  750. av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
  751. ppkt->size);
  752. return AVERROR_INVALIDDATA;
  753. }
  754. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,RTMP_PT_USER_CONTROL,
  755. ppkt->timestamp + 1, 6)) < 0)
  756. return ret;
  757. p = pkt.data;
  758. bytestream_put_be16(&p, 7); // PingResponse
  759. bytestream_put_be32(&p, AV_RB32(ppkt->data+2));
  760. return rtmp_send_packet(rt, &pkt, 0);
  761. }
  762. /**
  763. * Generate SWF verification message and send it to the server.
  764. */
  765. static int gen_swf_verification(URLContext *s, RTMPContext *rt)
  766. {
  767. RTMPPacket pkt;
  768. uint8_t *p;
  769. int ret;
  770. av_log(s, AV_LOG_DEBUG, "Sending SWF verification...\n");
  771. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_USER_CONTROL,
  772. 0, 44)) < 0)
  773. return ret;
  774. p = pkt.data;
  775. bytestream_put_be16(&p, 27);
  776. memcpy(p, rt->swfverification, 42);
  777. return rtmp_send_packet(rt, &pkt, 0);
  778. }
  779. /**
  780. * Generate window acknowledgement size message and send it to the server.
  781. */
  782. static int gen_window_ack_size(URLContext *s, RTMPContext *rt)
  783. {
  784. RTMPPacket pkt;
  785. uint8_t *p;
  786. int ret;
  787. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_WINDOW_ACK_SIZE,
  788. 0, 4)) < 0)
  789. return ret;
  790. p = pkt.data;
  791. bytestream_put_be32(&p, rt->max_sent_unacked);
  792. return rtmp_send_packet(rt, &pkt, 0);
  793. }
  794. /**
  795. * Generate check bandwidth message and send it to the server.
  796. */
  797. static int gen_check_bw(URLContext *s, RTMPContext *rt)
  798. {
  799. RTMPPacket pkt;
  800. uint8_t *p;
  801. int ret;
  802. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  803. 0, 21)) < 0)
  804. return ret;
  805. p = pkt.data;
  806. ff_amf_write_string(&p, "_checkbw");
  807. ff_amf_write_number(&p, ++rt->nb_invokes);
  808. ff_amf_write_null(&p);
  809. return rtmp_send_packet(rt, &pkt, 1);
  810. }
  811. /**
  812. * Generate report on bytes read so far and send it to the server.
  813. */
  814. static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
  815. {
  816. RTMPPacket pkt;
  817. uint8_t *p;
  818. int ret;
  819. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ,
  820. ts, 4)) < 0)
  821. return ret;
  822. p = pkt.data;
  823. bytestream_put_be32(&p, rt->bytes_read);
  824. return rtmp_send_packet(rt, &pkt, 0);
  825. }
  826. static int gen_fcsubscribe_stream(URLContext *s, RTMPContext *rt,
  827. const char *subscribe)
  828. {
  829. RTMPPacket pkt;
  830. uint8_t *p;
  831. int ret;
  832. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  833. 0, 27 + strlen(subscribe))) < 0)
  834. return ret;
  835. p = pkt.data;
  836. ff_amf_write_string(&p, "FCSubscribe");
  837. ff_amf_write_number(&p, ++rt->nb_invokes);
  838. ff_amf_write_null(&p);
  839. ff_amf_write_string(&p, subscribe);
  840. return rtmp_send_packet(rt, &pkt, 1);
  841. }
  842. int ff_rtmp_calc_digest(const uint8_t *src, int len, int gap,
  843. const uint8_t *key, int keylen, uint8_t *dst)
  844. {
  845. AVHMAC *hmac;
  846. hmac = av_hmac_alloc(AV_HMAC_SHA256);
  847. if (!hmac)
  848. return AVERROR(ENOMEM);
  849. av_hmac_init(hmac, key, keylen);
  850. if (gap <= 0) {
  851. av_hmac_update(hmac, src, len);
  852. } else { //skip 32 bytes used for storing digest
  853. av_hmac_update(hmac, src, gap);
  854. av_hmac_update(hmac, src + gap + 32, len - gap - 32);
  855. }
  856. av_hmac_final(hmac, dst, 32);
  857. av_hmac_free(hmac);
  858. return 0;
  859. }
  860. int ff_rtmp_calc_digest_pos(const uint8_t *buf, int off, int mod_val,
  861. int add_val)
  862. {
  863. int i, digest_pos = 0;
  864. for (i = 0; i < 4; i++)
  865. digest_pos += buf[i + off];
  866. digest_pos = digest_pos % mod_val + add_val;
  867. return digest_pos;
  868. }
  869. /**
  870. * Put HMAC-SHA2 digest of packet data (except for the bytes where this digest
  871. * will be stored) into that packet.
  872. *
  873. * @param buf handshake data (1536 bytes)
  874. * @param encrypted use an encrypted connection (RTMPE)
  875. * @return offset to the digest inside input data
  876. */
  877. static int rtmp_handshake_imprint_with_digest(uint8_t *buf, int encrypted)
  878. {
  879. int ret, digest_pos;
  880. if (encrypted)
  881. digest_pos = ff_rtmp_calc_digest_pos(buf, 772, 728, 776);
  882. else
  883. digest_pos = ff_rtmp_calc_digest_pos(buf, 8, 728, 12);
  884. ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
  885. rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
  886. buf + digest_pos);
  887. if (ret < 0)
  888. return ret;
  889. return digest_pos;
  890. }
  891. /**
  892. * Verify that the received server response has the expected digest value.
  893. *
  894. * @param buf handshake data received from the server (1536 bytes)
  895. * @param off position to search digest offset from
  896. * @return 0 if digest is valid, digest position otherwise
  897. */
  898. static int rtmp_validate_digest(uint8_t *buf, int off)
  899. {
  900. uint8_t digest[32];
  901. int ret, digest_pos;
  902. digest_pos = ff_rtmp_calc_digest_pos(buf, off, 728, off + 4);
  903. ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
  904. rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
  905. digest);
  906. if (ret < 0)
  907. return ret;
  908. if (!memcmp(digest, buf + digest_pos, 32))
  909. return digest_pos;
  910. return 0;
  911. }
  912. static int rtmp_calc_swf_verification(URLContext *s, RTMPContext *rt,
  913. uint8_t *buf)
  914. {
  915. uint8_t *p;
  916. int ret;
  917. if (rt->swfhash_len != 32) {
  918. av_log(s, AV_LOG_ERROR,
  919. "Hash of the decompressed SWF file is not 32 bytes long.\n");
  920. return AVERROR(EINVAL);
  921. }
  922. p = &rt->swfverification[0];
  923. bytestream_put_byte(&p, 1);
  924. bytestream_put_byte(&p, 1);
  925. bytestream_put_be32(&p, rt->swfsize);
  926. bytestream_put_be32(&p, rt->swfsize);
  927. if ((ret = ff_rtmp_calc_digest(rt->swfhash, 32, 0, buf, 32, p)) < 0)
  928. return ret;
  929. return 0;
  930. }
  931. #if CONFIG_ZLIB
  932. static int rtmp_uncompress_swfplayer(uint8_t *in_data, int64_t in_size,
  933. uint8_t **out_data, int64_t *out_size)
  934. {
  935. z_stream zs = { 0 };
  936. void *ptr;
  937. int size;
  938. int ret = 0;
  939. zs.avail_in = in_size;
  940. zs.next_in = in_data;
  941. ret = inflateInit(&zs);
  942. if (ret != Z_OK)
  943. return AVERROR_UNKNOWN;
  944. do {
  945. uint8_t tmp_buf[16384];
  946. zs.avail_out = sizeof(tmp_buf);
  947. zs.next_out = tmp_buf;
  948. ret = inflate(&zs, Z_NO_FLUSH);
  949. if (ret != Z_OK && ret != Z_STREAM_END) {
  950. ret = AVERROR_UNKNOWN;
  951. goto fail;
  952. }
  953. size = sizeof(tmp_buf) - zs.avail_out;
  954. if (!(ptr = av_realloc(*out_data, *out_size + size))) {
  955. ret = AVERROR(ENOMEM);
  956. goto fail;
  957. }
  958. *out_data = ptr;
  959. memcpy(*out_data + *out_size, tmp_buf, size);
  960. *out_size += size;
  961. } while (zs.avail_out == 0);
  962. fail:
  963. inflateEnd(&zs);
  964. return ret;
  965. }
  966. #endif
  967. static int rtmp_calc_swfhash(URLContext *s)
  968. {
  969. RTMPContext *rt = s->priv_data;
  970. uint8_t *in_data = NULL, *out_data = NULL, *swfdata;
  971. int64_t in_size;
  972. URLContext *stream;
  973. char swfhash[32];
  974. int swfsize;
  975. int ret = 0;
  976. /* Get the SWF player file. */
  977. if ((ret = ffurl_open_whitelist(&stream, rt->swfverify, AVIO_FLAG_READ,
  978. &s->interrupt_callback, NULL,
  979. s->protocol_whitelist, s->protocol_blacklist, s)) < 0) {
  980. av_log(s, AV_LOG_ERROR, "Cannot open connection %s.\n", rt->swfverify);
  981. goto fail;
  982. }
  983. if ((in_size = ffurl_seek(stream, 0, AVSEEK_SIZE)) < 0) {
  984. ret = AVERROR(EIO);
  985. goto fail;
  986. }
  987. if (!(in_data = av_malloc(in_size))) {
  988. ret = AVERROR(ENOMEM);
  989. goto fail;
  990. }
  991. if ((ret = ffurl_read_complete(stream, in_data, in_size)) < 0)
  992. goto fail;
  993. if (in_size < 3) {
  994. ret = AVERROR_INVALIDDATA;
  995. goto fail;
  996. }
  997. if (!memcmp(in_data, "CWS", 3)) {
  998. #if CONFIG_ZLIB
  999. int64_t out_size;
  1000. /* Decompress the SWF player file using Zlib. */
  1001. if (!(out_data = av_malloc(8))) {
  1002. ret = AVERROR(ENOMEM);
  1003. goto fail;
  1004. }
  1005. *in_data = 'F'; // magic stuff
  1006. memcpy(out_data, in_data, 8);
  1007. out_size = 8;
  1008. if ((ret = rtmp_uncompress_swfplayer(in_data + 8, in_size - 8,
  1009. &out_data, &out_size)) < 0)
  1010. goto fail;
  1011. swfsize = out_size;
  1012. swfdata = out_data;
  1013. #else
  1014. av_log(s, AV_LOG_ERROR,
  1015. "Zlib is required for decompressing the SWF player file.\n");
  1016. ret = AVERROR(EINVAL);
  1017. goto fail;
  1018. #endif
  1019. } else {
  1020. swfsize = in_size;
  1021. swfdata = in_data;
  1022. }
  1023. /* Compute the SHA256 hash of the SWF player file. */
  1024. if ((ret = ff_rtmp_calc_digest(swfdata, swfsize, 0,
  1025. "Genuine Adobe Flash Player 001", 30,
  1026. swfhash)) < 0)
  1027. goto fail;
  1028. /* Set SWFVerification parameters. */
  1029. av_opt_set_bin(rt, "rtmp_swfhash", swfhash, 32, 0);
  1030. rt->swfsize = swfsize;
  1031. fail:
  1032. av_freep(&in_data);
  1033. av_freep(&out_data);
  1034. ffurl_close(stream);
  1035. return ret;
  1036. }
  1037. /**
  1038. * Perform handshake with the server by means of exchanging pseudorandom data
  1039. * signed with HMAC-SHA2 digest.
  1040. *
  1041. * @return 0 if handshake succeeds, negative value otherwise
  1042. */
  1043. static int rtmp_handshake(URLContext *s, RTMPContext *rt)
  1044. {
  1045. AVLFG rnd;
  1046. uint8_t tosend [RTMP_HANDSHAKE_PACKET_SIZE+1] = {
  1047. 3, // unencrypted data
  1048. 0, 0, 0, 0, // client uptime
  1049. RTMP_CLIENT_VER1,
  1050. RTMP_CLIENT_VER2,
  1051. RTMP_CLIENT_VER3,
  1052. RTMP_CLIENT_VER4,
  1053. };
  1054. uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
  1055. uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
  1056. int i;
  1057. int server_pos, client_pos;
  1058. uint8_t digest[32], signature[32];
  1059. int ret, type = 0;
  1060. av_log(s, AV_LOG_DEBUG, "Handshaking...\n");
  1061. av_lfg_init(&rnd, 0xDEADC0DE);
  1062. // generate handshake packet - 1536 bytes of pseudorandom data
  1063. for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
  1064. tosend[i] = av_lfg_get(&rnd) >> 24;
  1065. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1066. /* When the client wants to use RTMPE, we have to change the command
  1067. * byte to 0x06 which means to use encrypted data and we have to set
  1068. * the flash version to at least 9.0.115.0. */
  1069. tosend[0] = 6;
  1070. tosend[5] = 128;
  1071. tosend[6] = 0;
  1072. tosend[7] = 3;
  1073. tosend[8] = 2;
  1074. /* Initialize the Diffie-Hellmann context and generate the public key
  1075. * to send to the server. */
  1076. if ((ret = ff_rtmpe_gen_pub_key(rt->stream, tosend + 1)) < 0)
  1077. return ret;
  1078. }
  1079. client_pos = rtmp_handshake_imprint_with_digest(tosend + 1, rt->encrypted);
  1080. if (client_pos < 0)
  1081. return client_pos;
  1082. if ((ret = ffurl_write(rt->stream, tosend,
  1083. RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
  1084. av_log(s, AV_LOG_ERROR, "Cannot write RTMP handshake request\n");
  1085. return ret;
  1086. }
  1087. if ((ret = ffurl_read_complete(rt->stream, serverdata,
  1088. RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
  1089. av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
  1090. return ret;
  1091. }
  1092. if ((ret = ffurl_read_complete(rt->stream, clientdata,
  1093. RTMP_HANDSHAKE_PACKET_SIZE)) < 0) {
  1094. av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
  1095. return ret;
  1096. }
  1097. av_log(s, AV_LOG_DEBUG, "Type answer %d\n", serverdata[0]);
  1098. av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
  1099. serverdata[5], serverdata[6], serverdata[7], serverdata[8]);
  1100. if (rt->is_input && serverdata[5] >= 3) {
  1101. server_pos = rtmp_validate_digest(serverdata + 1, 772);
  1102. if (server_pos < 0)
  1103. return server_pos;
  1104. if (!server_pos) {
  1105. type = 1;
  1106. server_pos = rtmp_validate_digest(serverdata + 1, 8);
  1107. if (server_pos < 0)
  1108. return server_pos;
  1109. if (!server_pos) {
  1110. av_log(s, AV_LOG_ERROR, "Server response validating failed\n");
  1111. return AVERROR(EIO);
  1112. }
  1113. }
  1114. /* Generate SWFVerification token (SHA256 HMAC hash of decompressed SWF,
  1115. * key are the last 32 bytes of the server handshake. */
  1116. if (rt->swfsize) {
  1117. if ((ret = rtmp_calc_swf_verification(s, rt, serverdata + 1 +
  1118. RTMP_HANDSHAKE_PACKET_SIZE - 32)) < 0)
  1119. return ret;
  1120. }
  1121. ret = ff_rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
  1122. rtmp_server_key, sizeof(rtmp_server_key),
  1123. digest);
  1124. if (ret < 0)
  1125. return ret;
  1126. ret = ff_rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32,
  1127. 0, digest, 32, signature);
  1128. if (ret < 0)
  1129. return ret;
  1130. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1131. /* Compute the shared secret key sent by the server and initialize
  1132. * the RC4 encryption. */
  1133. if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
  1134. tosend + 1, type)) < 0)
  1135. return ret;
  1136. /* Encrypt the signature received by the server. */
  1137. ff_rtmpe_encrypt_sig(rt->stream, signature, digest, serverdata[0]);
  1138. }
  1139. if (memcmp(signature, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
  1140. av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
  1141. return AVERROR(EIO);
  1142. }
  1143. for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
  1144. tosend[i] = av_lfg_get(&rnd) >> 24;
  1145. ret = ff_rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
  1146. rtmp_player_key, sizeof(rtmp_player_key),
  1147. digest);
  1148. if (ret < 0)
  1149. return ret;
  1150. ret = ff_rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
  1151. digest, 32,
  1152. tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);
  1153. if (ret < 0)
  1154. return ret;
  1155. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1156. /* Encrypt the signature to be send to the server. */
  1157. ff_rtmpe_encrypt_sig(rt->stream, tosend +
  1158. RTMP_HANDSHAKE_PACKET_SIZE - 32, digest,
  1159. serverdata[0]);
  1160. }
  1161. // write reply back to the server
  1162. if ((ret = ffurl_write(rt->stream, tosend,
  1163. RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
  1164. return ret;
  1165. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1166. /* Set RC4 keys for encryption and update the keystreams. */
  1167. if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
  1168. return ret;
  1169. }
  1170. } else {
  1171. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1172. /* Compute the shared secret key sent by the server and initialize
  1173. * the RC4 encryption. */
  1174. if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
  1175. tosend + 1, 1)) < 0)
  1176. return ret;
  1177. if (serverdata[0] == 9) {
  1178. /* Encrypt the signature received by the server. */
  1179. ff_rtmpe_encrypt_sig(rt->stream, signature, digest,
  1180. serverdata[0]);
  1181. }
  1182. }
  1183. if ((ret = ffurl_write(rt->stream, serverdata + 1,
  1184. RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
  1185. return ret;
  1186. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1187. /* Set RC4 keys for encryption and update the keystreams. */
  1188. if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
  1189. return ret;
  1190. }
  1191. }
  1192. return 0;
  1193. }
  1194. static int rtmp_receive_hs_packet(RTMPContext* rt, uint32_t *first_int,
  1195. uint32_t *second_int, char *arraydata,
  1196. int size)
  1197. {
  1198. int inoutsize;
  1199. inoutsize = ffurl_read_complete(rt->stream, arraydata,
  1200. RTMP_HANDSHAKE_PACKET_SIZE);
  1201. if (inoutsize <= 0)
  1202. return AVERROR(EIO);
  1203. if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) {
  1204. av_log(rt, AV_LOG_ERROR, "Erroneous Message size %d"
  1205. " not following standard\n", (int)inoutsize);
  1206. return AVERROR(EINVAL);
  1207. }
  1208. *first_int = AV_RB32(arraydata);
  1209. *second_int = AV_RB32(arraydata + 4);
  1210. return 0;
  1211. }
  1212. static int rtmp_send_hs_packet(RTMPContext* rt, uint32_t first_int,
  1213. uint32_t second_int, char *arraydata, int size)
  1214. {
  1215. int inoutsize;
  1216. AV_WB32(arraydata, first_int);
  1217. AV_WB32(arraydata + 4, second_int);
  1218. inoutsize = ffurl_write(rt->stream, arraydata,
  1219. RTMP_HANDSHAKE_PACKET_SIZE);
  1220. if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) {
  1221. av_log(rt, AV_LOG_ERROR, "Unable to write answer\n");
  1222. return AVERROR(EIO);
  1223. }
  1224. return 0;
  1225. }
  1226. /**
  1227. * rtmp handshake server side
  1228. */
  1229. static int rtmp_server_handshake(URLContext *s, RTMPContext *rt)
  1230. {
  1231. uint8_t buffer[RTMP_HANDSHAKE_PACKET_SIZE];
  1232. uint32_t hs_epoch;
  1233. uint32_t hs_my_epoch;
  1234. uint8_t hs_c1[RTMP_HANDSHAKE_PACKET_SIZE];
  1235. uint8_t hs_s1[RTMP_HANDSHAKE_PACKET_SIZE];
  1236. uint32_t zeroes;
  1237. uint32_t temp = 0;
  1238. int randomidx = 0;
  1239. int inoutsize = 0;
  1240. int ret;
  1241. inoutsize = ffurl_read_complete(rt->stream, buffer, 1); // Receive C0
  1242. if (inoutsize <= 0) {
  1243. av_log(s, AV_LOG_ERROR, "Unable to read handshake\n");
  1244. return AVERROR(EIO);
  1245. }
  1246. // Check Version
  1247. if (buffer[0] != 3) {
  1248. av_log(s, AV_LOG_ERROR, "RTMP protocol version mismatch\n");
  1249. return AVERROR(EIO);
  1250. }
  1251. if (ffurl_write(rt->stream, buffer, 1) <= 0) { // Send S0
  1252. av_log(s, AV_LOG_ERROR,
  1253. "Unable to write answer - RTMP S0\n");
  1254. return AVERROR(EIO);
  1255. }
  1256. /* Receive C1 */
  1257. ret = rtmp_receive_hs_packet(rt, &hs_epoch, &zeroes, hs_c1,
  1258. RTMP_HANDSHAKE_PACKET_SIZE);
  1259. if (ret) {
  1260. av_log(s, AV_LOG_ERROR, "RTMP Handshake C1 Error\n");
  1261. return ret;
  1262. }
  1263. /* Send S1 */
  1264. /* By now same epoch will be sent */
  1265. hs_my_epoch = hs_epoch;
  1266. /* Generate random */
  1267. for (randomidx = 8; randomidx < (RTMP_HANDSHAKE_PACKET_SIZE);
  1268. randomidx += 4)
  1269. AV_WB32(hs_s1 + randomidx, av_get_random_seed());
  1270. ret = rtmp_send_hs_packet(rt, hs_my_epoch, 0, hs_s1,
  1271. RTMP_HANDSHAKE_PACKET_SIZE);
  1272. if (ret) {
  1273. av_log(s, AV_LOG_ERROR, "RTMP Handshake S1 Error\n");
  1274. return ret;
  1275. }
  1276. /* Send S2 */
  1277. ret = rtmp_send_hs_packet(rt, hs_epoch, 0, hs_c1,
  1278. RTMP_HANDSHAKE_PACKET_SIZE);
  1279. if (ret) {
  1280. av_log(s, AV_LOG_ERROR, "RTMP Handshake S2 Error\n");
  1281. return ret;
  1282. }
  1283. /* Receive C2 */
  1284. ret = rtmp_receive_hs_packet(rt, &temp, &zeroes, buffer,
  1285. RTMP_HANDSHAKE_PACKET_SIZE);
  1286. if (ret) {
  1287. av_log(s, AV_LOG_ERROR, "RTMP Handshake C2 Error\n");
  1288. return ret;
  1289. }
  1290. if (temp != hs_my_epoch)
  1291. av_log(s, AV_LOG_WARNING,
  1292. "Erroneous C2 Message epoch does not match up with C1 epoch\n");
  1293. if (memcmp(buffer + 8, hs_s1 + 8,
  1294. RTMP_HANDSHAKE_PACKET_SIZE - 8))
  1295. av_log(s, AV_LOG_WARNING,
  1296. "Erroneous C2 Message random does not match up\n");
  1297. return 0;
  1298. }
  1299. static int handle_chunk_size(URLContext *s, RTMPPacket *pkt)
  1300. {
  1301. RTMPContext *rt = s->priv_data;
  1302. int ret;
  1303. if (pkt->size < 4) {
  1304. av_log(s, AV_LOG_ERROR,
  1305. "Too short chunk size change packet (%d)\n",
  1306. pkt->size);
  1307. return AVERROR_INVALIDDATA;
  1308. }
  1309. if (!rt->is_input) {
  1310. /* Send the same chunk size change packet back to the server,
  1311. * setting the outgoing chunk size to the same as the incoming one. */
  1312. if ((ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
  1313. &rt->prev_pkt[1], &rt->nb_prev_pkt[1])) < 0)
  1314. return ret;
  1315. rt->out_chunk_size = AV_RB32(pkt->data);
  1316. }
  1317. rt->in_chunk_size = AV_RB32(pkt->data);
  1318. if (rt->in_chunk_size <= 0) {
  1319. av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n",
  1320. rt->in_chunk_size);
  1321. return AVERROR_INVALIDDATA;
  1322. }
  1323. av_log(s, AV_LOG_DEBUG, "New incoming chunk size = %d\n",
  1324. rt->in_chunk_size);
  1325. return 0;
  1326. }
  1327. static int handle_user_control(URLContext *s, RTMPPacket *pkt)
  1328. {
  1329. RTMPContext *rt = s->priv_data;
  1330. int t, ret;
  1331. if (pkt->size < 2) {
  1332. av_log(s, AV_LOG_ERROR, "Too short user control packet (%d)\n",
  1333. pkt->size);
  1334. return AVERROR_INVALIDDATA;
  1335. }
  1336. t = AV_RB16(pkt->data);
  1337. if (t == 6) { // PingRequest
  1338. if ((ret = gen_pong(s, rt, pkt)) < 0)
  1339. return ret;
  1340. } else if (t == 26) {
  1341. if (rt->swfsize) {
  1342. if ((ret = gen_swf_verification(s, rt)) < 0)
  1343. return ret;
  1344. } else {
  1345. av_log(s, AV_LOG_WARNING, "Ignoring SWFVerification request.\n");
  1346. }
  1347. }
  1348. return 0;
  1349. }
  1350. static int handle_set_peer_bw(URLContext *s, RTMPPacket *pkt)
  1351. {
  1352. RTMPContext *rt = s->priv_data;
  1353. if (pkt->size < 4) {
  1354. av_log(s, AV_LOG_ERROR,
  1355. "Peer bandwidth packet is less than 4 bytes long (%d)\n",
  1356. pkt->size);
  1357. return AVERROR_INVALIDDATA;
  1358. }
  1359. // We currently don't check how much the peer has acknowledged of
  1360. // what we have sent. To do that properly, we should call
  1361. // gen_window_ack_size here, to tell the peer that we want an
  1362. // acknowledgement with (at least) that interval.
  1363. rt->max_sent_unacked = AV_RB32(pkt->data);
  1364. if (rt->max_sent_unacked <= 0) {
  1365. av_log(s, AV_LOG_ERROR, "Incorrect set peer bandwidth %d\n",
  1366. rt->max_sent_unacked);
  1367. return AVERROR_INVALIDDATA;
  1368. }
  1369. av_log(s, AV_LOG_DEBUG, "Max sent, unacked = %d\n", rt->max_sent_unacked);
  1370. return 0;
  1371. }
  1372. static int handle_window_ack_size(URLContext *s, RTMPPacket *pkt)
  1373. {
  1374. RTMPContext *rt = s->priv_data;
  1375. if (pkt->size < 4) {
  1376. av_log(s, AV_LOG_ERROR,
  1377. "Too short window acknowledgement size packet (%d)\n",
  1378. pkt->size);
  1379. return AVERROR_INVALIDDATA;
  1380. }
  1381. rt->receive_report_size = AV_RB32(pkt->data);
  1382. if (rt->receive_report_size <= 0) {
  1383. av_log(s, AV_LOG_ERROR, "Incorrect window acknowledgement size %d\n",
  1384. rt->receive_report_size);
  1385. return AVERROR_INVALIDDATA;
  1386. }
  1387. av_log(s, AV_LOG_DEBUG, "Window acknowledgement size = %d\n", rt->receive_report_size);
  1388. // Send an Acknowledgement packet after receiving half the maximum
  1389. // size, to make sure the peer can keep on sending without waiting
  1390. // for acknowledgements.
  1391. rt->receive_report_size >>= 1;
  1392. return 0;
  1393. }
  1394. static int do_adobe_auth(RTMPContext *rt, const char *user, const char *salt,
  1395. const char *opaque, const char *challenge)
  1396. {
  1397. uint8_t hash[16];
  1398. char hashstr[AV_BASE64_SIZE(sizeof(hash))], challenge2[10];
  1399. struct AVMD5 *md5 = av_md5_alloc();
  1400. if (!md5)
  1401. return AVERROR(ENOMEM);
  1402. snprintf(challenge2, sizeof(challenge2), "%08x", av_get_random_seed());
  1403. av_md5_init(md5);
  1404. av_md5_update(md5, user, strlen(user));
  1405. av_md5_update(md5, salt, strlen(salt));
  1406. av_md5_update(md5, rt->password, strlen(rt->password));
  1407. av_md5_final(md5, hash);
  1408. av_base64_encode(hashstr, sizeof(hashstr), hash,
  1409. sizeof(hash));
  1410. av_md5_init(md5);
  1411. av_md5_update(md5, hashstr, strlen(hashstr));
  1412. if (opaque)
  1413. av_md5_update(md5, opaque, strlen(opaque));
  1414. else if (challenge)
  1415. av_md5_update(md5, challenge, strlen(challenge));
  1416. av_md5_update(md5, challenge2, strlen(challenge2));
  1417. av_md5_final(md5, hash);
  1418. av_base64_encode(hashstr, sizeof(hashstr), hash,
  1419. sizeof(hash));
  1420. snprintf(rt->auth_params, sizeof(rt->auth_params),
  1421. "?authmod=%s&user=%s&challenge=%s&response=%s",
  1422. "adobe", user, challenge2, hashstr);
  1423. if (opaque)
  1424. av_strlcatf(rt->auth_params, sizeof(rt->auth_params),
  1425. "&opaque=%s", opaque);
  1426. av_free(md5);
  1427. return 0;
  1428. }
  1429. static int do_llnw_auth(RTMPContext *rt, const char *user, const char *nonce)
  1430. {
  1431. uint8_t hash[16];
  1432. char hashstr1[33], hashstr2[33];
  1433. const char *realm = "live";
  1434. const char *method = "publish";
  1435. const char *qop = "auth";
  1436. const char *nc = "00000001";
  1437. char cnonce[10];
  1438. struct AVMD5 *md5 = av_md5_alloc();
  1439. if (!md5)
  1440. return AVERROR(ENOMEM);
  1441. snprintf(cnonce, sizeof(cnonce), "%08x", av_get_random_seed());
  1442. av_md5_init(md5);
  1443. av_md5_update(md5, user, strlen(user));
  1444. av_md5_update(md5, ":", 1);
  1445. av_md5_update(md5, realm, strlen(realm));
  1446. av_md5_update(md5, ":", 1);
  1447. av_md5_update(md5, rt->password, strlen(rt->password));
  1448. av_md5_final(md5, hash);
  1449. ff_data_to_hex(hashstr1, hash, 16, 1);
  1450. hashstr1[32] = '\0';
  1451. av_md5_init(md5);
  1452. av_md5_update(md5, method, strlen(method));
  1453. av_md5_update(md5, ":/", 2);
  1454. av_md5_update(md5, rt->app, strlen(rt->app));
  1455. if (!strchr(rt->app, '/'))
  1456. av_md5_update(md5, "/_definst_", strlen("/_definst_"));
  1457. av_md5_final(md5, hash);
  1458. ff_data_to_hex(hashstr2, hash, 16, 1);
  1459. hashstr2[32] = '\0';
  1460. av_md5_init(md5);
  1461. av_md5_update(md5, hashstr1, strlen(hashstr1));
  1462. av_md5_update(md5, ":", 1);
  1463. if (nonce)
  1464. av_md5_update(md5, nonce, strlen(nonce));
  1465. av_md5_update(md5, ":", 1);
  1466. av_md5_update(md5, nc, strlen(nc));
  1467. av_md5_update(md5, ":", 1);
  1468. av_md5_update(md5, cnonce, strlen(cnonce));
  1469. av_md5_update(md5, ":", 1);
  1470. av_md5_update(md5, qop, strlen(qop));
  1471. av_md5_update(md5, ":", 1);
  1472. av_md5_update(md5, hashstr2, strlen(hashstr2));
  1473. av_md5_final(md5, hash);
  1474. ff_data_to_hex(hashstr1, hash, 16, 1);
  1475. snprintf(rt->auth_params, sizeof(rt->auth_params),
  1476. "?authmod=%s&user=%s&nonce=%s&cnonce=%s&nc=%s&response=%s",
  1477. "llnw", user, nonce, cnonce, nc, hashstr1);
  1478. av_free(md5);
  1479. return 0;
  1480. }
  1481. static int handle_connect_error(URLContext *s, const char *desc)
  1482. {
  1483. RTMPContext *rt = s->priv_data;
  1484. char buf[300], *ptr, authmod[15];
  1485. int i = 0, ret = 0;
  1486. const char *user = "", *salt = "", *opaque = NULL,
  1487. *challenge = NULL, *cptr = NULL, *nonce = NULL;
  1488. if (!(cptr = strstr(desc, "authmod=adobe")) &&
  1489. !(cptr = strstr(desc, "authmod=llnw"))) {
  1490. av_log(s, AV_LOG_ERROR,
  1491. "Unknown connect error (unsupported authentication method?)\n");
  1492. return AVERROR_UNKNOWN;
  1493. }
  1494. cptr += strlen("authmod=");
  1495. while (*cptr && *cptr != ' ' && i < sizeof(authmod) - 1)
  1496. authmod[i++] = *cptr++;
  1497. authmod[i] = '\0';
  1498. if (!rt->username[0] || !rt->password[0]) {
  1499. av_log(s, AV_LOG_ERROR, "No credentials set\n");
  1500. return AVERROR_UNKNOWN;
  1501. }
  1502. if (strstr(desc, "?reason=authfailed")) {
  1503. av_log(s, AV_LOG_ERROR, "Incorrect username/password\n");
  1504. return AVERROR_UNKNOWN;
  1505. } else if (strstr(desc, "?reason=nosuchuser")) {
  1506. av_log(s, AV_LOG_ERROR, "Incorrect username\n");
  1507. return AVERROR_UNKNOWN;
  1508. }
  1509. if (rt->auth_tried) {
  1510. av_log(s, AV_LOG_ERROR, "Authentication failed\n");
  1511. return AVERROR_UNKNOWN;
  1512. }
  1513. rt->auth_params[0] = '\0';
  1514. if (strstr(desc, "code=403 need auth")) {
  1515. snprintf(rt->auth_params, sizeof(rt->auth_params),
  1516. "?authmod=%s&user=%s", authmod, rt->username);
  1517. return 0;
  1518. }
  1519. if (!(cptr = strstr(desc, "?reason=needauth"))) {
  1520. av_log(s, AV_LOG_ERROR, "No auth parameters found\n");
  1521. return AVERROR_UNKNOWN;
  1522. }
  1523. av_strlcpy(buf, cptr + 1, sizeof(buf));
  1524. ptr = buf;
  1525. while (ptr) {
  1526. char *next = strchr(ptr, '&');
  1527. char *value = strchr(ptr, '=');
  1528. if (next)
  1529. *next++ = '\0';
  1530. if (value) {
  1531. *value++ = '\0';
  1532. if (!strcmp(ptr, "user")) {
  1533. user = value;
  1534. } else if (!strcmp(ptr, "salt")) {
  1535. salt = value;
  1536. } else if (!strcmp(ptr, "opaque")) {
  1537. opaque = value;
  1538. } else if (!strcmp(ptr, "challenge")) {
  1539. challenge = value;
  1540. } else if (!strcmp(ptr, "nonce")) {
  1541. nonce = value;
  1542. } else {
  1543. av_log(s, AV_LOG_INFO, "Ignoring unsupported var %s\n", ptr);
  1544. }
  1545. } else {
  1546. av_log(s, AV_LOG_WARNING, "Variable %s has NULL value\n", ptr);
  1547. }
  1548. ptr = next;
  1549. }
  1550. if (!strcmp(authmod, "adobe")) {
  1551. if ((ret = do_adobe_auth(rt, user, salt, opaque, challenge)) < 0)
  1552. return ret;
  1553. } else {
  1554. if ((ret = do_llnw_auth(rt, user, nonce)) < 0)
  1555. return ret;
  1556. }
  1557. rt->auth_tried = 1;
  1558. return 0;
  1559. }
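/*
 * Note: the auth_params string built above is not used right away; once
 * handle_invoke_error() below sets do_reconnect, rtmp_open() closes the
 * connection and retries so that the new connect request can carry these
 * authentication parameters.
 */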
  1560. static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
  1561. {
  1562. RTMPContext *rt = s->priv_data;
  1563. const uint8_t *data_end = pkt->data + pkt->size;
  1564. char *tracked_method = NULL;
  1565. int level = AV_LOG_ERROR;
  1566. uint8_t tmpstr[256];
  1567. int ret;
  1568. if ((ret = find_tracked_method(s, pkt, 9, &tracked_method)) < 0)
  1569. return ret;
  1570. if (!ff_amf_get_field_value(pkt->data + 9, data_end,
  1571. "description", tmpstr, sizeof(tmpstr))) {
  1572. if (tracked_method && (!strcmp(tracked_method, "_checkbw") ||
  1573. !strcmp(tracked_method, "releaseStream") ||
  1574. !strcmp(tracked_method, "FCSubscribe") ||
  1575. !strcmp(tracked_method, "FCPublish"))) {
  1576. /* Gracefully ignore Adobe-specific historical artifact errors. */
  1577. level = AV_LOG_WARNING;
  1578. ret = 0;
  1579. } else if (tracked_method && !strcmp(tracked_method, "getStreamLength")) {
  1580. level = rt->live ? AV_LOG_DEBUG : AV_LOG_WARNING;
  1581. ret = 0;
  1582. } else if (tracked_method && !strcmp(tracked_method, "connect")) {
  1583. ret = handle_connect_error(s, tmpstr);
  1584. if (!ret) {
  1585. rt->do_reconnect = 1;
  1586. level = AV_LOG_VERBOSE;
  1587. }
  1588. } else
  1589. ret = AVERROR_UNKNOWN;
  1590. av_log(s, level, "Server error: %s\n", tmpstr);
  1591. }
  1592. av_free(tracked_method);
  1593. return ret;
  1594. }
  1595. static int write_begin(URLContext *s)
  1596. {
  1597. RTMPContext *rt = s->priv_data;
  1598. PutByteContext pbc;
  1599. RTMPPacket spkt = { 0 };
  1600. int ret;
  1601. // Send Stream Begin 1
  1602. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_NETWORK_CHANNEL,
  1603. RTMP_PT_USER_CONTROL, 0, 6)) < 0) {
  1604. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1605. return ret;
  1606. }
  1607. bytestream2_init_writer(&pbc, spkt.data, spkt.size);
  1608. bytestream2_put_be16(&pbc, 0); // 0 -> Stream Begin
  1609. bytestream2_put_be32(&pbc, rt->nb_streamid);
  1610. ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
  1611. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  1612. ff_rtmp_packet_destroy(&spkt);
  1613. return ret;
  1614. }
  1615. static int write_status(URLContext *s, RTMPPacket *pkt,
  1616. const char *status, const char *filename)
  1617. {
  1618. RTMPContext *rt = s->priv_data;
  1619. RTMPPacket spkt = { 0 };
  1620. char statusmsg[128];
  1621. uint8_t *pp;
  1622. int ret;
  1623. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
  1624. RTMP_PT_INVOKE, 0,
  1625. RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
  1626. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1627. return ret;
  1628. }
  1629. pp = spkt.data;
  1630. spkt.extra = pkt->extra;
  1631. ff_amf_write_string(&pp, "onStatus");
  1632. ff_amf_write_number(&pp, 0);
  1633. ff_amf_write_null(&pp);
  1634. ff_amf_write_object_start(&pp);
  1635. ff_amf_write_field_name(&pp, "level");
  1636. ff_amf_write_string(&pp, "status");
  1637. ff_amf_write_field_name(&pp, "code");
  1638. ff_amf_write_string(&pp, status);
  1639. ff_amf_write_field_name(&pp, "description");
  1640. snprintf(statusmsg, sizeof(statusmsg),
  1641. "%s is now published", filename);
  1642. ff_amf_write_string(&pp, statusmsg);
  1643. ff_amf_write_field_name(&pp, "details");
  1644. ff_amf_write_string(&pp, filename);
  1645. ff_amf_write_object_end(&pp);
  1646. spkt.size = pp - spkt.data;
  1647. ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
  1648. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  1649. ff_rtmp_packet_destroy(&spkt);
  1650. return ret;
  1651. }
  1652. static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
  1653. {
  1654. RTMPContext *rt = s->priv_data;
  1655. double seqnum;
  1656. char filename[128];
  1657. char command[64];
  1658. int stringlen;
  1659. char *pchar;
  1660. const uint8_t *p = pkt->data;
  1661. uint8_t *pp = NULL;
  1662. RTMPPacket spkt = { 0 };
  1663. GetByteContext gbc;
  1664. int ret;
  1665. bytestream2_init(&gbc, p, pkt->size);
  1666. if (ff_amf_read_string(&gbc, command, sizeof(command),
  1667. &stringlen)) {
  1668. av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n");
  1669. return AVERROR_INVALIDDATA;
  1670. }
  1671. ret = ff_amf_read_number(&gbc, &seqnum);
  1672. if (ret)
  1673. return ret;
  1674. ret = ff_amf_read_null(&gbc);
  1675. if (ret)
  1676. return ret;
  1677. if (!strcmp(command, "FCPublish") ||
  1678. !strcmp(command, "publish")) {
  1679. ret = ff_amf_read_string(&gbc, filename,
  1680. sizeof(filename), &stringlen);
  1681. if (ret) {
  1682. if (ret == AVERROR(EINVAL))
  1683. av_log(s, AV_LOG_ERROR, "Unable to parse stream name - name too long?\n");
  1684. else
  1685. av_log(s, AV_LOG_ERROR, "Unable to parse stream name\n");
  1686. return ret;
  1687. }
  1688. // check with url
  1689. if (s->filename) {
  1690. pchar = strrchr(s->filename, '/');
  1691. if (!pchar) {
  1692. av_log(s, AV_LOG_WARNING,
  1693. "Unable to find / in url %s, bad format\n",
  1694. s->filename);
  1695. pchar = s->filename;
  1696. }
  1697. pchar++;
  1698. if (strcmp(pchar, filename))
  1699. av_log(s, AV_LOG_WARNING, "Unexpected stream %s, expecting"
  1700. " %s\n", filename, pchar);
  1701. }
  1702. rt->state = STATE_RECEIVING;
  1703. }
  1704. if (!strcmp(command, "FCPublish")) {
  1705. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
  1706. RTMP_PT_INVOKE, 0,
  1707. RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
  1708. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1709. return ret;
  1710. }
  1711. pp = spkt.data;
  1712. ff_amf_write_string(&pp, "onFCPublish");
  1713. } else if (!strcmp(command, "publish")) {
  1714. ret = write_begin(s);
  1715. if (ret < 0)
  1716. return ret;
  1717. // Send onStatus(NetStream.Publish.Start)
  1718. return write_status(s, pkt, "NetStream.Publish.Start",
  1719. filename);
  1720. } else if (!strcmp(command, "play")) {
  1721. ret = write_begin(s);
  1722. if (ret < 0)
  1723. return ret;
  1724. rt->state = STATE_SENDING;
  1725. return write_status(s, pkt, "NetStream.Play.Start",
  1726. filename);
  1727. } else {
  1728. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
  1729. RTMP_PT_INVOKE, 0,
  1730. RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
  1731. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1732. return ret;
  1733. }
  1734. pp = spkt.data;
  1735. ff_amf_write_string(&pp, "_result");
  1736. ff_amf_write_number(&pp, seqnum);
  1737. ff_amf_write_null(&pp);
  1738. if (!strcmp(command, "createStream")) {
  1739. rt->nb_streamid++;
  1740. if (rt->nb_streamid == 0 || rt->nb_streamid == 2)
  1741. rt->nb_streamid++; /* Values 0 and 2 are reserved */
  1742. ff_amf_write_number(&pp, rt->nb_streamid);
/* For now we do not control which streams are removed in
* deleteStream, and there is no protection against a client
* creating more than 2^32 - 2 streams. */
  1746. }
  1747. }
  1748. spkt.size = pp - spkt.data;
  1749. ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
  1750. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  1751. ff_rtmp_packet_destroy(&spkt);
  1752. return ret;
  1753. }
  1754. /**
  1755. * Read the AMF_NUMBER response ("_result") to a function call
  1756. * (e.g. createStream()). This response should be made up of the AMF_STRING
* "_result", a NULL object and then the response encoded as AMF_NUMBER. On a
* successful response, we will store the value in number (otherwise number
* will not be changed).
  1760. *
  1761. * @return 0 if reading the value succeeds, negative value otherwise
  1762. */
  1763. static int read_number_result(RTMPPacket *pkt, double *number)
  1764. {
  1765. // We only need to fit "_result" in this.
  1766. uint8_t strbuffer[8];
  1767. int stringlen;
  1768. double numbuffer;
  1769. GetByteContext gbc;
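/* For reference, the AMF0 payload parsed below typically looks like this for
 * a createStream reply (byte values are illustrative only):
 *   02 00 07 '_result'    string
 *   00 <8-byte double>    the transaction id echoed back
 *   05                    null
 *   00 <8-byte double>    the number we want to read
 */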
  1770. bytestream2_init(&gbc, pkt->data, pkt->size);
  1771. // Value 1/4: "_result" as AMF_STRING
  1772. if (ff_amf_read_string(&gbc, strbuffer, sizeof(strbuffer), &stringlen))
  1773. return AVERROR_INVALIDDATA;
  1774. if (strcmp(strbuffer, "_result"))
  1775. return AVERROR_INVALIDDATA;
  1776. // Value 2/4: The callee reference number
  1777. if (ff_amf_read_number(&gbc, &numbuffer))
  1778. return AVERROR_INVALIDDATA;
  1779. // Value 3/4: Null
  1780. if (ff_amf_read_null(&gbc))
  1781. return AVERROR_INVALIDDATA;
  1782. // Value 4/4: The response as AMF_NUMBER
  1783. if (ff_amf_read_number(&gbc, &numbuffer))
  1784. return AVERROR_INVALIDDATA;
  1785. else
  1786. *number = numbuffer;
  1787. return 0;
  1788. }
  1789. static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
  1790. {
  1791. RTMPContext *rt = s->priv_data;
  1792. char *tracked_method = NULL;
  1793. int ret = 0;
  1794. if ((ret = find_tracked_method(s, pkt, 10, &tracked_method)) < 0)
  1795. return ret;
  1796. if (!tracked_method) {
  1797. /* Ignore this reply when the current method is not tracked. */
  1798. return ret;
  1799. }
  1800. if (!strcmp(tracked_method, "connect")) {
  1801. if (!rt->is_input) {
  1802. if ((ret = gen_release_stream(s, rt)) < 0)
  1803. goto fail;
  1804. if ((ret = gen_fcpublish_stream(s, rt)) < 0)
  1805. goto fail;
  1806. } else {
  1807. if ((ret = gen_window_ack_size(s, rt)) < 0)
  1808. goto fail;
  1809. }
  1810. if ((ret = gen_create_stream(s, rt)) < 0)
  1811. goto fail;
  1812. if (rt->is_input) {
/* Send the FCSubscribe command when the name of the live
* stream to subscribe to has been set by the user, or when
* playing a live stream. */
  1815. if (rt->subscribe) {
  1816. if ((ret = gen_fcsubscribe_stream(s, rt, rt->subscribe)) < 0)
  1817. goto fail;
  1818. } else if (rt->live == -1) {
  1819. if ((ret = gen_fcsubscribe_stream(s, rt, rt->playpath)) < 0)
  1820. goto fail;
  1821. }
  1822. }
  1823. } else if (!strcmp(tracked_method, "createStream")) {
  1824. double stream_id;
  1825. if (read_number_result(pkt, &stream_id)) {
av_log(s, AV_LOG_WARNING, "Unexpected reply on createStream()\n");
  1827. } else {
  1828. rt->stream_id = stream_id;
  1829. }
  1830. if (!rt->is_input) {
  1831. if ((ret = gen_publish(s, rt)) < 0)
  1832. goto fail;
  1833. } else {
  1834. if (rt->live != -1) {
  1835. if ((ret = gen_get_stream_length(s, rt)) < 0)
  1836. goto fail;
  1837. }
  1838. if ((ret = gen_play(s, rt)) < 0)
  1839. goto fail;
  1840. if ((ret = gen_buffer_time(s, rt)) < 0)
  1841. goto fail;
  1842. }
  1843. } else if (!strcmp(tracked_method, "getStreamLength")) {
  1844. if (read_number_result(pkt, &rt->duration)) {
  1845. av_log(s, AV_LOG_WARNING, "Unexpected reply on getStreamLength()\n");
  1846. }
  1847. }
  1848. fail:
  1849. av_free(tracked_method);
  1850. return ret;
  1851. }
  1852. static int handle_invoke_status(URLContext *s, RTMPPacket *pkt)
  1853. {
  1854. RTMPContext *rt = s->priv_data;
  1855. const uint8_t *data_end = pkt->data + pkt->size;
  1856. const uint8_t *ptr = pkt->data + RTMP_HEADER;
  1857. uint8_t tmpstr[256];
  1858. int i, t;
  1859. for (i = 0; i < 2; i++) {
  1860. t = ff_amf_tag_size(ptr, data_end);
  1861. if (t < 0)
  1862. return 1;
  1863. ptr += t;
  1864. }
  1865. t = ff_amf_get_field_value(ptr, data_end, "level", tmpstr, sizeof(tmpstr));
  1866. if (!t && !strcmp(tmpstr, "error")) {
  1867. t = ff_amf_get_field_value(ptr, data_end,
  1868. "description", tmpstr, sizeof(tmpstr));
  1869. if (t || !tmpstr[0])
  1870. t = ff_amf_get_field_value(ptr, data_end, "code",
  1871. tmpstr, sizeof(tmpstr));
  1872. if (!t)
  1873. av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
  1874. return -1;
  1875. }
  1876. t = ff_amf_get_field_value(ptr, data_end, "code", tmpstr, sizeof(tmpstr));
  1877. if (!t && !strcmp(tmpstr, "NetStream.Play.Start")) rt->state = STATE_PLAYING;
  1878. if (!t && !strcmp(tmpstr, "NetStream.Play.Stop")) rt->state = STATE_STOPPED;
  1879. if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
  1880. if (!t && !strcmp(tmpstr, "NetStream.Publish.Start")) rt->state = STATE_PUBLISHING;
  1881. if (!t && !strcmp(tmpstr, "NetStream.Seek.Notify")) rt->state = STATE_PLAYING;
  1882. return 0;
  1883. }
  1884. static int handle_invoke(URLContext *s, RTMPPacket *pkt)
  1885. {
  1886. RTMPContext *rt = s->priv_data;
  1887. int ret = 0;
//TODO: check for messages sent in the wrong state?
  1889. if (ff_amf_match_string(pkt->data, pkt->size, "_error")) {
  1890. if ((ret = handle_invoke_error(s, pkt)) < 0)
  1891. return ret;
  1892. } else if (ff_amf_match_string(pkt->data, pkt->size, "_result")) {
  1893. if ((ret = handle_invoke_result(s, pkt)) < 0)
  1894. return ret;
  1895. } else if (ff_amf_match_string(pkt->data, pkt->size, "onStatus")) {
  1896. if ((ret = handle_invoke_status(s, pkt)) < 0)
  1897. return ret;
  1898. } else if (ff_amf_match_string(pkt->data, pkt->size, "onBWDone")) {
  1899. if ((ret = gen_check_bw(s, rt)) < 0)
  1900. return ret;
  1901. } else if (ff_amf_match_string(pkt->data, pkt->size, "releaseStream") ||
  1902. ff_amf_match_string(pkt->data, pkt->size, "FCPublish") ||
  1903. ff_amf_match_string(pkt->data, pkt->size, "publish") ||
  1904. ff_amf_match_string(pkt->data, pkt->size, "play") ||
  1905. ff_amf_match_string(pkt->data, pkt->size, "_checkbw") ||
  1906. ff_amf_match_string(pkt->data, pkt->size, "createStream")) {
  1907. if ((ret = send_invoke_response(s, pkt)) < 0)
  1908. return ret;
  1909. }
  1910. return ret;
  1911. }
  1912. static int update_offset(RTMPContext *rt, int size)
  1913. {
  1914. int old_flv_size;
  1915. // generate packet header and put data into buffer for FLV demuxer
  1916. if (rt->flv_off < rt->flv_size) {
  1917. // There is old unread data in the buffer, thus append at the end
  1918. old_flv_size = rt->flv_size;
  1919. rt->flv_size += size;
  1920. } else {
  1921. // All data has been read, write the new data at the start of the buffer
  1922. old_flv_size = 0;
  1923. rt->flv_size = size;
  1924. rt->flv_off = 0;
  1925. }
  1926. return old_flv_size;
  1927. }
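/*
 * append_flv_data() below rewraps an RTMP audio/video payload as a standard
 * FLV tag for the FLV demuxer, hence the extra 11 + 4 bytes per packet:
 * 1 byte tag type, 3 bytes payload size, 3 + 1 bytes timestamp, 3 bytes
 * stream id (always 0), the payload itself, and a trailing 4-byte
 * "previous tag size" field (payload size + RTMP_HEADER).
 */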
  1928. static int append_flv_data(RTMPContext *rt, RTMPPacket *pkt, int skip)
  1929. {
  1930. int old_flv_size, ret;
  1931. PutByteContext pbc;
  1932. const uint8_t *data = pkt->data + skip;
  1933. const int size = pkt->size - skip;
  1934. uint32_t ts = pkt->timestamp;
  1935. if (pkt->type == RTMP_PT_AUDIO) {
  1936. rt->has_audio = 1;
  1937. } else if (pkt->type == RTMP_PT_VIDEO) {
  1938. rt->has_video = 1;
  1939. }
  1940. old_flv_size = update_offset(rt, size + 15);
  1941. if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
  1942. rt->flv_size = rt->flv_off = 0;
  1943. return ret;
  1944. }
  1945. bytestream2_init_writer(&pbc, rt->flv_data, rt->flv_size);
  1946. bytestream2_skip_p(&pbc, old_flv_size);
  1947. bytestream2_put_byte(&pbc, pkt->type);
  1948. bytestream2_put_be24(&pbc, size);
  1949. bytestream2_put_be24(&pbc, ts);
  1950. bytestream2_put_byte(&pbc, ts >> 24);
  1951. bytestream2_put_be24(&pbc, 0);
  1952. bytestream2_put_buffer(&pbc, data, size);
  1953. bytestream2_put_be32(&pbc, size + RTMP_HEADER);
  1954. return 0;
  1955. }
  1956. static int handle_notify(URLContext *s, RTMPPacket *pkt)
  1957. {
  1958. RTMPContext *rt = s->priv_data;
  1959. uint8_t commandbuffer[64];
  1960. char statusmsg[128];
  1961. int stringlen, ret, skip = 0;
  1962. GetByteContext gbc;
  1963. bytestream2_init(&gbc, pkt->data, pkt->size);
  1964. if (ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
  1965. &stringlen))
  1966. return AVERROR_INVALIDDATA;
  1967. if (!strcmp(commandbuffer, "onMetaData")) {
  1968. // metadata properties should be stored in a mixed array
  1969. if (bytestream2_get_byte(&gbc) == AMF_DATA_TYPE_MIXEDARRAY) {
// We have found a metadata array, so the FLV demuxer can determine
// the streams from this.
  1972. rt->received_metadata = 1;
  1973. // skip 32-bit max array index
  1974. bytestream2_skip(&gbc, 4);
  1975. while (bytestream2_get_bytes_left(&gbc) > 3) {
  1976. if (ff_amf_get_string(&gbc, statusmsg, sizeof(statusmsg),
  1977. &stringlen))
  1978. return AVERROR_INVALIDDATA;
  1979. // We do not care about the content of the property (yet).
  1980. stringlen = ff_amf_tag_size(gbc.buffer, gbc.buffer_end);
  1981. if (stringlen < 0)
  1982. return AVERROR_INVALIDDATA;
  1983. bytestream2_skip(&gbc, stringlen);
  1984. // The presence of the following properties indicates that the
  1985. // respective streams are present.
  1986. if (!strcmp(statusmsg, "videocodecid")) {
  1987. rt->has_video = 1;
  1988. }
  1989. if (!strcmp(statusmsg, "audiocodecid")) {
  1990. rt->has_audio = 1;
  1991. }
  1992. }
  1993. if (bytestream2_get_be24(&gbc) != AMF_END_OF_OBJECT)
  1994. return AVERROR_INVALIDDATA;
  1995. }
  1996. }
  1997. // Skip the @setDataFrame string and validate it is a notification
  1998. if (!strcmp(commandbuffer, "@setDataFrame")) {
  1999. skip = gbc.buffer - pkt->data;
  2000. ret = ff_amf_read_string(&gbc, statusmsg,
  2001. sizeof(statusmsg), &stringlen);
  2002. if (ret < 0)
  2003. return AVERROR_INVALIDDATA;
  2004. }
  2005. return append_flv_data(rt, pkt, skip);
  2006. }
  2007. /**
  2008. * Parse received packet and possibly perform some action depending on
  2009. * the packet contents.
  2010. * @return 0 for no errors, negative values for serious errors which prevent
  2011. * further communications, positive values for uncritical errors
  2012. */
  2013. static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
  2014. {
  2015. int ret;
  2016. #ifdef DEBUG
  2017. ff_rtmp_packet_dump(s, pkt);
  2018. #endif
  2019. switch (pkt->type) {
  2020. case RTMP_PT_BYTES_READ:
  2021. av_log(s, AV_LOG_TRACE, "received bytes read report\n");
  2022. break;
  2023. case RTMP_PT_CHUNK_SIZE:
  2024. if ((ret = handle_chunk_size(s, pkt)) < 0)
  2025. return ret;
  2026. break;
  2027. case RTMP_PT_USER_CONTROL:
  2028. if ((ret = handle_user_control(s, pkt)) < 0)
  2029. return ret;
  2030. break;
  2031. case RTMP_PT_SET_PEER_BW:
  2032. if ((ret = handle_set_peer_bw(s, pkt)) < 0)
  2033. return ret;
  2034. break;
  2035. case RTMP_PT_WINDOW_ACK_SIZE:
  2036. if ((ret = handle_window_ack_size(s, pkt)) < 0)
  2037. return ret;
  2038. break;
  2039. case RTMP_PT_INVOKE:
  2040. if ((ret = handle_invoke(s, pkt)) < 0)
  2041. return ret;
  2042. break;
  2043. case RTMP_PT_VIDEO:
  2044. case RTMP_PT_AUDIO:
  2045. case RTMP_PT_METADATA:
  2046. case RTMP_PT_NOTIFY:
  2047. /* Audio, Video and Metadata packets are parsed in get_packet() */
  2048. break;
  2049. default:
  2050. av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type);
  2051. break;
  2052. }
  2053. return 0;
  2054. }
  2055. static int handle_metadata(RTMPContext *rt, RTMPPacket *pkt)
  2056. {
  2057. int ret, old_flv_size, type;
  2058. const uint8_t *next;
  2059. uint8_t *p;
  2060. uint32_t size;
  2061. uint32_t ts, cts, pts = 0;
  2062. old_flv_size = update_offset(rt, pkt->size);
  2063. if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
  2064. rt->flv_size = rt->flv_off = 0;
  2065. return ret;
  2066. }
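/* An RTMP_PT_METADATA payload is itself a sequence of FLV tags; the loop
 * below copies them while rebasing each tag's timestamp onto the RTMP packet
 * timestamp using the deltas between consecutive tags. */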
  2067. next = pkt->data;
  2068. p = rt->flv_data + old_flv_size;
  2069. /* copy data while rewriting timestamps */
  2070. ts = pkt->timestamp;
  2071. while (next - pkt->data < pkt->size - RTMP_HEADER) {
  2072. type = bytestream_get_byte(&next);
  2073. size = bytestream_get_be24(&next);
  2074. cts = bytestream_get_be24(&next);
  2075. cts |= bytestream_get_byte(&next) << 24;
  2076. if (!pts)
  2077. pts = cts;
  2078. ts += cts - pts;
  2079. pts = cts;
  2080. if (size + 3 + 4 > pkt->data + pkt->size - next)
  2081. break;
  2082. bytestream_put_byte(&p, type);
  2083. bytestream_put_be24(&p, size);
  2084. bytestream_put_be24(&p, ts);
  2085. bytestream_put_byte(&p, ts >> 24);
  2086. memcpy(p, next, size + 3 + 4);
  2087. p += size + 3;
  2088. bytestream_put_be32(&p, size + RTMP_HEADER);
  2089. next += size + 3 + 4;
  2090. }
  2091. if (p != rt->flv_data + rt->flv_size) {
  2092. av_log(NULL, AV_LOG_WARNING, "Incomplete flv packets in "
  2093. "RTMP_PT_METADATA packet\n");
  2094. rt->flv_size = p - rt->flv_data;
  2095. }
  2096. return 0;
  2097. }
  2098. /**
  2099. * Interact with the server by receiving and sending RTMP packets until
  2100. * there is some significant data (media data or expected status notification).
  2101. *
  2102. * @param s reading context
  2103. * @param for_header non-zero value tells function to work until it
  2104. * gets notification from the server that playing has been started,
  2105. * otherwise function will work until some media data is received (or
  2106. * an error happens)
  2107. * @return 0 for successful operation, negative value in case of error
  2108. */
  2109. static int get_packet(URLContext *s, int for_header)
  2110. {
  2111. RTMPContext *rt = s->priv_data;
  2112. int ret;
  2113. if (rt->state == STATE_STOPPED)
  2114. return AVERROR_EOF;
  2115. for (;;) {
  2116. RTMPPacket rpkt = { 0 };
  2117. if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
  2118. rt->in_chunk_size, &rt->prev_pkt[0],
  2119. &rt->nb_prev_pkt[0])) <= 0) {
  2120. if (ret == 0) {
  2121. return AVERROR(EAGAIN);
  2122. } else {
  2123. return AVERROR(EIO);
  2124. }
  2125. }
  2126. // Track timestamp for later use
  2127. rt->last_timestamp = rpkt.timestamp;
  2128. rt->bytes_read += ret;
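/* Send a bytes-read report (acknowledgement) back to the server once more
 * than receive_report_size bytes have arrived since the last report; see
 * rtmp_open() for the default window of 1 MiB. */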
  2129. if (rt->bytes_read - rt->last_bytes_read > rt->receive_report_size) {
  2130. av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
  2131. if ((ret = gen_bytes_read(s, rt, rpkt.timestamp + 1)) < 0)
  2132. return ret;
  2133. rt->last_bytes_read = rt->bytes_read;
  2134. }
  2135. ret = rtmp_parse_result(s, rt, &rpkt);
  2136. // At this point we must check if we are in the seek state and continue
  2137. // with the next packet. handle_invoke will get us out of this state
  2138. // when the right message is encountered
  2139. if (rt->state == STATE_SEEKING) {
  2140. ff_rtmp_packet_destroy(&rpkt);
  2141. // We continue, let the natural flow of things happen:
  2142. // AVERROR(EAGAIN) or handle_invoke gets us out of here
  2143. continue;
  2144. }
  2145. if (ret < 0) {//serious error in current packet
  2146. ff_rtmp_packet_destroy(&rpkt);
  2147. return ret;
  2148. }
  2149. if (rt->do_reconnect && for_header) {
  2150. ff_rtmp_packet_destroy(&rpkt);
  2151. return 0;
  2152. }
  2153. if (rt->state == STATE_STOPPED) {
  2154. ff_rtmp_packet_destroy(&rpkt);
  2155. return AVERROR_EOF;
  2156. }
  2157. if (for_header && (rt->state == STATE_PLAYING ||
  2158. rt->state == STATE_PUBLISHING ||
  2159. rt->state == STATE_SENDING ||
  2160. rt->state == STATE_RECEIVING)) {
  2161. ff_rtmp_packet_destroy(&rpkt);
  2162. return 0;
  2163. }
  2164. if (!rpkt.size || !rt->is_input) {
  2165. ff_rtmp_packet_destroy(&rpkt);
  2166. continue;
  2167. }
  2168. if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO) {
  2169. ret = append_flv_data(rt, &rpkt, 0);
  2170. ff_rtmp_packet_destroy(&rpkt);
  2171. return ret;
  2172. } else if (rpkt.type == RTMP_PT_NOTIFY) {
  2173. ret = handle_notify(s, &rpkt);
  2174. ff_rtmp_packet_destroy(&rpkt);
  2175. return ret;
  2176. } else if (rpkt.type == RTMP_PT_METADATA) {
  2177. ret = handle_metadata(rt, &rpkt);
  2178. ff_rtmp_packet_destroy(&rpkt);
  2179. return ret;
  2180. }
  2181. ff_rtmp_packet_destroy(&rpkt);
  2182. }
  2183. }
  2184. static int rtmp_close(URLContext *h)
  2185. {
  2186. RTMPContext *rt = h->priv_data;
  2187. int ret = 0, i, j;
  2188. if (!rt->is_input) {
  2189. rt->flv_data = NULL;
  2190. if (rt->out_pkt.size)
  2191. ff_rtmp_packet_destroy(&rt->out_pkt);
  2192. if (rt->state > STATE_FCPUBLISH)
  2193. ret = gen_fcunpublish_stream(h, rt);
  2194. }
  2195. if (rt->state > STATE_HANDSHAKED)
  2196. ret = gen_delete_stream(h, rt);
  2197. for (i = 0; i < 2; i++) {
  2198. for (j = 0; j < rt->nb_prev_pkt[i]; j++)
  2199. ff_rtmp_packet_destroy(&rt->prev_pkt[i][j]);
  2200. av_freep(&rt->prev_pkt[i]);
  2201. }
  2202. free_tracked_methods(rt);
  2203. av_freep(&rt->flv_data);
  2204. ffurl_close(rt->stream);
  2205. return ret;
  2206. }
  2207. /**
* Insert a fake onMetaData packet into the FLV stream to notify the FLV
  2209. * demuxer about the duration of the stream.
  2210. *
* This should only be done if there was no real onMetaData packet sent by the
  2212. * server at the start of the stream and if we were able to retrieve a valid
  2213. * duration via a getStreamLength call.
  2214. *
  2215. * @return 0 for successful operation, negative value in case of error
  2216. */
  2217. static int inject_fake_duration_metadata(RTMPContext *rt)
  2218. {
  2219. // We need to insert the metadata packet directly after the FLV
  2220. // header, i.e. we need to move all other already read data by the
  2221. // size of our fake metadata packet.
  2222. uint8_t* p;
  2223. // Keep old flv_data pointer
  2224. uint8_t* old_flv_data = rt->flv_data;
// Allocate a new flv_data buffer with enough space for the additional packet
  2226. if (!(rt->flv_data = av_malloc(rt->flv_size + 55))) {
  2227. rt->flv_data = old_flv_data;
  2228. return AVERROR(ENOMEM);
  2229. }
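// The injected tag occupies exactly 55 bytes: an 11-byte FLV tag header,
// a 40-byte AMF payload (1+2+10 for the "onMetaData" string, 1+4 for the
// mixed-array marker and count, 2+8 for "duration", 1+8 for the number
// value, 2+1 for the terminating empty string and end-of-object marker)
// and a 4-byte previous-tag-size field.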
  2230. // Copy FLV header
  2231. memcpy(rt->flv_data, old_flv_data, 13);
  2232. // Copy remaining packets
  2233. memcpy(rt->flv_data + 13 + 55, old_flv_data + 13, rt->flv_size - 13);
  2234. // Increase the size by the injected packet
  2235. rt->flv_size += 55;
  2236. // Delete the old FLV data
  2237. av_freep(&old_flv_data);
  2238. p = rt->flv_data + 13;
  2239. bytestream_put_byte(&p, FLV_TAG_TYPE_META);
  2240. bytestream_put_be24(&p, 40); // size of data part (sum of all parts below)
  2241. bytestream_put_be24(&p, 0); // timestamp
  2242. bytestream_put_be32(&p, 0); // reserved
  2243. // first event name as a string
  2244. bytestream_put_byte(&p, AMF_DATA_TYPE_STRING);
  2245. // "onMetaData" as AMF string
  2246. bytestream_put_be16(&p, 10);
  2247. bytestream_put_buffer(&p, "onMetaData", 10);
  2248. // mixed array (hash) with size and string/type/data tuples
  2249. bytestream_put_byte(&p, AMF_DATA_TYPE_MIXEDARRAY);
  2250. bytestream_put_be32(&p, 1); // metadata_count
  2251. // "duration" as AMF string
  2252. bytestream_put_be16(&p, 8);
  2253. bytestream_put_buffer(&p, "duration", 8);
  2254. bytestream_put_byte(&p, AMF_DATA_TYPE_NUMBER);
  2255. bytestream_put_be64(&p, av_double2int(rt->duration));
  2256. // Finalise object
  2257. bytestream_put_be16(&p, 0); // Empty string
  2258. bytestream_put_byte(&p, AMF_END_OF_OBJECT);
  2259. bytestream_put_be32(&p, 40 + RTMP_HEADER); // size of data part (sum of all parts above)
  2260. return 0;
  2261. }
  2262. /**
  2263. * Open RTMP connection and verify that the stream can be played.
  2264. *
  2265. * URL syntax: rtmp://server[:port][/app][/playpath]
  2266. * where 'app' is first one or two directories in the path
  2267. * (e.g. /ondemand/, /flash/live/, etc.)
  2268. * and 'playpath' is a file name (the rest of the path,
  2269. * may be prefixed with "mp4:")
  2270. */
  2271. static int rtmp_open(URLContext *s, const char *uri, int flags, AVDictionary **opts)
  2272. {
  2273. RTMPContext *rt = s->priv_data;
  2274. char proto[8], hostname[256], path[1024], auth[100], *fname;
  2275. char *old_app, *qmark, *n, fname_buffer[1024];
  2276. uint8_t buf[2048];
  2277. int port;
  2278. int ret;
  2279. if (rt->listen_timeout > 0)
  2280. rt->listen = 1;
  2281. rt->is_input = !(flags & AVIO_FLAG_WRITE);
  2282. av_url_split(proto, sizeof(proto), auth, sizeof(auth),
  2283. hostname, sizeof(hostname), &port,
  2284. path, sizeof(path), s->filename);
  2285. n = strchr(path, ' ');
  2286. if (n) {
  2287. av_log(s, AV_LOG_WARNING,
  2288. "Detected librtmp style URL parameters, these aren't supported "
  2289. "by the libavformat internal RTMP handler currently enabled. "
  2290. "See the documentation for the correct way to pass parameters.\n");
  2291. *n = '\0'; // Trim not supported part
  2292. }
  2293. if (auth[0]) {
  2294. char *ptr = strchr(auth, ':');
  2295. if (ptr) {
  2296. *ptr = '\0';
  2297. av_strlcpy(rt->username, auth, sizeof(rt->username));
  2298. av_strlcpy(rt->password, ptr + 1, sizeof(rt->password));
  2299. }
  2300. }
  2301. if (rt->listen && strcmp(proto, "rtmp")) {
  2302. av_log(s, AV_LOG_ERROR, "rtmp_listen not available for %s\n",
  2303. proto);
  2304. return AVERROR(EINVAL);
  2305. }
  2306. if (!strcmp(proto, "rtmpt") || !strcmp(proto, "rtmpts")) {
  2307. if (!strcmp(proto, "rtmpts"))
  2308. av_dict_set(opts, "ffrtmphttp_tls", "1", 1);
  2309. /* open the http tunneling connection */
  2310. ff_url_join(buf, sizeof(buf), "ffrtmphttp", NULL, hostname, port, NULL);
  2311. } else if (!strcmp(proto, "rtmps")) {
  2312. /* open the tls connection */
  2313. if (port < 0)
  2314. port = RTMPS_DEFAULT_PORT;
  2315. ff_url_join(buf, sizeof(buf), "tls", NULL, hostname, port, NULL);
  2316. } else if (!strcmp(proto, "rtmpe") || (!strcmp(proto, "rtmpte"))) {
  2317. if (!strcmp(proto, "rtmpte"))
  2318. av_dict_set(opts, "ffrtmpcrypt_tunneling", "1", 1);
  2319. /* open the encrypted connection */
  2320. ff_url_join(buf, sizeof(buf), "ffrtmpcrypt", NULL, hostname, port, NULL);
  2321. rt->encrypted = 1;
  2322. } else {
  2323. /* open the tcp connection */
  2324. if (port < 0)
  2325. port = RTMP_DEFAULT_PORT;
  2326. if (rt->listen)
  2327. ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port,
  2328. "?listen&listen_timeout=%d",
  2329. rt->listen_timeout * 1000);
  2330. else
  2331. ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
  2332. }
  2333. reconnect:
  2334. if ((ret = ffurl_open_whitelist(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
  2335. &s->interrupt_callback, opts,
  2336. s->protocol_whitelist, s->protocol_blacklist, s)) < 0) {
  2337. av_log(s , AV_LOG_ERROR, "Cannot open connection %s\n", buf);
  2338. goto fail;
  2339. }
  2340. if (rt->swfverify) {
  2341. if ((ret = rtmp_calc_swfhash(s)) < 0)
  2342. goto fail;
  2343. }
  2344. rt->state = STATE_START;
  2345. if (!rt->listen && (ret = rtmp_handshake(s, rt)) < 0)
  2346. goto fail;
  2347. if (rt->listen && (ret = rtmp_server_handshake(s, rt)) < 0)
  2348. goto fail;
  2349. rt->out_chunk_size = 128;
  2350. rt->in_chunk_size = 128; // Probably overwritten later
  2351. rt->state = STATE_HANDSHAKED;
  2352. // Keep the application name when it has been defined by the user.
  2353. old_app = rt->app;
  2354. rt->app = av_malloc(APP_MAX_LENGTH);
  2355. if (!rt->app) {
  2356. ret = AVERROR(ENOMEM);
  2357. goto fail;
  2358. }
  2359. //extract "app" part from path
  2360. qmark = strchr(path, '?');
  2361. if (qmark && strstr(qmark, "slist=")) {
  2362. char* amp;
  2363. // After slist we have the playpath, the full path is used as app
  2364. av_strlcpy(rt->app, path + 1, APP_MAX_LENGTH);
  2365. fname = strstr(path, "slist=") + 6;
  2366. // Strip any further query parameters from fname
  2367. amp = strchr(fname, '&');
  2368. if (amp) {
  2369. av_strlcpy(fname_buffer, fname, FFMIN(amp - fname + 1,
  2370. sizeof(fname_buffer)));
  2371. fname = fname_buffer;
  2372. }
  2373. } else if (!strncmp(path, "/ondemand/", 10)) {
  2374. fname = path + 10;
  2375. memcpy(rt->app, "ondemand", 9);
  2376. } else {
  2377. char *next = *path ? path + 1 : path;
  2378. char *p = strchr(next, '/');
  2379. if (!p) {
  2380. if (old_app) {
// If the application name has been defined by the user, assume that
// the playpath is provided in the URL
  2383. fname = next;
  2384. } else {
  2385. fname = NULL;
  2386. av_strlcpy(rt->app, next, APP_MAX_LENGTH);
  2387. }
  2388. } else {
  2389. // make sure we do not mismatch a playpath for an application instance
  2390. char *c = strchr(p + 1, ':');
  2391. fname = strchr(p + 1, '/');
  2392. if (!fname || (c && c < fname)) {
  2393. fname = p + 1;
  2394. av_strlcpy(rt->app, path + 1, FFMIN(p - path, APP_MAX_LENGTH));
  2395. } else {
  2396. fname++;
  2397. av_strlcpy(rt->app, path + 1, FFMIN(fname - path - 1, APP_MAX_LENGTH));
  2398. }
  2399. }
  2400. }
  2401. if (old_app) {
// The application name was set by the user; use it instead of the parsed one.
  2403. if (strlen(old_app) >= APP_MAX_LENGTH) {
  2404. ret = AVERROR(EINVAL);
  2405. goto fail;
  2406. }
  2407. av_free(rt->app);
  2408. rt->app = old_app;
  2409. }
  2410. if (!rt->playpath) {
  2411. rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
  2412. if (!rt->playpath) {
  2413. ret = AVERROR(ENOMEM);
  2414. goto fail;
  2415. }
  2416. if (fname) {
  2417. int len = strlen(fname);
  2418. if (!strchr(fname, ':') && len >= 4 &&
  2419. (!strcmp(fname + len - 4, ".f4v") ||
  2420. !strcmp(fname + len - 4, ".mp4"))) {
  2421. memcpy(rt->playpath, "mp4:", 5);
  2422. } else {
  2423. if (len >= 4 && !strcmp(fname + len - 4, ".flv"))
  2424. fname[len - 4] = '\0';
  2425. rt->playpath[0] = 0;
  2426. }
  2427. av_strlcat(rt->playpath, fname, PLAYPATH_MAX_LENGTH);
  2428. } else {
  2429. rt->playpath[0] = '\0';
  2430. }
  2431. }
  2432. if (!rt->tcurl) {
  2433. rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
  2434. if (!rt->tcurl) {
  2435. ret = AVERROR(ENOMEM);
  2436. goto fail;
  2437. }
  2438. ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
  2439. port, "/%s", rt->app);
  2440. }
  2441. if (!rt->flashver) {
  2442. rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
  2443. if (!rt->flashver) {
  2444. ret = AVERROR(ENOMEM);
  2445. goto fail;
  2446. }
  2447. if (rt->is_input) {
  2448. snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
  2449. RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
  2450. RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
  2451. } else {
  2452. snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
  2453. "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
  2454. }
  2455. }
  2456. rt->receive_report_size = 1048576;
  2457. rt->bytes_read = 0;
  2458. rt->has_audio = 0;
  2459. rt->has_video = 0;
  2460. rt->received_metadata = 0;
  2461. rt->last_bytes_read = 0;
  2462. rt->max_sent_unacked = 2500000;
  2463. rt->duration = 0;
  2464. av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
  2465. proto, path, rt->app, rt->playpath);
  2466. if (!rt->listen) {
  2467. if ((ret = gen_connect(s, rt)) < 0)
  2468. goto fail;
  2469. } else {
  2470. if ((ret = read_connect(s, s->priv_data)) < 0)
  2471. goto fail;
  2472. }
  2473. do {
  2474. ret = get_packet(s, 1);
  2475. } while (ret == AVERROR(EAGAIN));
  2476. if (ret < 0)
  2477. goto fail;
  2478. if (rt->do_reconnect) {
  2479. int i;
  2480. ffurl_close(rt->stream);
  2481. rt->stream = NULL;
  2482. rt->do_reconnect = 0;
  2483. rt->nb_invokes = 0;
  2484. for (i = 0; i < 2; i++)
  2485. memset(rt->prev_pkt[i], 0,
  2486. sizeof(**rt->prev_pkt) * rt->nb_prev_pkt[i]);
  2487. free_tracked_methods(rt);
  2488. goto reconnect;
  2489. }
  2490. if (rt->is_input) {
  2491. // generate FLV header for demuxer
  2492. rt->flv_size = 13;
  2493. if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
  2494. goto fail;
  2495. rt->flv_off = 0;
  2496. memcpy(rt->flv_data, "FLV\1\0\0\0\0\011\0\0\0\0", rt->flv_size);
  2497. // Read packets until we reach the first A/V packet or read metadata.
// If there was a metadata packet in front of the A/V packets, we can
  2499. // build the FLV header from this. If we do not receive any metadata,
  2500. // the FLV decoder will allocate the needed streams when their first
  2501. // audio or video packet arrives.
  2502. while (!rt->has_audio && !rt->has_video && !rt->received_metadata) {
  2503. if ((ret = get_packet(s, 0)) < 0)
  2504. goto fail;
  2505. }
  2506. // Either after we have read the metadata or (if there is none) the
  2507. // first packet of an A/V stream, we have a better knowledge about the
  2508. // streams, so set the FLV header accordingly.
  2509. if (rt->has_audio) {
  2510. rt->flv_data[4] |= FLV_HEADER_FLAG_HASAUDIO;
  2511. }
  2512. if (rt->has_video) {
  2513. rt->flv_data[4] |= FLV_HEADER_FLAG_HASVIDEO;
  2514. }
  2515. // If we received the first packet of an A/V stream and no metadata but
  2516. // the server returned a valid duration, create a fake metadata packet
  2517. // to inform the FLV decoder about the duration.
  2518. if (!rt->received_metadata && rt->duration > 0) {
  2519. if ((ret = inject_fake_duration_metadata(rt)) < 0)
  2520. goto fail;
  2521. }
  2522. } else {
  2523. rt->flv_size = 0;
  2524. rt->flv_data = NULL;
  2525. rt->flv_off = 0;
  2526. rt->skip_bytes = 13;
  2527. }
  2528. s->max_packet_size = rt->stream->max_packet_size;
  2529. s->is_streamed = 1;
  2530. return 0;
  2531. fail:
  2532. av_dict_free(opts);
  2533. rtmp_close(s);
  2534. return ret;
  2535. }
  2536. static int rtmp_read(URLContext *s, uint8_t *buf, int size)
  2537. {
  2538. RTMPContext *rt = s->priv_data;
  2539. int orig_size = size;
  2540. int ret;
  2541. while (size > 0) {
  2542. int data_left = rt->flv_size - rt->flv_off;
  2543. if (data_left >= size) {
  2544. memcpy(buf, rt->flv_data + rt->flv_off, size);
  2545. rt->flv_off += size;
  2546. return orig_size;
  2547. }
  2548. if (data_left > 0) {
  2549. memcpy(buf, rt->flv_data + rt->flv_off, data_left);
  2550. buf += data_left;
  2551. size -= data_left;
  2552. rt->flv_off = rt->flv_size;
  2553. return data_left;
  2554. }
  2555. if ((ret = get_packet(s, 0)) < 0)
  2556. return ret;
  2557. }
  2558. return orig_size;
  2559. }
  2560. static int64_t rtmp_seek(URLContext *s, int stream_index, int64_t timestamp,
  2561. int flags)
  2562. {
  2563. RTMPContext *rt = s->priv_data;
  2564. int ret;
  2565. av_log(s, AV_LOG_DEBUG,
  2566. "Seek on stream index %d at timestamp %"PRId64" with flags %08x\n",
  2567. stream_index, timestamp, flags);
  2568. if ((ret = gen_seek(s, rt, timestamp)) < 0) {
  2569. av_log(s, AV_LOG_ERROR,
  2570. "Unable to send seek command on stream index %d at timestamp "
  2571. "%"PRId64" with flags %08x\n",
  2572. stream_index, timestamp, flags);
  2573. return ret;
  2574. }
  2575. rt->flv_off = rt->flv_size;
  2576. rt->state = STATE_SEEKING;
  2577. return timestamp;
  2578. }
  2579. static int rtmp_pause(URLContext *s, int pause)
  2580. {
  2581. RTMPContext *rt = s->priv_data;
  2582. int ret;
  2583. av_log(s, AV_LOG_DEBUG, "Pause at timestamp %d\n",
  2584. rt->last_timestamp);
  2585. if ((ret = gen_pause(s, rt, pause, rt->last_timestamp)) < 0) {
  2586. av_log(s, AV_LOG_ERROR, "Unable to send pause command at timestamp %d\n",
  2587. rt->last_timestamp);
  2588. return ret;
  2589. }
  2590. return 0;
  2591. }
  2592. static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
  2593. {
  2594. RTMPContext *rt = s->priv_data;
  2595. int size_temp = size;
  2596. int pktsize, pkttype, copy;
  2597. uint32_t ts;
  2598. const uint8_t *buf_temp = buf;
  2599. uint8_t c;
  2600. int ret;
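/* The buffer handed to rtmp_write() is a raw FLV stream: rt->skip_bytes first
 * swallows the 13-byte FLV file header (and later each 4-byte
 * previous-tag-size field), each 11-byte FLV tag header is then parsed to
 * recover type, size and timestamp, and the payload is forwarded as an RTMP
 * packet of the corresponding type. */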
  2601. do {
  2602. if (rt->skip_bytes) {
  2603. int skip = FFMIN(rt->skip_bytes, size_temp);
  2604. buf_temp += skip;
  2605. size_temp -= skip;
  2606. rt->skip_bytes -= skip;
  2607. continue;
  2608. }
  2609. if (rt->flv_header_bytes < RTMP_HEADER) {
  2610. const uint8_t *header = rt->flv_header;
  2611. int channel = RTMP_AUDIO_CHANNEL;
  2612. copy = FFMIN(RTMP_HEADER - rt->flv_header_bytes, size_temp);
  2613. bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
  2614. rt->flv_header_bytes += copy;
  2615. size_temp -= copy;
  2616. if (rt->flv_header_bytes < RTMP_HEADER)
  2617. break;
  2618. pkttype = bytestream_get_byte(&header);
  2619. pktsize = bytestream_get_be24(&header);
  2620. ts = bytestream_get_be24(&header);
  2621. ts |= bytestream_get_byte(&header) << 24;
  2622. bytestream_get_be24(&header);
  2623. rt->flv_size = pktsize;
  2624. if (pkttype == RTMP_PT_VIDEO)
  2625. channel = RTMP_VIDEO_CHANNEL;
  2626. if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
  2627. pkttype == RTMP_PT_NOTIFY) {
  2628. if ((ret = ff_rtmp_check_alloc_array(&rt->prev_pkt[1],
  2629. &rt->nb_prev_pkt[1],
  2630. channel)) < 0)
  2631. return ret;
// Force sending a full 12-byte header by clearing the
  2633. // channel id, to make it not match a potential earlier
  2634. // packet in the same channel.
  2635. rt->prev_pkt[1][channel].channel_id = 0;
  2636. }
  2637. //this can be a big packet, it's better to send it right here
  2638. if ((ret = ff_rtmp_packet_create(&rt->out_pkt, channel,
  2639. pkttype, ts, pktsize)) < 0)
  2640. return ret;
  2641. rt->out_pkt.extra = rt->stream_id;
  2642. rt->flv_data = rt->out_pkt.data;
  2643. }
  2644. copy = FFMIN(rt->flv_size - rt->flv_off, size_temp);
  2645. bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, copy);
  2646. rt->flv_off += copy;
  2647. size_temp -= copy;
  2648. if (rt->flv_off == rt->flv_size) {
  2649. rt->skip_bytes = 4;
  2650. if (rt->out_pkt.type == RTMP_PT_NOTIFY) {
  2651. // For onMetaData and |RtmpSampleAccess packets, we want
  2652. // @setDataFrame prepended to the packet before it gets sent.
// However, not all RTMP_PT_NOTIFY packets get this treatment
// (e.g., onTextData and onCuePoint are sent unchanged).
  2655. uint8_t commandbuffer[64];
  2656. int stringlen = 0;
  2657. GetByteContext gbc;
  2658. bytestream2_init(&gbc, rt->flv_data, rt->flv_size);
  2659. if (!ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
  2660. &stringlen)) {
  2661. if (!strcmp(commandbuffer, "onMetaData") ||
  2662. !strcmp(commandbuffer, "|RtmpSampleAccess")) {
  2663. uint8_t *ptr;
  2664. if ((ret = av_reallocp(&rt->out_pkt.data, rt->out_pkt.size + 16)) < 0) {
  2665. rt->flv_size = rt->flv_off = rt->flv_header_bytes = 0;
  2666. return ret;
  2667. }
  2668. memmove(rt->out_pkt.data + 16, rt->out_pkt.data, rt->out_pkt.size);
  2669. rt->out_pkt.size += 16;
  2670. ptr = rt->out_pkt.data;
  2671. ff_amf_write_string(&ptr, "@setDataFrame");
  2672. }
  2673. }
  2674. }
  2675. if ((ret = rtmp_send_packet(rt, &rt->out_pkt, 0)) < 0)
  2676. return ret;
  2677. rt->flv_size = 0;
  2678. rt->flv_off = 0;
  2679. rt->flv_header_bytes = 0;
  2680. rt->flv_nb_packets++;
  2681. }
  2682. } while (buf_temp - buf < size);
  2683. if (rt->flv_nb_packets < rt->flush_interval)
  2684. return size;
  2685. rt->flv_nb_packets = 0;
  2686. /* set stream into nonblocking mode */
  2687. rt->stream->flags |= AVIO_FLAG_NONBLOCK;
  2688. /* try to read one byte from the stream */
  2689. ret = ffurl_read(rt->stream, &c, 1);
  2690. /* switch the stream back into blocking mode */
  2691. rt->stream->flags &= ~AVIO_FLAG_NONBLOCK;
  2692. if (ret == AVERROR(EAGAIN)) {
  2693. /* no incoming data to handle */
  2694. return size;
  2695. } else if (ret < 0) {
  2696. return ret;
  2697. } else if (ret == 1) {
  2698. RTMPPacket rpkt = { 0 };
  2699. if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
  2700. rt->in_chunk_size,
  2701. &rt->prev_pkt[0],
  2702. &rt->nb_prev_pkt[0], c)) <= 0)
  2703. return ret;
  2704. if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
  2705. return ret;
  2706. ff_rtmp_packet_destroy(&rpkt);
  2707. }
  2708. return size;
  2709. }
  2710. #define OFFSET(x) offsetof(RTMPContext, x)
  2711. #define DEC AV_OPT_FLAG_DECODING_PARAM
  2712. #define ENC AV_OPT_FLAG_ENCODING_PARAM
  2713. static const AVOption rtmp_options[] = {
  2714. {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2715. {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {.i64 = 3000}, 0, INT_MAX, DEC|ENC},
  2716. {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2717. {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2718. {"rtmp_flush_interval", "Number of packets flushed in the same request (RTMPT only).", OFFSET(flush_interval), AV_OPT_TYPE_INT, {.i64 = 10}, 0, INT_MAX, ENC},
  2719. {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {.i64 = -2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
  2720. {"any", "both", 0, AV_OPT_TYPE_CONST, {.i64 = -2}, 0, 0, DEC, "rtmp_live"},
  2721. {"live", "live stream", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, DEC, "rtmp_live"},
  2722. {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, DEC, "rtmp_live"},
  2723. {"rtmp_pageurl", "URL of the web page in which the media was embedded. By default no value will be sent.", OFFSET(pageurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
  2724. {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2725. {"rtmp_subscribe", "Name of live stream to subscribe to. Defaults to rtmp_playpath.", OFFSET(subscribe), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
  2726. {"rtmp_swfhash", "SHA256 hash of the decompressed SWF file (32 bytes).", OFFSET(swfhash), AV_OPT_TYPE_BINARY, .flags = DEC},
  2727. {"rtmp_swfsize", "Size of the decompressed SWF file, required for SWFVerification.", OFFSET(swfsize), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC},
  2728. {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2729. {"rtmp_swfverify", "URL to player swf file, compute hash/size automatically.", OFFSET(swfverify), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
  2730. {"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2731. {"rtmp_listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
  2732. {"listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
  2733. {"timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies -rtmp_listen 1", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
  2734. { NULL },
  2735. };
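/*
 * Illustrative usage only (see the protocols documentation for canonical
 * examples): these AVOptions are passed as protocol options, e.g.
 *   ffplay -rtmp_live live rtmp://example.com/app/playpath
 */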
  2736. #define RTMP_PROTOCOL(flavor) \
  2737. static const AVClass flavor##_class = { \
  2738. .class_name = #flavor, \
  2739. .item_name = av_default_item_name, \
  2740. .option = rtmp_options, \
  2741. .version = LIBAVUTIL_VERSION_INT, \
  2742. }; \
  2743. \
  2744. const URLProtocol ff_##flavor##_protocol = { \
  2745. .name = #flavor, \
  2746. .url_open2 = rtmp_open, \
  2747. .url_read = rtmp_read, \
  2748. .url_read_seek = rtmp_seek, \
  2749. .url_read_pause = rtmp_pause, \
  2750. .url_write = rtmp_write, \
  2751. .url_close = rtmp_close, \
  2752. .priv_data_size = sizeof(RTMPContext), \
  2753. .flags = URL_PROTOCOL_FLAG_NETWORK, \
  2754. .priv_data_class= &flavor##_class, \
  2755. };
  2756. RTMP_PROTOCOL(rtmp)
  2757. RTMP_PROTOCOL(rtmpe)
  2758. RTMP_PROTOCOL(rtmps)
  2759. RTMP_PROTOCOL(rtmpt)
  2760. RTMP_PROTOCOL(rtmpte)
  2761. RTMP_PROTOCOL(rtmpts)