  1. /*
  2. * RTMP network protocol
  3. * Copyright (c) 2009 Konstantin Shishkov
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * RTMP protocol
  24. */
  25. #include "libavcodec/bytestream.h"
  26. #include "libavutil/avstring.h"
  27. #include "libavutil/base64.h"
  28. #include "libavutil/hmac.h"
  29. #include "libavutil/intfloat.h"
  30. #include "libavutil/lfg.h"
  31. #include "libavutil/md5.h"
  32. #include "libavutil/opt.h"
  33. #include "libavutil/random_seed.h"
  34. #include "avformat.h"
  35. #include "internal.h"
  36. #include "network.h"
  37. #include "flv.h"
  38. #include "rtmp.h"
  39. #include "rtmpcrypt.h"
  40. #include "rtmppkt.h"
  41. #include "url.h"
  42. #if CONFIG_ZLIB
  43. #include <zlib.h>
  44. #endif
  45. #define APP_MAX_LENGTH 1024
  46. #define PLAYPATH_MAX_LENGTH 512
  47. #define TCURL_MAX_LENGTH 1024
  48. #define FLASHVER_MAX_LENGTH 64
  49. #define RTMP_PKTDATA_DEFAULT_SIZE 4096
  50. #define RTMP_HEADER 11
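// Note: 11 bytes is the size of an FLV tag header (1 byte tag type, 3 bytes
// data size, 3 bytes timestamp, 1 byte timestamp extension, 3 bytes stream id),
// buffered piecewise in RTMPContext.flv_header below.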
  51. /** RTMP protocol handler state */
  52. typedef enum {
  53. STATE_START, ///< client has not done anything yet
  54. STATE_HANDSHAKED, ///< client has performed handshake
  55. STATE_FCPUBLISH, ///< client FCPublishing stream (for output)
  56. STATE_PLAYING, ///< client has started receiving multimedia data from server
  57. STATE_SEEKING, ///< client has started the seek operation. Back on STATE_PLAYING when the time comes
  58. STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
  59. STATE_RECEIVING, ///< received a publish command (for input)
  60. STATE_SENDING, ///< received a play command (for output)
  61. STATE_STOPPED, ///< the broadcast has been stopped
  62. } ClientState;
  63. typedef struct TrackedMethod {
  64. char *name;
  65. int id;
  66. } TrackedMethod;
  67. /** protocol handler context */
  68. typedef struct RTMPContext {
  69. const AVClass *class;
  70. URLContext* stream; ///< TCP stream used in interactions with RTMP server
  71. RTMPPacket *prev_pkt[2]; ///< packet history used when reading and sending packets ([0] for reading, [1] for writing)
  72. int nb_prev_pkt[2]; ///< number of elements in prev_pkt
  73. int in_chunk_size; ///< size of the chunks incoming RTMP packets are divided into
  74. int out_chunk_size; ///< size of the chunks outgoing RTMP packets are divided into
  75. int is_input; ///< input/output flag
  76. char *playpath; ///< stream identifier to play (with possible "mp4:" prefix)
  77. int live; ///< 0: recorded, -1: live, -2: both
  78. char *app; ///< name of application
  79. char *conn; ///< append arbitrary AMF data to the Connect message
  80. ClientState state; ///< current state
  81. int stream_id; ///< ID assigned by the server for the stream
  82. uint8_t* flv_data; ///< buffer with data for demuxer
  83. int flv_size; ///< current buffer size
  84. int flv_off; ///< number of bytes read from current buffer
  85. int flv_nb_packets; ///< number of flv packets published
  86. RTMPPacket out_pkt; ///< rtmp packet, created from flv a/v or metadata (for output)
  87. uint32_t client_report_size; ///< number of bytes after which client should report to server
  88. uint64_t bytes_read; ///< number of bytes read from server
  89. uint64_t last_bytes_read; ///< number of bytes read last reported to server
  90. uint32_t last_timestamp; ///< last timestamp received in a packet
  91. int skip_bytes; ///< number of bytes to skip from the input FLV stream in the next write call
  92. int has_audio; ///< presence of audio data
  93. int has_video; ///< presence of video data
  94. int received_metadata; ///< Indicates if we have received metadata about the streams
  95. uint8_t flv_header[RTMP_HEADER]; ///< partial incoming flv packet header
  96. int flv_header_bytes; ///< number of initialized bytes in flv_header
  97. int nb_invokes; ///< keeps track of invoke messages
  98. char* tcurl; ///< url of the target stream
  99. char* flashver; ///< version of the flash plugin
  100. char* swfhash; ///< SHA256 hash of the decompressed SWF file (32 bytes)
  101. int swfhash_len; ///< length of the SHA256 hash
  102. int swfsize; ///< size of the decompressed SWF file
  103. char* swfurl; ///< url of the swf player
  104. char* swfverify; ///< URL to player swf file, compute hash/size automatically
  105. char swfverification[42]; ///< hash of the SWF verification
  106. char* pageurl; ///< url of the web page
  107. char* subscribe; ///< name of the live stream to subscribe to
  108. int server_bw; ///< server bandwidth
  109. int client_buffer_time; ///< client buffer time in ms
  110. int flush_interval; ///< number of packets flushed in the same request (RTMPT only)
  111. int encrypted; ///< use an encrypted connection (RTMPE only)
  112. TrackedMethod *tracked_methods; ///< tracked methods buffer
  113. int nb_tracked_methods; ///< number of tracked methods
  114. int tracked_methods_size; ///< size of the tracked methods buffer
  115. int listen; ///< listen mode flag
  116. int listen_timeout; ///< listen timeout to wait for new connections
  117. int nb_streamid; ///< The next stream id to return on createStream calls
  118. double duration; ///< Duration of the stream in seconds as returned by the server (only valid if non-zero)
  119. char username[50];
  120. char password[50];
  121. char auth_params[500];
  122. int do_reconnect;
  123. int auth_tried;
  124. } RTMPContext;
  125. #define PLAYER_KEY_OPEN_PART_LEN 30 ///< length of partial key used for first client digest signing
  126. /** Client key used for digest signing */
  127. static const uint8_t rtmp_player_key[] = {
  128. 'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
  129. 'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',
  130. 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
  131. 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
  132. 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
  133. };
  134. #define SERVER_KEY_OPEN_PART_LEN 36 ///< length of partial key used for first server digest signing
  135. /** Key used for RTMP server digest signing */
  136. static const uint8_t rtmp_server_key[] = {
  137. 'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
  138. 'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
  139. 'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',
  140. 0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
  141. 0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
  142. 0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
  143. };
  144. static int handle_chunk_size(URLContext *s, RTMPPacket *pkt);
  145. static int handle_server_bw(URLContext *s, RTMPPacket *pkt);
  146. static int handle_client_bw(URLContext *s, RTMPPacket *pkt);
  147. static int add_tracked_method(RTMPContext *rt, const char *name, int id)
  148. {
  149. int err;
  150. if (rt->nb_tracked_methods + 1 > rt->tracked_methods_size) {
  151. rt->tracked_methods_size = (rt->nb_tracked_methods + 1) * 2;
  152. if ((err = av_reallocp(&rt->tracked_methods, rt->tracked_methods_size *
  153. sizeof(*rt->tracked_methods))) < 0) {
  154. rt->nb_tracked_methods = 0;
  155. rt->tracked_methods_size = 0;
  156. return err;
  157. }
  158. }
  159. rt->tracked_methods[rt->nb_tracked_methods].name = av_strdup(name);
  160. if (!rt->tracked_methods[rt->nb_tracked_methods].name)
  161. return AVERROR(ENOMEM);
  162. rt->tracked_methods[rt->nb_tracked_methods].id = id;
  163. rt->nb_tracked_methods++;
  164. return 0;
  165. }
  166. static void del_tracked_method(RTMPContext *rt, int index)
  167. {
  168. memmove(&rt->tracked_methods[index], &rt->tracked_methods[index + 1],
  169. sizeof(*rt->tracked_methods) * (rt->nb_tracked_methods - index - 1));
  170. rt->nb_tracked_methods--;
  171. }
  172. static int find_tracked_method(URLContext *s, RTMPPacket *pkt, int offset,
  173. char **tracked_method)
  174. {
  175. RTMPContext *rt = s->priv_data;
  176. GetByteContext gbc;
  177. double pkt_id;
  178. int ret;
  179. int i;
  180. bytestream2_init(&gbc, pkt->data + offset, pkt->size - offset);
  181. if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
  182. return ret;
  183. for (i = 0; i < rt->nb_tracked_methods; i++) {
  184. if (rt->tracked_methods[i].id != pkt_id)
  185. continue;
  186. *tracked_method = rt->tracked_methods[i].name;
  187. del_tracked_method(rt, i);
  188. break;
  189. }
  190. return 0;
  191. }
  192. static void free_tracked_methods(RTMPContext *rt)
  193. {
  194. int i;
  195. for (i = 0; i < rt->nb_tracked_methods; i ++)
  196. av_freep(&rt->tracked_methods[i].name);
  197. av_freep(&rt->tracked_methods);
  198. rt->tracked_methods_size = 0;
  199. rt->nb_tracked_methods = 0;
  200. }
  201. static int rtmp_send_packet(RTMPContext *rt, RTMPPacket *pkt, int track)
  202. {
  203. int ret;
  204. if (pkt->type == RTMP_PT_INVOKE && track) {
  205. GetByteContext gbc;
  206. char name[128];
  207. double pkt_id;
  208. int len;
  209. bytestream2_init(&gbc, pkt->data, pkt->size);
  210. if ((ret = ff_amf_read_string(&gbc, name, sizeof(name), &len)) < 0)
  211. goto fail;
  212. if ((ret = ff_amf_read_number(&gbc, &pkt_id)) < 0)
  213. goto fail;
  214. if ((ret = add_tracked_method(rt, name, pkt_id)) < 0)
  215. goto fail;
  216. }
  217. ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
  218. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  219. fail:
  220. ff_rtmp_packet_destroy(pkt);
  221. return ret;
  222. }
  223. static int rtmp_write_amf_data(URLContext *s, char *param, uint8_t **p)
  224. {
  225. char *field, *value;
  226. char type;
  227. /* The type must be B for Boolean, N for number, S for string, O for
  228. * object, or Z for null. For Booleans the data must be either 0 or 1 for
  229. * FALSE or TRUE, respectively. Likewise for Objects the data must be
  230. * 0 or 1 to end or begin an object, respectively. Data items in subobjects
  231. * may be named by prefixing the type with 'N' and specifying the name
  232. * before the value (i.e. NB:myFlag:1). This option may be used multiple times
  233. * to construct arbitrary AMF sequences. */
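/* For example, a connection string such as
 *   B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
 * (mirroring the example in the FFmpeg protocol documentation) appends a
 * boolean, a string and an object containing a named number and a named
 * string to the Connect message. */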
  234. if (param[0] && param[1] == ':') {
  235. type = param[0];
  236. value = param + 2;
  237. } else if (param[0] == 'N' && param[1] && param[2] == ':') {
  238. type = param[1];
  239. field = param + 3;
  240. value = strchr(field, ':');
  241. if (!value)
  242. goto fail;
  243. *value = '\0';
  244. value++;
  245. ff_amf_write_field_name(p, field);
  246. } else {
  247. goto fail;
  248. }
  249. switch (type) {
  250. case 'B':
  251. ff_amf_write_bool(p, value[0] != '0');
  252. break;
  253. case 'S':
  254. ff_amf_write_string(p, value);
  255. break;
  256. case 'N':
  257. ff_amf_write_number(p, strtod(value, NULL));
  258. break;
  259. case 'Z':
  260. ff_amf_write_null(p);
  261. break;
  262. case 'O':
  263. if (value[0] != '0')
  264. ff_amf_write_object_start(p);
  265. else
  266. ff_amf_write_object_end(p);
  267. break;
  268. default:
  269. goto fail;
  270. break;
  271. }
  272. return 0;
  273. fail:
  274. av_log(s, AV_LOG_ERROR, "Invalid AMF parameter: %s\n", param);
  275. return AVERROR(EINVAL);
  276. }
  277. /**
  278. * Generate 'connect' call and send it to the server.
  279. */
  280. static int gen_connect(URLContext *s, RTMPContext *rt)
  281. {
  282. RTMPPacket pkt;
  283. uint8_t *p;
  284. int ret;
  285. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  286. 0, 4096 + APP_MAX_LENGTH)) < 0)
  287. return ret;
  288. p = pkt.data;
  289. ff_amf_write_string(&p, "connect");
  290. ff_amf_write_number(&p, ++rt->nb_invokes);
  291. ff_amf_write_object_start(&p);
  292. ff_amf_write_field_name(&p, "app");
  293. ff_amf_write_string2(&p, rt->app, rt->auth_params);
  294. if (!rt->is_input) {
  295. ff_amf_write_field_name(&p, "type");
  296. ff_amf_write_string(&p, "nonprivate");
  297. }
  298. ff_amf_write_field_name(&p, "flashVer");
  299. ff_amf_write_string(&p, rt->flashver);
  300. if (rt->swfurl) {
  301. ff_amf_write_field_name(&p, "swfUrl");
  302. ff_amf_write_string(&p, rt->swfurl);
  303. }
  304. ff_amf_write_field_name(&p, "tcUrl");
  305. ff_amf_write_string2(&p, rt->tcurl, rt->auth_params);
  306. if (rt->is_input) {
  307. ff_amf_write_field_name(&p, "fpad");
  308. ff_amf_write_bool(&p, 0);
  309. ff_amf_write_field_name(&p, "capabilities");
  310. ff_amf_write_number(&p, 15.0);
  311. /* Tell the server we support all the audio codecs except
  312. * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
  313. * which are unused in the RTMP protocol implementation. */
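/* That is, all audio codec flags (0x0FFF) minus 0x0008 and 0x0010:
 * 0x0FFF & ~0x0018 = 0x0FE7 = 4071. */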
  314. ff_amf_write_field_name(&p, "audioCodecs");
  315. ff_amf_write_number(&p, 4071.0);
  316. ff_amf_write_field_name(&p, "videoCodecs");
  317. ff_amf_write_number(&p, 252.0);
  318. ff_amf_write_field_name(&p, "videoFunction");
  319. ff_amf_write_number(&p, 1.0);
  320. if (rt->pageurl) {
  321. ff_amf_write_field_name(&p, "pageUrl");
  322. ff_amf_write_string(&p, rt->pageurl);
  323. }
  324. }
  325. ff_amf_write_object_end(&p);
  326. if (rt->conn) {
  327. char *param = rt->conn;
  328. // Write arbitrary AMF data to the Connect message.
  329. while (param) {
  330. char *sep;
  331. param += strspn(param, " ");
  332. if (!*param)
  333. break;
  334. sep = strchr(param, ' ');
  335. if (sep)
  336. *sep = '\0';
  337. if ((ret = rtmp_write_amf_data(s, param, &p)) < 0) {
  338. // Invalid AMF parameter.
  339. ff_rtmp_packet_destroy(&pkt);
  340. return ret;
  341. }
  342. if (sep)
  343. param = sep + 1;
  344. else
  345. break;
  346. }
  347. }
  348. pkt.size = p - pkt.data;
  349. return rtmp_send_packet(rt, &pkt, 1);
  350. }
  351. #define RTMP_CTRL_ABORT_MESSAGE (2)
  352. static int read_connect(URLContext *s, RTMPContext *rt)
  353. {
  354. RTMPPacket pkt = { 0 };
  355. uint8_t *p;
  356. const uint8_t *cp;
  357. int ret;
  358. char command[64];
  359. int stringlen;
  360. double seqnum;
  361. uint8_t tmpstr[256];
  362. GetByteContext gbc;
  363. // handle RTMP Protocol Control Messages
  364. for (;;) {
  365. if ((ret = ff_rtmp_packet_read(rt->stream, &pkt, rt->in_chunk_size,
  366. &rt->prev_pkt[0], &rt->nb_prev_pkt[0])) < 0)
  367. return ret;
  368. #ifdef DEBUG
  369. ff_rtmp_packet_dump(s, &pkt);
  370. #endif
  371. if (pkt.type == RTMP_PT_CHUNK_SIZE) {
  372. if ((ret = handle_chunk_size(s, &pkt)) < 0) {
  373. ff_rtmp_packet_destroy(&pkt);
  374. return ret;
  375. }
  376. } else if (pkt.type == RTMP_CTRL_ABORT_MESSAGE) {
  377. av_log(s, AV_LOG_ERROR, "received abort message\n");
  378. ff_rtmp_packet_destroy(&pkt);
  379. return AVERROR_UNKNOWN;
  380. } else if (pkt.type == RTMP_PT_BYTES_READ) {
  381. av_log(s, AV_LOG_TRACE, "received acknowledgement\n");
  382. } else if (pkt.type == RTMP_PT_SERVER_BW) {
  383. if ((ret = handle_server_bw(s, &pkt)) < 0) {
  384. ff_rtmp_packet_destroy(&pkt);
  385. return ret;
  386. }
  387. } else if (pkt.type == RTMP_PT_CLIENT_BW) {
  388. if ((ret = handle_client_bw(s, &pkt)) < 0) {
  389. ff_rtmp_packet_destroy(&pkt);
  390. return ret;
  391. }
  392. } else if (pkt.type == RTMP_PT_INVOKE) {
  393. // received RTMP Command Message
  394. break;
  395. } else {
  396. av_log(s, AV_LOG_ERROR, "Unknown control message type (%d)\n", pkt.type);
  397. }
  398. ff_rtmp_packet_destroy(&pkt);
  399. }
  400. cp = pkt.data;
  401. bytestream2_init(&gbc, cp, pkt.size);
  402. if (ff_amf_read_string(&gbc, command, sizeof(command), &stringlen)) {
  403. av_log(s, AV_LOG_ERROR, "Unable to read command string\n");
  404. ff_rtmp_packet_destroy(&pkt);
  405. return AVERROR_INVALIDDATA;
  406. }
  407. if (strcmp(command, "connect")) {
  408. av_log(s, AV_LOG_ERROR, "Expecting connect, got %s\n", command);
  409. ff_rtmp_packet_destroy(&pkt);
  410. return AVERROR_INVALIDDATA;
  411. }
  412. ret = ff_amf_read_number(&gbc, &seqnum);
  413. if (ret)
  414. av_log(s, AV_LOG_WARNING, "SeqNum not found\n");
  415. /* Here one could parse an AMF object with fields such as flashVer and others. */
  416. ret = ff_amf_get_field_value(gbc.buffer,
  417. gbc.buffer + bytestream2_get_bytes_left(&gbc),
  418. "app", tmpstr, sizeof(tmpstr));
  419. if (ret)
  420. av_log(s, AV_LOG_WARNING, "App field not found in connect\n");
  421. if (!ret && strcmp(tmpstr, rt->app))
  422. av_log(s, AV_LOG_WARNING, "App field does not match up: %s <-> %s\n",
  423. tmpstr, rt->app);
  424. ff_rtmp_packet_destroy(&pkt);
  425. // Send Window Acknowledgement Size (as defined in specification)
  426. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  427. RTMP_PT_SERVER_BW, 0, 4)) < 0)
  428. return ret;
  429. p = pkt.data;
  430. bytestream_put_be32(&p, rt->server_bw);
  431. pkt.size = p - pkt.data;
  432. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  433. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  434. ff_rtmp_packet_destroy(&pkt);
  435. if (ret < 0)
  436. return ret;
  437. // Send Peer Bandwidth
  438. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  439. RTMP_PT_CLIENT_BW, 0, 5)) < 0)
  440. return ret;
  441. p = pkt.data;
  442. bytestream_put_be32(&p, rt->server_bw);
  443. bytestream_put_byte(&p, 2); // dynamic
  444. pkt.size = p - pkt.data;
  445. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  446. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  447. ff_rtmp_packet_destroy(&pkt);
  448. if (ret < 0)
  449. return ret;
  450. // Ping request
  451. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  452. RTMP_PT_PING, 0, 6)) < 0)
  453. return ret;
  454. p = pkt.data;
  455. bytestream_put_be16(&p, 0); // 0 -> Stream Begin
  456. bytestream_put_be32(&p, 0);
  457. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  458. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  459. ff_rtmp_packet_destroy(&pkt);
  460. if (ret < 0)
  461. return ret;
  462. // Chunk size
  463. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL,
  464. RTMP_PT_CHUNK_SIZE, 0, 4)) < 0)
  465. return ret;
  466. p = pkt.data;
  467. bytestream_put_be32(&p, rt->out_chunk_size);
  468. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  469. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  470. ff_rtmp_packet_destroy(&pkt);
  471. if (ret < 0)
  472. return ret;
  473. // Send _result NetConnection.Connect.Success to connect
  474. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
  475. RTMP_PT_INVOKE, 0,
  476. RTMP_PKTDATA_DEFAULT_SIZE)) < 0)
  477. return ret;
  478. p = pkt.data;
  479. ff_amf_write_string(&p, "_result");
  480. ff_amf_write_number(&p, seqnum);
  481. ff_amf_write_object_start(&p);
  482. ff_amf_write_field_name(&p, "fmsVer");
  483. ff_amf_write_string(&p, "FMS/3,0,1,123");
  484. ff_amf_write_field_name(&p, "capabilities");
  485. ff_amf_write_number(&p, 31);
  486. ff_amf_write_object_end(&p);
  487. ff_amf_write_object_start(&p);
  488. ff_amf_write_field_name(&p, "level");
  489. ff_amf_write_string(&p, "status");
  490. ff_amf_write_field_name(&p, "code");
  491. ff_amf_write_string(&p, "NetConnection.Connect.Success");
  492. ff_amf_write_field_name(&p, "description");
  493. ff_amf_write_string(&p, "Connection succeeded.");
  494. ff_amf_write_field_name(&p, "objectEncoding");
  495. ff_amf_write_number(&p, 0);
  496. ff_amf_write_object_end(&p);
  497. pkt.size = p - pkt.data;
  498. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  499. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  500. ff_rtmp_packet_destroy(&pkt);
  501. if (ret < 0)
  502. return ret;
  503. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL,
  504. RTMP_PT_INVOKE, 0, 30)) < 0)
  505. return ret;
  506. p = pkt.data;
  507. ff_amf_write_string(&p, "onBWDone");
  508. ff_amf_write_number(&p, 0);
  509. ff_amf_write_null(&p);
  510. ff_amf_write_number(&p, 8192);
  511. pkt.size = p - pkt.data;
  512. ret = ff_rtmp_packet_write(rt->stream, &pkt, rt->out_chunk_size,
  513. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  514. ff_rtmp_packet_destroy(&pkt);
  515. return ret;
  516. }
  517. /**
  518. * Generate 'releaseStream' call and send it to the server. It should make
  519. * the server release some channel for media streams.
  520. */
  521. static int gen_release_stream(URLContext *s, RTMPContext *rt)
  522. {
  523. RTMPPacket pkt;
  524. uint8_t *p;
  525. int ret;
  526. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  527. 0, 29 + strlen(rt->playpath))) < 0)
  528. return ret;
  529. av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
  530. p = pkt.data;
  531. ff_amf_write_string(&p, "releaseStream");
  532. ff_amf_write_number(&p, ++rt->nb_invokes);
  533. ff_amf_write_null(&p);
  534. ff_amf_write_string(&p, rt->playpath);
  535. return rtmp_send_packet(rt, &pkt, 1);
  536. }
  537. /**
  538. * Generate 'FCPublish' call and send it to the server. It should make
  539. * the server prepare for receiving media streams.
  540. */
  541. static int gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
  542. {
  543. RTMPPacket pkt;
  544. uint8_t *p;
  545. int ret;
  546. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  547. 0, 25 + strlen(rt->playpath))) < 0)
  548. return ret;
  549. av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
  550. p = pkt.data;
  551. ff_amf_write_string(&p, "FCPublish");
  552. ff_amf_write_number(&p, ++rt->nb_invokes);
  553. ff_amf_write_null(&p);
  554. ff_amf_write_string(&p, rt->playpath);
  555. return rtmp_send_packet(rt, &pkt, 1);
  556. }
  557. /**
  558. * Generate 'FCUnpublish' call and send it to the server. It should make
  559. * the server destroy stream.
  560. */
  561. static int gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
  562. {
  563. RTMPPacket pkt;
  564. uint8_t *p;
  565. int ret;
  566. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  567. 0, 27 + strlen(rt->playpath))) < 0)
  568. return ret;
  569. av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
  570. p = pkt.data;
  571. ff_amf_write_string(&p, "FCUnpublish");
  572. ff_amf_write_number(&p, ++rt->nb_invokes);
  573. ff_amf_write_null(&p);
  574. ff_amf_write_string(&p, rt->playpath);
  575. return rtmp_send_packet(rt, &pkt, 0);
  576. }
  577. /**
  578. * Generate 'createStream' call and send it to the server. It should make
  579. * the server allocate some channel for media streams.
  580. */
  581. static int gen_create_stream(URLContext *s, RTMPContext *rt)
  582. {
  583. RTMPPacket pkt;
  584. uint8_t *p;
  585. int ret;
  586. av_log(s, AV_LOG_DEBUG, "Creating stream...\n");
  587. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  588. 0, 25)) < 0)
  589. return ret;
  590. p = pkt.data;
  591. ff_amf_write_string(&p, "createStream");
  592. ff_amf_write_number(&p, ++rt->nb_invokes);
  593. ff_amf_write_null(&p);
  594. return rtmp_send_packet(rt, &pkt, 1);
  595. }
  596. /**
  597. * Generate 'deleteStream' call and send it to the server. It should make
  598. * the server remove some channel for media streams.
  599. */
  600. static int gen_delete_stream(URLContext *s, RTMPContext *rt)
  601. {
  602. RTMPPacket pkt;
  603. uint8_t *p;
  604. int ret;
  605. av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");
  606. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  607. 0, 34)) < 0)
  608. return ret;
  609. p = pkt.data;
  610. ff_amf_write_string(&p, "deleteStream");
  611. ff_amf_write_number(&p, ++rt->nb_invokes);
  612. ff_amf_write_null(&p);
  613. ff_amf_write_number(&p, rt->stream_id);
  614. return rtmp_send_packet(rt, &pkt, 0);
  615. }
  616. /**
  617. * Generate 'getStreamLength' call and send it to the server. If the server
  618. * knows the duration of the selected stream, it will reply with the duration
  619. * in seconds.
  620. */
  621. static int gen_get_stream_length(URLContext *s, RTMPContext *rt)
  622. {
  623. RTMPPacket pkt;
  624. uint8_t *p;
  625. int ret;
  626. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
  627. 0, 31 + strlen(rt->playpath))) < 0)
  628. return ret;
  629. p = pkt.data;
  630. ff_amf_write_string(&p, "getStreamLength");
  631. ff_amf_write_number(&p, ++rt->nb_invokes);
  632. ff_amf_write_null(&p);
  633. ff_amf_write_string(&p, rt->playpath);
  634. return rtmp_send_packet(rt, &pkt, 1);
  635. }
  636. /**
  637. * Generate client buffer time and send it to the server.
  638. */
  639. static int gen_buffer_time(URLContext *s, RTMPContext *rt)
  640. {
  641. RTMPPacket pkt;
  642. uint8_t *p;
  643. int ret;
  644. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
  645. 1, 10)) < 0)
  646. return ret;
  647. p = pkt.data;
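// User control event type 3 (SetBufferLength): the stream id followed by the
// requested buffer length in milliseconds.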
  648. bytestream_put_be16(&p, 3);
  649. bytestream_put_be32(&p, rt->stream_id);
  650. bytestream_put_be32(&p, rt->client_buffer_time);
  651. return rtmp_send_packet(rt, &pkt, 0);
  652. }
  653. /**
  654. * Generate 'play' call and send it to the server, then ping the server
  655. * to start actual playing.
  656. */
  657. static int gen_play(URLContext *s, RTMPContext *rt)
  658. {
  659. RTMPPacket pkt;
  660. uint8_t *p;
  661. int ret;
  662. av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
  663. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
  664. 0, 29 + strlen(rt->playpath))) < 0)
  665. return ret;
  666. pkt.extra = rt->stream_id;
  667. p = pkt.data;
  668. ff_amf_write_string(&p, "play");
  669. ff_amf_write_number(&p, ++rt->nb_invokes);
  670. ff_amf_write_null(&p);
  671. ff_amf_write_string(&p, rt->playpath);
  672. ff_amf_write_number(&p, rt->live * 1000);
  673. return rtmp_send_packet(rt, &pkt, 1);
  674. }
  675. static int gen_seek(URLContext *s, RTMPContext *rt, int64_t timestamp)
  676. {
  677. RTMPPacket pkt;
  678. uint8_t *p;
  679. int ret;
  680. av_log(s, AV_LOG_DEBUG, "Sending seek command for timestamp %"PRId64"\n",
  681. timestamp);
  682. if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 26)) < 0)
  683. return ret;
  684. pkt.extra = rt->stream_id;
  685. p = pkt.data;
  686. ff_amf_write_string(&p, "seek");
  687. ff_amf_write_number(&p, 0); //no tracking back responses
  688. ff_amf_write_null(&p); //as usual, the first null param
  689. ff_amf_write_number(&p, timestamp); //where we want to jump
  690. return rtmp_send_packet(rt, &pkt, 1);
  691. }
  692. /**
  693. * Generate a pause packet that either pauses or unpauses the current stream.
  694. */
  695. static int gen_pause(URLContext *s, RTMPContext *rt, int pause, uint32_t timestamp)
  696. {
  697. RTMPPacket pkt;
  698. uint8_t *p;
  699. int ret;
  700. av_log(s, AV_LOG_DEBUG, "Sending pause command for timestamp %d\n",
  701. timestamp);
  702. if ((ret = ff_rtmp_packet_create(&pkt, 3, RTMP_PT_INVOKE, 0, 29)) < 0)
  703. return ret;
  704. pkt.extra = rt->stream_id;
  705. p = pkt.data;
  706. ff_amf_write_string(&p, "pause");
  707. ff_amf_write_number(&p, 0); //no tracking back responses
  708. ff_amf_write_null(&p); //as usual, the first null param
  709. ff_amf_write_bool(&p, pause); // pause or unpause
  710. ff_amf_write_number(&p, timestamp); //where we pause the stream
  711. return rtmp_send_packet(rt, &pkt, 1);
  712. }
  713. /**
  714. * Generate 'publish' call and send it to the server.
  715. */
  716. static int gen_publish(URLContext *s, RTMPContext *rt)
  717. {
  718. RTMPPacket pkt;
  719. uint8_t *p;
  720. int ret;
  721. av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
  722. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE,
  723. 0, 30 + strlen(rt->playpath))) < 0)
  724. return ret;
  725. pkt.extra = rt->stream_id;
  726. p = pkt.data;
  727. ff_amf_write_string(&p, "publish");
  728. ff_amf_write_number(&p, ++rt->nb_invokes);
  729. ff_amf_write_null(&p);
  730. ff_amf_write_string(&p, rt->playpath);
  731. ff_amf_write_string(&p, "live");
  732. return rtmp_send_packet(rt, &pkt, 1);
  733. }
  734. /**
  735. * Generate ping reply and send it to the server.
  736. */
  737. static int gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
  738. {
  739. RTMPPacket pkt;
  740. uint8_t *p;
  741. int ret;
  742. if (ppkt->size < 6) {
  743. av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
  744. ppkt->size);
  745. return AVERROR_INVALIDDATA;
  746. }
  747. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
  748. ppkt->timestamp + 1, 6)) < 0)
  749. return ret;
  750. p = pkt.data;
  751. bytestream_put_be16(&p, 7);
  752. bytestream_put_be32(&p, AV_RB32(ppkt->data+2));
  753. return rtmp_send_packet(rt, &pkt, 0);
  754. }
  755. /**
  756. * Generate SWF verification message and send it to the server.
  757. */
  758. static int gen_swf_verification(URLContext *s, RTMPContext *rt)
  759. {
  760. RTMPPacket pkt;
  761. uint8_t *p;
  762. int ret;
  763. av_log(s, AV_LOG_DEBUG, "Sending SWF verification...\n");
  764. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING,
  765. 0, 44)) < 0)
  766. return ret;
  767. p = pkt.data;
  768. bytestream_put_be16(&p, 27);
  769. memcpy(p, rt->swfverification, 42);
  770. return rtmp_send_packet(rt, &pkt, 0);
  771. }
  772. /**
  773. * Generate server bandwidth message and send it to the server.
  774. */
  775. static int gen_server_bw(URLContext *s, RTMPContext *rt)
  776. {
  777. RTMPPacket pkt;
  778. uint8_t *p;
  779. int ret;
  780. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW,
  781. 0, 4)) < 0)
  782. return ret;
  783. p = pkt.data;
  784. bytestream_put_be32(&p, rt->server_bw);
  785. return rtmp_send_packet(rt, &pkt, 0);
  786. }
  787. /**
  788. * Generate check bandwidth message and send it to the server.
  789. */
  790. static int gen_check_bw(URLContext *s, RTMPContext *rt)
  791. {
  792. RTMPPacket pkt;
  793. uint8_t *p;
  794. int ret;
  795. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  796. 0, 21)) < 0)
  797. return ret;
  798. p = pkt.data;
  799. ff_amf_write_string(&p, "_checkbw");
  800. ff_amf_write_number(&p, ++rt->nb_invokes);
  801. ff_amf_write_null(&p);
  802. return rtmp_send_packet(rt, &pkt, 1);
  803. }
  804. /**
  805. * Generate report on bytes read so far and send it to the server.
  806. */
  807. static int gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
  808. {
  809. RTMPPacket pkt;
  810. uint8_t *p;
  811. int ret;
  812. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ,
  813. ts, 4)) < 0)
  814. return ret;
  815. p = pkt.data;
  816. bytestream_put_be32(&p, rt->bytes_read);
  817. return rtmp_send_packet(rt, &pkt, 0);
  818. }
  819. static int gen_fcsubscribe_stream(URLContext *s, RTMPContext *rt,
  820. const char *subscribe)
  821. {
  822. RTMPPacket pkt;
  823. uint8_t *p;
  824. int ret;
  825. if ((ret = ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE,
  826. 0, 27 + strlen(subscribe))) < 0)
  827. return ret;
  828. p = pkt.data;
  829. ff_amf_write_string(&p, "FCSubscribe");
  830. ff_amf_write_number(&p, ++rt->nb_invokes);
  831. ff_amf_write_null(&p);
  832. ff_amf_write_string(&p, subscribe);
  833. return rtmp_send_packet(rt, &pkt, 1);
  834. }
  835. int ff_rtmp_calc_digest(const uint8_t *src, int len, int gap,
  836. const uint8_t *key, int keylen, uint8_t *dst)
  837. {
  838. AVHMAC *hmac;
  839. hmac = av_hmac_alloc(AV_HMAC_SHA256);
  840. if (!hmac)
  841. return AVERROR(ENOMEM);
  842. av_hmac_init(hmac, key, keylen);
  843. if (gap <= 0) {
  844. av_hmac_update(hmac, src, len);
  845. } else { //skip 32 bytes used for storing digest
  846. av_hmac_update(hmac, src, gap);
  847. av_hmac_update(hmac, src + gap + 32, len - gap - 32);
  848. }
  849. av_hmac_final(hmac, dst, 32);
  850. av_hmac_free(hmac);
  851. return 0;
  852. }
  853. int ff_rtmp_calc_digest_pos(const uint8_t *buf, int off, int mod_val,
  854. int add_val)
  855. {
  856. int i, digest_pos = 0;
  857. for (i = 0; i < 4; i++)
  858. digest_pos += buf[i + off];
  859. digest_pos = digest_pos % mod_val + add_val;
  860. return digest_pos;
  861. }
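/* For instance, with the common (non-encrypted) client scheme the four bytes
 * at offset 8 are summed, taken modulo 728 and offset by 12, so the 32-byte
 * digest always lies within the first 764-byte block that follows the 8-byte
 * time/version header of the handshake payload. */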
  862. /**
  863. * Put HMAC-SHA2 digest of packet data (except for the bytes where this digest
  864. * will be stored) into that packet.
  865. *
  866. * @param buf handshake data (1536 bytes)
  867. * @param encrypted use an encrypted connection (RTMPE)
  868. * @return offset to the digest inside input data
  869. */
  870. static int rtmp_handshake_imprint_with_digest(uint8_t *buf, int encrypted)
  871. {
  872. int ret, digest_pos;
  873. if (encrypted)
  874. digest_pos = ff_rtmp_calc_digest_pos(buf, 772, 728, 776);
  875. else
  876. digest_pos = ff_rtmp_calc_digest_pos(buf, 8, 728, 12);
  877. ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
  878. rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
  879. buf + digest_pos);
  880. if (ret < 0)
  881. return ret;
  882. return digest_pos;
  883. }
  884. /**
  885. * Verify that the received server response has the expected digest value.
  886. *
  887. * @param buf handshake data received from the server (1536 bytes)
  888. * @param off position to search digest offset from
  889. * @return digest position if the digest is valid, 0 otherwise
  890. */
  891. static int rtmp_validate_digest(uint8_t *buf, int off)
  892. {
  893. uint8_t digest[32];
  894. int ret, digest_pos;
  895. digest_pos = ff_rtmp_calc_digest_pos(buf, off, 728, off + 4);
  896. ret = ff_rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
  897. rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
  898. digest);
  899. if (ret < 0)
  900. return ret;
  901. if (!memcmp(digest, buf + digest_pos, 32))
  902. return digest_pos;
  903. return 0;
  904. }
  905. static int rtmp_calc_swf_verification(URLContext *s, RTMPContext *rt,
  906. uint8_t *buf)
  907. {
  908. uint8_t *p;
  909. int ret;
  910. if (rt->swfhash_len != 32) {
  911. av_log(s, AV_LOG_ERROR,
  912. "Hash of the decompressed SWF file is not 32 bytes long.\n");
  913. return AVERROR(EINVAL);
  914. }
  915. p = &rt->swfverification[0];
  916. bytestream_put_byte(&p, 1);
  917. bytestream_put_byte(&p, 1);
  918. bytestream_put_be32(&p, rt->swfsize);
  919. bytestream_put_be32(&p, rt->swfsize);
  920. if ((ret = ff_rtmp_calc_digest(rt->swfhash, 32, 0, buf, 32, p)) < 0)
  921. return ret;
  922. return 0;
  923. }
  924. #if CONFIG_ZLIB
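/* Inflate a zlib-compressed SWF body into a dynamically growing buffer; used
 * by rtmp_calc_swfhash() below when the fetched player starts with the "CWS"
 * (compressed SWF) signature. */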
  925. static int rtmp_uncompress_swfplayer(uint8_t *in_data, int64_t in_size,
  926. uint8_t **out_data, int64_t *out_size)
  927. {
  928. z_stream zs = { 0 };
  929. void *ptr;
  930. int size;
  931. int ret = 0;
  932. zs.avail_in = in_size;
  933. zs.next_in = in_data;
  934. ret = inflateInit(&zs);
  935. if (ret != Z_OK)
  936. return AVERROR_UNKNOWN;
  937. do {
  938. uint8_t tmp_buf[16384];
  939. zs.avail_out = sizeof(tmp_buf);
  940. zs.next_out = tmp_buf;
  941. ret = inflate(&zs, Z_NO_FLUSH);
  942. if (ret != Z_OK && ret != Z_STREAM_END) {
  943. ret = AVERROR_UNKNOWN;
  944. goto fail;
  945. }
  946. size = sizeof(tmp_buf) - zs.avail_out;
  947. if (!(ptr = av_realloc(*out_data, *out_size + size))) {
  948. ret = AVERROR(ENOMEM);
  949. goto fail;
  950. }
  951. *out_data = ptr;
  952. memcpy(*out_data + *out_size, tmp_buf, size);
  953. *out_size += size;
  954. } while (zs.avail_out == 0);
  955. fail:
  956. inflateEnd(&zs);
  957. return ret;
  958. }
  959. #endif
  960. static int rtmp_calc_swfhash(URLContext *s)
  961. {
  962. RTMPContext *rt = s->priv_data;
  963. uint8_t *in_data = NULL, *out_data = NULL, *swfdata;
  964. int64_t in_size, out_size;
  965. URLContext *stream;
  966. char swfhash[32];
  967. int swfsize;
  968. int ret = 0;
  969. /* Get the SWF player file. */
  970. if ((ret = ffurl_open_whitelist(&stream, rt->swfverify, AVIO_FLAG_READ,
  971. &s->interrupt_callback, NULL,
  972. s->protocol_whitelist, s->protocol_blacklist, s)) < 0) {
  973. av_log(s, AV_LOG_ERROR, "Cannot open connection %s.\n", rt->swfverify);
  974. goto fail;
  975. }
  976. if ((in_size = ffurl_seek(stream, 0, AVSEEK_SIZE)) < 0) {
  977. ret = AVERROR(EIO);
  978. goto fail;
  979. }
  980. if (!(in_data = av_malloc(in_size))) {
  981. ret = AVERROR(ENOMEM);
  982. goto fail;
  983. }
  984. if ((ret = ffurl_read_complete(stream, in_data, in_size)) < 0)
  985. goto fail;
  986. if (in_size < 3) {
  987. ret = AVERROR_INVALIDDATA;
  988. goto fail;
  989. }
  990. if (!memcmp(in_data, "CWS", 3)) {
  991. /* Decompress the SWF player file using Zlib. */
  992. if (!(out_data = av_malloc(8))) {
  993. ret = AVERROR(ENOMEM);
  994. goto fail;
  995. }
  996. *in_data = 'F'; // rewrite the "CWS" signature to "FWS" (uncompressed SWF) before hashing
  997. memcpy(out_data, in_data, 8);
  998. out_size = 8;
  999. #if CONFIG_ZLIB
  1000. if ((ret = rtmp_uncompress_swfplayer(in_data + 8, in_size - 8,
  1001. &out_data, &out_size)) < 0)
  1002. goto fail;
  1003. #else
  1004. av_log(s, AV_LOG_ERROR,
  1005. "Zlib is required for decompressing the SWF player file.\n");
  1006. ret = AVERROR(EINVAL);
  1007. goto fail;
  1008. #endif
  1009. swfsize = out_size;
  1010. swfdata = out_data;
  1011. } else {
  1012. swfsize = in_size;
  1013. swfdata = in_data;
  1014. }
  1015. /* Compute the SHA256 hash of the SWF player file. */
  1016. if ((ret = ff_rtmp_calc_digest(swfdata, swfsize, 0,
  1017. "Genuine Adobe Flash Player 001", 30,
  1018. swfhash)) < 0)
  1019. goto fail;
  1020. /* Set SWFVerification parameters. */
  1021. av_opt_set_bin(rt, "rtmp_swfhash", swfhash, 32, 0);
  1022. rt->swfsize = swfsize;
  1023. fail:
  1024. av_freep(&in_data);
  1025. av_freep(&out_data);
  1026. ffurl_close(stream);
  1027. return ret;
  1028. }
  1029. /**
  1030. * Perform handshake with the server by means of exchanging pseudorandom data
  1031. * signed with HMAC-SHA2 digest.
  1032. *
  1033. * @return 0 if handshake succeeds, negative value otherwise
  1034. */
  1035. static int rtmp_handshake(URLContext *s, RTMPContext *rt)
  1036. {
  1037. AVLFG rnd;
  1038. uint8_t tosend [RTMP_HANDSHAKE_PACKET_SIZE+1] = {
  1039. 3, // unencrypted data
  1040. 0, 0, 0, 0, // client uptime
  1041. RTMP_CLIENT_VER1,
  1042. RTMP_CLIENT_VER2,
  1043. RTMP_CLIENT_VER3,
  1044. RTMP_CLIENT_VER4,
  1045. };
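/* tosend holds C0 (the one-byte protocol version) followed by C1: 4 bytes of
 * client uptime, 4 bytes of client version and 1528 bytes of pseudorandom
 * data into which the client digest is imprinted below. */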
  1046. uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
  1047. uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
  1048. int i;
  1049. int server_pos, client_pos;
  1050. uint8_t digest[32], signature[32];
  1051. int ret, type = 0;
  1052. av_log(s, AV_LOG_DEBUG, "Handshaking...\n");
  1053. av_lfg_init(&rnd, 0xDEADC0DE);
  1054. // generate the handshake packet: fill the part of C1 after uptime and version with pseudorandom data
  1055. for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
  1056. tosend[i] = av_lfg_get(&rnd) >> 24;
  1057. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1058. /* When the client wants to use RTMPE, we have to change the command
  1059. * byte to 0x06 which means to use encrypted data and we have to set
  1060. * the flash version to at least 9.0.115.0. */
  1061. tosend[0] = 6;
  1062. tosend[5] = 128;
  1063. tosend[6] = 0;
  1064. tosend[7] = 3;
  1065. tosend[8] = 2;
  1066. /* Initialize the Diffie-Hellman context and generate the public key
  1067. * to send to the server. */
  1068. if ((ret = ff_rtmpe_gen_pub_key(rt->stream, tosend + 1)) < 0)
  1069. return ret;
  1070. }
  1071. client_pos = rtmp_handshake_imprint_with_digest(tosend + 1, rt->encrypted);
  1072. if (client_pos < 0)
  1073. return client_pos;
  1074. if ((ret = ffurl_write(rt->stream, tosend,
  1075. RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
  1076. av_log(s, AV_LOG_ERROR, "Cannot write RTMP handshake request\n");
  1077. return ret;
  1078. }
  1079. if ((ret = ffurl_read_complete(rt->stream, serverdata,
  1080. RTMP_HANDSHAKE_PACKET_SIZE + 1)) < 0) {
  1081. av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
  1082. return ret;
  1083. }
  1084. if ((ret = ffurl_read_complete(rt->stream, clientdata,
  1085. RTMP_HANDSHAKE_PACKET_SIZE)) < 0) {
  1086. av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
  1087. return ret;
  1088. }
  1089. av_log(s, AV_LOG_DEBUG, "Type answer %d\n", serverdata[0]);
  1090. av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
  1091. serverdata[5], serverdata[6], serverdata[7], serverdata[8]);
  1092. if (rt->is_input && serverdata[5] >= 3) {
  1093. server_pos = rtmp_validate_digest(serverdata + 1, 772);
  1094. if (server_pos < 0)
  1095. return server_pos;
  1096. if (!server_pos) {
  1097. type = 1;
  1098. server_pos = rtmp_validate_digest(serverdata + 1, 8);
  1099. if (server_pos < 0)
  1100. return server_pos;
  1101. if (!server_pos) {
  1102. av_log(s, AV_LOG_ERROR, "Server response validating failed\n");
  1103. return AVERROR(EIO);
  1104. }
  1105. }
  1106. /* Generate the SWFVerification token (SHA256 HMAC hash of the decompressed SWF;
  1107. * the key is the last 32 bytes of the server handshake). */
  1108. if (rt->swfsize) {
  1109. if ((ret = rtmp_calc_swf_verification(s, rt, serverdata + 1 +
  1110. RTMP_HANDSHAKE_PACKET_SIZE - 32)) < 0)
  1111. return ret;
  1112. }
  1113. ret = ff_rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
  1114. rtmp_server_key, sizeof(rtmp_server_key),
  1115. digest);
  1116. if (ret < 0)
  1117. return ret;
  1118. ret = ff_rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE - 32,
  1119. 0, digest, 32, signature);
  1120. if (ret < 0)
  1121. return ret;
  1122. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1123. /* Compute the shared secret key sent by the server and initialize
  1124. * the RC4 encryption. */
  1125. if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
  1126. tosend + 1, type)) < 0)
  1127. return ret;
  1128. /* Encrypt the signature received by the server. */
  1129. ff_rtmpe_encrypt_sig(rt->stream, signature, digest, serverdata[0]);
  1130. }
  1131. if (memcmp(signature, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
  1132. av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
  1133. return AVERROR(EIO);
  1134. }
  1135. for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
  1136. tosend[i] = av_lfg_get(&rnd) >> 24;
  1137. ret = ff_rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
  1138. rtmp_player_key, sizeof(rtmp_player_key),
  1139. digest);
  1140. if (ret < 0)
  1141. return ret;
  1142. ret = ff_rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
  1143. digest, 32,
  1144. tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);
  1145. if (ret < 0)
  1146. return ret;
  1147. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1148. /* Encrypt the signature to be sent to the server. */
  1149. ff_rtmpe_encrypt_sig(rt->stream, tosend +
  1150. RTMP_HANDSHAKE_PACKET_SIZE - 32, digest,
  1151. serverdata[0]);
  1152. }
  1153. // write reply back to the server
  1154. if ((ret = ffurl_write(rt->stream, tosend,
  1155. RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
  1156. return ret;
  1157. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1158. /* Set RC4 keys for encryption and update the keystreams. */
  1159. if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
  1160. return ret;
  1161. }
  1162. } else {
  1163. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1164. /* Compute the shared secret key sent by the server and initialize
  1165. * the RC4 encryption. */
  1166. if ((ret = ff_rtmpe_compute_secret_key(rt->stream, serverdata + 1,
  1167. tosend + 1, 1)) < 0)
  1168. return ret;
  1169. if (serverdata[0] == 9) {
  1170. /* Encrypt the signature received by the server. */
  1171. ff_rtmpe_encrypt_sig(rt->stream, signature, digest,
  1172. serverdata[0]);
  1173. }
  1174. }
  1175. if ((ret = ffurl_write(rt->stream, serverdata + 1,
  1176. RTMP_HANDSHAKE_PACKET_SIZE)) < 0)
  1177. return ret;
  1178. if (CONFIG_FFRTMPCRYPT_PROTOCOL && rt->encrypted) {
  1179. /* Set RC4 keys for encryption and update the keystreams. */
  1180. if ((ret = ff_rtmpe_update_keystream(rt->stream)) < 0)
  1181. return ret;
  1182. }
  1183. }
  1184. return 0;
  1185. }
  1186. static int rtmp_receive_hs_packet(RTMPContext* rt, uint32_t *first_int,
  1187. uint32_t *second_int, char *arraydata,
  1188. int size)
  1189. {
  1190. int inoutsize;
  1191. inoutsize = ffurl_read_complete(rt->stream, arraydata,
  1192. RTMP_HANDSHAKE_PACKET_SIZE);
  1193. if (inoutsize <= 0)
  1194. return AVERROR(EIO);
  1195. if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) {
  1196. av_log(rt, AV_LOG_ERROR, "Erroneous message size %d,"
  1197. " not following the standard\n", (int)inoutsize);
  1198. return AVERROR(EINVAL);
  1199. }
  1200. *first_int = AV_RB32(arraydata);
  1201. *second_int = AV_RB32(arraydata + 4);
  1202. return 0;
  1203. }
  1204. static int rtmp_send_hs_packet(RTMPContext* rt, uint32_t first_int,
  1205. uint32_t second_int, char *arraydata, int size)
  1206. {
  1207. int inoutsize;
  1208. AV_WB32(arraydata, first_int);
  1209. AV_WB32(arraydata + 4, second_int);
  1210. inoutsize = ffurl_write(rt->stream, arraydata,
  1211. RTMP_HANDSHAKE_PACKET_SIZE);
  1212. if (inoutsize != RTMP_HANDSHAKE_PACKET_SIZE) {
  1213. av_log(rt, AV_LOG_ERROR, "Unable to write answer\n");
  1214. return AVERROR(EIO);
  1215. }
  1216. return 0;
  1217. }
  1218. /**
  1219. * rtmp handshake server side
  1220. */
  1221. static int rtmp_server_handshake(URLContext *s, RTMPContext *rt)
  1222. {
  1223. uint8_t buffer[RTMP_HANDSHAKE_PACKET_SIZE];
  1224. uint32_t hs_epoch;
  1225. uint32_t hs_my_epoch;
  1226. uint8_t hs_c1[RTMP_HANDSHAKE_PACKET_SIZE];
  1227. uint8_t hs_s1[RTMP_HANDSHAKE_PACKET_SIZE];
  1228. uint32_t zeroes;
  1229. uint32_t temp = 0;
  1230. int randomidx = 0;
  1231. int inoutsize = 0;
  1232. int ret;
  1233. inoutsize = ffurl_read_complete(rt->stream, buffer, 1); // Receive C0
  1234. if (inoutsize <= 0) {
  1235. av_log(s, AV_LOG_ERROR, "Unable to read handshake\n");
  1236. return AVERROR(EIO);
  1237. }
  1238. // Check Version
  1239. if (buffer[0] != 3) {
  1240. av_log(s, AV_LOG_ERROR, "RTMP protocol version mismatch\n");
  1241. return AVERROR(EIO);
  1242. }
  1243. if (ffurl_write(rt->stream, buffer, 1) <= 0) { // Send S0
  1244. av_log(s, AV_LOG_ERROR,
  1245. "Unable to write answer - RTMP S0\n");
  1246. return AVERROR(EIO);
  1247. }
  1248. /* Receive C1 */
  1249. ret = rtmp_receive_hs_packet(rt, &hs_epoch, &zeroes, hs_c1,
  1250. RTMP_HANDSHAKE_PACKET_SIZE);
  1251. if (ret) {
  1252. av_log(s, AV_LOG_ERROR, "RTMP Handshake C1 Error\n");
  1253. return ret;
  1254. }
  1255. /* Send S1 */
  1256. /* For now, send back the same epoch */
  1257. hs_my_epoch = hs_epoch;
  1258. /* Generate random */
  1259. for (randomidx = 8; randomidx < (RTMP_HANDSHAKE_PACKET_SIZE);
  1260. randomidx += 4)
  1261. AV_WB32(hs_s1 + randomidx, av_get_random_seed());
  1262. ret = rtmp_send_hs_packet(rt, hs_my_epoch, 0, hs_s1,
  1263. RTMP_HANDSHAKE_PACKET_SIZE);
  1264. if (ret) {
  1265. av_log(s, AV_LOG_ERROR, "RTMP Handshake S1 Error\n");
  1266. return ret;
  1267. }
  1268. /* Send S2 */
  1269. ret = rtmp_send_hs_packet(rt, hs_epoch, 0, hs_c1,
  1270. RTMP_HANDSHAKE_PACKET_SIZE);
  1271. if (ret) {
  1272. av_log(s, AV_LOG_ERROR, "RTMP Handshake S2 Error\n");
  1273. return ret;
  1274. }
  1275. /* Receive C2 */
  1276. ret = rtmp_receive_hs_packet(rt, &temp, &zeroes, buffer,
  1277. RTMP_HANDSHAKE_PACKET_SIZE);
  1278. if (ret) {
  1279. av_log(s, AV_LOG_ERROR, "RTMP Handshake C2 Error\n");
  1280. return ret;
  1281. }
  1282. if (temp != hs_my_epoch)
  1283. av_log(s, AV_LOG_WARNING,
  1284. "Erroneous C2 Message epoch does not match up with C1 epoch\n");
  1285. if (memcmp(buffer + 8, hs_s1 + 8,
  1286. RTMP_HANDSHAKE_PACKET_SIZE - 8))
  1287. av_log(s, AV_LOG_WARNING,
  1288. "Erroneous C2 Message random does not match up\n");
  1289. return 0;
  1290. }
  1291. static int handle_chunk_size(URLContext *s, RTMPPacket *pkt)
  1292. {
  1293. RTMPContext *rt = s->priv_data;
  1294. int ret;
  1295. if (pkt->size < 4) {
  1296. av_log(s, AV_LOG_ERROR,
  1297. "Too short chunk size change packet (%d)\n",
  1298. pkt->size);
  1299. return AVERROR_INVALIDDATA;
  1300. }
  1301. if (!rt->is_input) {
  1302. /* Send the same chunk size change packet back to the server,
  1303. * setting the outgoing chunk size to the same as the incoming one. */
  1304. if ((ret = ff_rtmp_packet_write(rt->stream, pkt, rt->out_chunk_size,
  1305. &rt->prev_pkt[1], &rt->nb_prev_pkt[1])) < 0)
  1306. return ret;
  1307. rt->out_chunk_size = AV_RB32(pkt->data);
  1308. }
  1309. rt->in_chunk_size = AV_RB32(pkt->data);
  1310. if (rt->in_chunk_size <= 0) {
  1311. av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n",
  1312. rt->in_chunk_size);
  1313. return AVERROR_INVALIDDATA;
  1314. }
  1315. av_log(s, AV_LOG_DEBUG, "New incoming chunk size = %d\n",
  1316. rt->in_chunk_size);
  1317. return 0;
  1318. }
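/* Handle a user control ("ping") message. Only two event types are acted on
 * here: 6 (ping request, answered with a type 7 pong echoing the request
 * payload) and 26 (SWFVerification request, answered with the precomputed
 * verification data when a SWF hash is available). */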
  1319. static int handle_ping(URLContext *s, RTMPPacket *pkt)
  1320. {
  1321. RTMPContext *rt = s->priv_data;
  1322. int t, ret;
  1323. if (pkt->size < 2) {
  1324. av_log(s, AV_LOG_ERROR, "Too short ping packet (%d)\n",
  1325. pkt->size);
  1326. return AVERROR_INVALIDDATA;
  1327. }
  1328. t = AV_RB16(pkt->data);
  1329. if (t == 6) {
  1330. if ((ret = gen_pong(s, rt, pkt)) < 0)
  1331. return ret;
  1332. } else if (t == 26) {
  1333. if (rt->swfsize) {
  1334. if ((ret = gen_swf_verification(s, rt)) < 0)
  1335. return ret;
  1336. } else {
  1337. av_log(s, AV_LOG_WARNING, "Ignoring SWFVerification request.\n");
  1338. }
  1339. }
  1340. return 0;
  1341. }
  1342. static int handle_client_bw(URLContext *s, RTMPPacket *pkt)
  1343. {
  1344. RTMPContext *rt = s->priv_data;
  1345. if (pkt->size < 4) {
  1346. av_log(s, AV_LOG_ERROR,
  1347. "Client bandwidth report packet is less than 4 bytes long (%d)\n",
  1348. pkt->size);
  1349. return AVERROR_INVALIDDATA;
  1350. }
  1351. rt->client_report_size = AV_RB32(pkt->data);
  1352. if (rt->client_report_size <= 0) {
  1353. av_log(s, AV_LOG_ERROR, "Incorrect client bandwidth %d\n",
  1354. rt->client_report_size);
  1355. return AVERROR_INVALIDDATA;
  1356. }
  1357. av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", rt->client_report_size);
  1358. rt->client_report_size >>= 1;
  1359. return 0;
  1360. }
  1361. static int handle_server_bw(URLContext *s, RTMPPacket *pkt)
  1362. {
  1363. RTMPContext *rt = s->priv_data;
  1364. if (pkt->size < 4) {
  1365. av_log(s, AV_LOG_ERROR,
  1366. "Too short server bandwidth report packet (%d)\n",
  1367. pkt->size);
  1368. return AVERROR_INVALIDDATA;
  1369. }
  1370. rt->server_bw = AV_RB32(pkt->data);
  1371. if (rt->server_bw <= 0) {
  1372. av_log(s, AV_LOG_ERROR, "Incorrect server bandwidth %d\n",
  1373. rt->server_bw);
  1374. return AVERROR_INVALIDDATA;
  1375. }
  1376. av_log(s, AV_LOG_DEBUG, "Server bandwidth = %d\n", rt->server_bw);
  1377. return 0;
  1378. }
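/* Adobe-style authentication: the response is essentially
 * base64(MD5(base64(MD5(user + salt + password)) + (opaque or challenge) + challenge2)),
 * where challenge2 is a freshly generated random token included in the reply. */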
  1379. static int do_adobe_auth(RTMPContext *rt, const char *user, const char *salt,
  1380. const char *opaque, const char *challenge)
  1381. {
  1382. uint8_t hash[16];
  1383. char hashstr[AV_BASE64_SIZE(sizeof(hash))], challenge2[10];
  1384. struct AVMD5 *md5 = av_md5_alloc();
  1385. if (!md5)
  1386. return AVERROR(ENOMEM);
  1387. snprintf(challenge2, sizeof(challenge2), "%08x", av_get_random_seed());
  1388. av_md5_init(md5);
  1389. av_md5_update(md5, user, strlen(user));
  1390. av_md5_update(md5, salt, strlen(salt));
  1391. av_md5_update(md5, rt->password, strlen(rt->password));
  1392. av_md5_final(md5, hash);
  1393. av_base64_encode(hashstr, sizeof(hashstr), hash,
  1394. sizeof(hash));
  1395. av_md5_init(md5);
  1396. av_md5_update(md5, hashstr, strlen(hashstr));
  1397. if (opaque)
  1398. av_md5_update(md5, opaque, strlen(opaque));
  1399. else if (challenge)
  1400. av_md5_update(md5, challenge, strlen(challenge));
  1401. av_md5_update(md5, challenge2, strlen(challenge2));
  1402. av_md5_final(md5, hash);
  1403. av_base64_encode(hashstr, sizeof(hashstr), hash,
  1404. sizeof(hash));
  1405. snprintf(rt->auth_params, sizeof(rt->auth_params),
  1406. "?authmod=%s&user=%s&challenge=%s&response=%s",
  1407. "adobe", user, challenge2, hashstr);
  1408. if (opaque)
  1409. av_strlcatf(rt->auth_params, sizeof(rt->auth_params),
  1410. "&opaque=%s", opaque);
  1411. av_free(md5);
  1412. return 0;
  1413. }
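/* Limelight ("llnw") authentication, essentially an RFC 2617 style digest:
 * HA1 = MD5(user:realm:password), HA2 = MD5(method:/app) and the response is
 * MD5(HA1:nonce:nc:cnonce:qop:HA2), all hex-encoded. */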
  1414. static int do_llnw_auth(RTMPContext *rt, const char *user, const char *nonce)
  1415. {
  1416. uint8_t hash[16];
  1417. char hashstr1[33], hashstr2[33];
  1418. const char *realm = "live";
  1419. const char *method = "publish";
  1420. const char *qop = "auth";
  1421. const char *nc = "00000001";
  1422. char cnonce[10];
  1423. struct AVMD5 *md5 = av_md5_alloc();
  1424. if (!md5)
  1425. return AVERROR(ENOMEM);
  1426. snprintf(cnonce, sizeof(cnonce), "%08x", av_get_random_seed());
  1427. av_md5_init(md5);
  1428. av_md5_update(md5, user, strlen(user));
  1429. av_md5_update(md5, ":", 1);
  1430. av_md5_update(md5, realm, strlen(realm));
  1431. av_md5_update(md5, ":", 1);
  1432. av_md5_update(md5, rt->password, strlen(rt->password));
  1433. av_md5_final(md5, hash);
  1434. ff_data_to_hex(hashstr1, hash, 16, 1);
  1435. hashstr1[32] = '\0';
  1436. av_md5_init(md5);
  1437. av_md5_update(md5, method, strlen(method));
  1438. av_md5_update(md5, ":/", 2);
  1439. av_md5_update(md5, rt->app, strlen(rt->app));
  1440. if (!strchr(rt->app, '/'))
  1441. av_md5_update(md5, "/_definst_", strlen("/_definst_"));
  1442. av_md5_final(md5, hash);
  1443. ff_data_to_hex(hashstr2, hash, 16, 1);
  1444. hashstr2[32] = '\0';
  1445. av_md5_init(md5);
  1446. av_md5_update(md5, hashstr1, strlen(hashstr1));
  1447. av_md5_update(md5, ":", 1);
  1448. if (nonce)
  1449. av_md5_update(md5, nonce, strlen(nonce));
  1450. av_md5_update(md5, ":", 1);
  1451. av_md5_update(md5, nc, strlen(nc));
  1452. av_md5_update(md5, ":", 1);
  1453. av_md5_update(md5, cnonce, strlen(cnonce));
  1454. av_md5_update(md5, ":", 1);
  1455. av_md5_update(md5, qop, strlen(qop));
  1456. av_md5_update(md5, ":", 1);
  1457. av_md5_update(md5, hashstr2, strlen(hashstr2));
  1458. av_md5_final(md5, hash);
  1459. ff_data_to_hex(hashstr1, hash, 16, 1);
  1460. snprintf(rt->auth_params, sizeof(rt->auth_params),
  1461. "?authmod=%s&user=%s&nonce=%s&cnonce=%s&nc=%s&response=%s",
  1462. "llnw", user, nonce, cnonce, nc, hashstr1);
  1463. av_free(md5);
  1464. return 0;
  1465. }
  1466. static int handle_connect_error(URLContext *s, const char *desc)
  1467. {
  1468. RTMPContext *rt = s->priv_data;
  1469. char buf[300], *ptr, authmod[15];
  1470. int i = 0, ret = 0;
  1471. const char *user = "", *salt = "", *opaque = NULL,
  1472. *challenge = NULL, *cptr = NULL, *nonce = NULL;
  1473. if (!(cptr = strstr(desc, "authmod=adobe")) &&
  1474. !(cptr = strstr(desc, "authmod=llnw"))) {
  1475. av_log(s, AV_LOG_ERROR,
  1476. "Unknown connect error (unsupported authentication method?)\n");
  1477. return AVERROR_UNKNOWN;
  1478. }
  1479. cptr += strlen("authmod=");
  1480. while (*cptr && *cptr != ' ' && i < sizeof(authmod) - 1)
  1481. authmod[i++] = *cptr++;
  1482. authmod[i] = '\0';
  1483. if (!rt->username[0] || !rt->password[0]) {
  1484. av_log(s, AV_LOG_ERROR, "No credentials set\n");
  1485. return AVERROR_UNKNOWN;
  1486. }
  1487. if (strstr(desc, "?reason=authfailed")) {
  1488. av_log(s, AV_LOG_ERROR, "Incorrect username/password\n");
  1489. return AVERROR_UNKNOWN;
  1490. } else if (strstr(desc, "?reason=nosuchuser")) {
  1491. av_log(s, AV_LOG_ERROR, "Incorrect username\n");
  1492. return AVERROR_UNKNOWN;
  1493. }
  1494. if (rt->auth_tried) {
  1495. av_log(s, AV_LOG_ERROR, "Authentication failed\n");
  1496. return AVERROR_UNKNOWN;
  1497. }
  1498. rt->auth_params[0] = '\0';
  1499. if (strstr(desc, "code=403 need auth")) {
  1500. snprintf(rt->auth_params, sizeof(rt->auth_params),
  1501. "?authmod=%s&user=%s", authmod, rt->username);
  1502. return 0;
  1503. }
  1504. if (!(cptr = strstr(desc, "?reason=needauth"))) {
  1505. av_log(s, AV_LOG_ERROR, "No auth parameters found\n");
  1506. return AVERROR_UNKNOWN;
  1507. }
  1508. av_strlcpy(buf, cptr + 1, sizeof(buf));
  1509. ptr = buf;
  1510. while (ptr) {
  1511. char *next = strchr(ptr, '&');
  1512. char *value = strchr(ptr, '=');
  1513. if (next)
  1514. *next++ = '\0';
  1515. if (value) {
  1516. *value++ = '\0';
  1517. if (!strcmp(ptr, "user")) {
  1518. user = value;
  1519. } else if (!strcmp(ptr, "salt")) {
  1520. salt = value;
  1521. } else if (!strcmp(ptr, "opaque")) {
  1522. opaque = value;
  1523. } else if (!strcmp(ptr, "challenge")) {
  1524. challenge = value;
  1525. } else if (!strcmp(ptr, "nonce")) {
  1526. nonce = value;
  1527. } else {
  1528. av_log(s, AV_LOG_INFO, "Ignoring unsupported var %s\n", ptr);
  1529. }
  1530. } else {
  1531. av_log(s, AV_LOG_WARNING, "Variable %s has NULL value\n", ptr);
  1532. }
  1533. ptr = next;
  1534. }
  1535. if (!strcmp(authmod, "adobe")) {
  1536. if ((ret = do_adobe_auth(rt, user, salt, opaque, challenge)) < 0)
  1537. return ret;
  1538. } else {
  1539. if ((ret = do_llnw_auth(rt, user, nonce)) < 0)
  1540. return ret;
  1541. }
  1542. rt->auth_tried = 1;
  1543. return 0;
  1544. }
  1545. static int handle_invoke_error(URLContext *s, RTMPPacket *pkt)
  1546. {
  1547. RTMPContext *rt = s->priv_data;
  1548. const uint8_t *data_end = pkt->data + pkt->size;
  1549. char *tracked_method = NULL;
  1550. int level = AV_LOG_ERROR;
  1551. uint8_t tmpstr[256];
  1552. int ret;
  1553. if ((ret = find_tracked_method(s, pkt, 9, &tracked_method)) < 0)
  1554. return ret;
  1555. if (!ff_amf_get_field_value(pkt->data + 9, data_end,
  1556. "description", tmpstr, sizeof(tmpstr))) {
  1557. if (tracked_method && (!strcmp(tracked_method, "_checkbw") ||
  1558. !strcmp(tracked_method, "releaseStream") ||
  1559. !strcmp(tracked_method, "FCSubscribe") ||
  1560. !strcmp(tracked_method, "FCPublish"))) {
  1561. /* Gracefully ignore Adobe-specific historical artifact errors. */
  1562. level = AV_LOG_WARNING;
  1563. ret = 0;
  1564. } else if (tracked_method && !strcmp(tracked_method, "getStreamLength")) {
  1565. level = rt->live ? AV_LOG_DEBUG : AV_LOG_WARNING;
  1566. ret = 0;
  1567. } else if (tracked_method && !strcmp(tracked_method, "connect")) {
  1568. ret = handle_connect_error(s, tmpstr);
  1569. if (!ret) {
  1570. rt->do_reconnect = 1;
  1571. level = AV_LOG_VERBOSE;
  1572. }
  1573. } else
  1574. ret = AVERROR_UNKNOWN;
  1575. av_log(s, level, "Server error: %s\n", tmpstr);
  1576. }
  1577. av_free(tracked_method);
  1578. return ret;
  1579. }
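/*
 * Sketch of the payload written by write_begin() below: "Stream Begin" is a
 * user control message (sent here as RTMP_PT_PING) whose body is
 *   2 bytes  event type (0 = Stream Begin)
 *   4 bytes  id of the stream that is about to become active
 */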
  1580. static int write_begin(URLContext *s)
  1581. {
  1582. RTMPContext *rt = s->priv_data;
  1583. PutByteContext pbc;
  1584. RTMPPacket spkt = { 0 };
  1585. int ret;
  1586. // Send a "Stream Begin" user control event for the current stream id
  1587. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_NETWORK_CHANNEL,
  1588. RTMP_PT_PING, 0, 6)) < 0) {
  1589. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1590. return ret;
  1591. }
  1592. bytestream2_init_writer(&pbc, spkt.data, spkt.size);
  1593. bytestream2_put_be16(&pbc, 0); // 0 -> Stream Begin
  1594. bytestream2_put_be32(&pbc, rt->nb_streamid);
  1595. ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
  1596. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  1597. ff_rtmp_packet_destroy(&spkt);
  1598. return ret;
  1599. }
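/*
 * Informal sketch of the AMF payload built by write_status() below:
 *   "onStatus", 0, null,
 *   { level: "status", code: <status>,
 *     description: "<filename> is now published", details: <filename> }
 */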
  1600. static int write_status(URLContext *s, RTMPPacket *pkt,
  1601. const char *status, const char *filename)
  1602. {
  1603. RTMPContext *rt = s->priv_data;
  1604. RTMPPacket spkt = { 0 };
  1605. char statusmsg[128];
  1606. uint8_t *pp;
  1607. int ret;
  1608. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
  1609. RTMP_PT_INVOKE, 0,
  1610. RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
  1611. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1612. return ret;
  1613. }
  1614. pp = spkt.data;
  1615. spkt.extra = pkt->extra;
  1616. ff_amf_write_string(&pp, "onStatus");
  1617. ff_amf_write_number(&pp, 0);
  1618. ff_amf_write_null(&pp);
  1619. ff_amf_write_object_start(&pp);
  1620. ff_amf_write_field_name(&pp, "level");
  1621. ff_amf_write_string(&pp, "status");
  1622. ff_amf_write_field_name(&pp, "code");
  1623. ff_amf_write_string(&pp, status);
  1624. ff_amf_write_field_name(&pp, "description");
  1625. snprintf(statusmsg, sizeof(statusmsg),
  1626. "%s is now published", filename);
  1627. ff_amf_write_string(&pp, statusmsg);
  1628. ff_amf_write_field_name(&pp, "details");
  1629. ff_amf_write_string(&pp, filename);
  1630. ff_amf_write_object_end(&pp);
  1631. spkt.size = pp - spkt.data;
  1632. ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
  1633. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  1634. ff_rtmp_packet_destroy(&spkt);
  1635. return ret;
  1636. }
  1637. static int send_invoke_response(URLContext *s, RTMPPacket *pkt)
  1638. {
  1639. RTMPContext *rt = s->priv_data;
  1640. double seqnum;
  1641. char filename[128];
  1642. char command[64];
  1643. int stringlen;
  1644. char *pchar;
  1645. const uint8_t *p = pkt->data;
  1646. uint8_t *pp = NULL;
  1647. RTMPPacket spkt = { 0 };
  1648. GetByteContext gbc;
  1649. int ret;
  1650. bytestream2_init(&gbc, p, pkt->size);
  1651. if (ff_amf_read_string(&gbc, command, sizeof(command),
  1652. &stringlen)) {
  1653. av_log(s, AV_LOG_ERROR, "Error in PT_INVOKE\n");
  1654. return AVERROR_INVALIDDATA;
  1655. }
  1656. ret = ff_amf_read_number(&gbc, &seqnum);
  1657. if (ret)
  1658. return ret;
  1659. ret = ff_amf_read_null(&gbc);
  1660. if (ret)
  1661. return ret;
  1662. if (!strcmp(command, "FCPublish") ||
  1663. !strcmp(command, "publish")) {
  1664. ret = ff_amf_read_string(&gbc, filename,
  1665. sizeof(filename), &stringlen);
  1666. if (ret) {
  1667. if (ret == AVERROR(EINVAL))
  1668. av_log(s, AV_LOG_ERROR, "Unable to parse stream name - name too long?\n");
  1669. else
  1670. av_log(s, AV_LOG_ERROR, "Unable to parse stream name\n");
  1671. return ret;
  1672. }
  1673. // Check the requested stream name against the URL path
  1674. if (s->filename) {
  1675. pchar = strrchr(s->filename, '/');
  1676. if (!pchar) {
  1677. av_log(s, AV_LOG_WARNING,
  1678. "Unable to find / in url %s, bad format\n",
  1679. s->filename);
  1680. pchar = s->filename;
  1681. }
  1682. pchar++;
  1683. if (strcmp(pchar, filename))
  1684. av_log(s, AV_LOG_WARNING, "Unexpected stream %s, expecting"
  1685. " %s\n", filename, pchar);
  1686. }
  1687. rt->state = STATE_RECEIVING;
  1688. }
  1689. if (!strcmp(command, "FCPublish")) {
  1690. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
  1691. RTMP_PT_INVOKE, 0,
  1692. RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
  1693. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1694. return ret;
  1695. }
  1696. pp = spkt.data;
  1697. ff_amf_write_string(&pp, "onFCPublish");
  1698. } else if (!strcmp(command, "publish")) {
  1699. ret = write_begin(s);
  1700. if (ret < 0)
  1701. return ret;
  1702. // Send onStatus(NetStream.Publish.Start)
  1703. return write_status(s, pkt, "NetStream.Publish.Start",
  1704. filename);
  1705. } else if (!strcmp(command, "play")) {
  1706. ret = write_begin(s);
  1707. if (ret < 0)
  1708. return ret;
  1709. rt->state = STATE_SENDING;
  1710. return write_status(s, pkt, "NetStream.Play.Start",
  1711. filename);
  1712. } else {
  1713. if ((ret = ff_rtmp_packet_create(&spkt, RTMP_SYSTEM_CHANNEL,
  1714. RTMP_PT_INVOKE, 0,
  1715. RTMP_PKTDATA_DEFAULT_SIZE)) < 0) {
  1716. av_log(s, AV_LOG_ERROR, "Unable to create response packet\n");
  1717. return ret;
  1718. }
  1719. pp = spkt.data;
  1720. ff_amf_write_string(&pp, "_result");
  1721. ff_amf_write_number(&pp, seqnum);
  1722. ff_amf_write_null(&pp);
  1723. if (!strcmp(command, "createStream")) {
  1724. rt->nb_streamid++;
  1725. if (rt->nb_streamid == 0 || rt->nb_streamid == 2)
  1726. rt->nb_streamid++; /* Values 0 and 2 are reserved */
  1727. ff_amf_write_number(&pp, rt->nb_streamid);
  1728. /* For now we don't control which streams are removed in
  1729. * deleteStream, and stream ids are not protected against
  1730. * collisions if a client creates more than 2^32 - 2 streams. */
  1731. }
  1732. }
  1733. spkt.size = pp - spkt.data;
  1734. ret = ff_rtmp_packet_write(rt->stream, &spkt, rt->out_chunk_size,
  1735. &rt->prev_pkt[1], &rt->nb_prev_pkt[1]);
  1736. ff_rtmp_packet_destroy(&spkt);
  1737. return ret;
  1738. }
  1739. /**
  1740. * Read the AMF_NUMBER response ("_result") to a function call
  1741. * (e.g. createStream()). This response should be made up of the AMF_STRING
  1742. * "_result", a NULL object and then the response encoded as AMF_NUMBER. On a
  1743. * successful response, the value is stored in *number (otherwise *number is
  1744. * left unchanged).
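 *
 * Illustrative reply to createStream():
 *   "_result", <transaction id>, null, <new stream id as AMF_NUMBER>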
  1745. *
  1746. * @return 0 if reading the value succeeds, negative value otherwise
  1747. */
  1748. static int read_number_result(RTMPPacket *pkt, double *number)
  1749. {
  1750. // We only need to fit "_result" in this.
  1751. uint8_t strbuffer[8];
  1752. int stringlen;
  1753. double numbuffer;
  1754. GetByteContext gbc;
  1755. bytestream2_init(&gbc, pkt->data, pkt->size);
  1756. // Value 1/4: "_result" as AMF_STRING
  1757. if (ff_amf_read_string(&gbc, strbuffer, sizeof(strbuffer), &stringlen))
  1758. return AVERROR_INVALIDDATA;
  1759. if (strcmp(strbuffer, "_result"))
  1760. return AVERROR_INVALIDDATA;
  1761. // Value 2/4: The callee reference number
  1762. if (ff_amf_read_number(&gbc, &numbuffer))
  1763. return AVERROR_INVALIDDATA;
  1764. // Value 3/4: Null
  1765. if (ff_amf_read_null(&gbc))
  1766. return AVERROR_INVALIDDATA;
  1767. // Value 4/4: The response as AMF_NUMBER
  1768. if (ff_amf_read_number(&gbc, &numbuffer))
  1769. return AVERROR_INVALIDDATA;
  1770. else
  1771. *number = numbuffer;
  1772. return 0;
  1773. }
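/*
 * Sketch of the command sequences driven from handle_invoke_result() below
 * (the names refer to the gen_* helpers defined earlier in this file):
 *   publishing: connect -> releaseStream + FCPublish -> createStream -> publish
 *   playing:    connect -> server bandwidth -> createStream
 *               [-> getStreamLength if not live] -> play -> buffer time
 */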
  1774. static int handle_invoke_result(URLContext *s, RTMPPacket *pkt)
  1775. {
  1776. RTMPContext *rt = s->priv_data;
  1777. char *tracked_method = NULL;
  1778. int ret = 0;
  1779. if ((ret = find_tracked_method(s, pkt, 10, &tracked_method)) < 0)
  1780. return ret;
  1781. if (!tracked_method) {
  1782. /* Ignore this reply when the current method is not tracked. */
  1783. return ret;
  1784. }
  1785. if (!strcmp(tracked_method, "connect")) {
  1786. if (!rt->is_input) {
  1787. if ((ret = gen_release_stream(s, rt)) < 0)
  1788. goto fail;
  1789. if ((ret = gen_fcpublish_stream(s, rt)) < 0)
  1790. goto fail;
  1791. } else {
  1792. if ((ret = gen_server_bw(s, rt)) < 0)
  1793. goto fail;
  1794. }
  1795. if ((ret = gen_create_stream(s, rt)) < 0)
  1796. goto fail;
  1797. if (rt->is_input) {
  1798. /* Send the FCSubscribe command when the name of the live
  1799. * stream has been defined by the user or when it is a live stream. */
  1800. if (rt->subscribe) {
  1801. if ((ret = gen_fcsubscribe_stream(s, rt, rt->subscribe)) < 0)
  1802. goto fail;
  1803. } else if (rt->live == -1) {
  1804. if ((ret = gen_fcsubscribe_stream(s, rt, rt->playpath)) < 0)
  1805. goto fail;
  1806. }
  1807. }
  1808. } else if (!strcmp(tracked_method, "createStream")) {
  1809. double stream_id;
  1810. if (read_number_result(pkt, &stream_id)) {
  1811. av_log(s, AV_LOG_WARNING, "Unexpected reply to createStream()\n");
  1812. } else {
  1813. rt->stream_id = stream_id;
  1814. }
  1815. if (!rt->is_input) {
  1816. if ((ret = gen_publish(s, rt)) < 0)
  1817. goto fail;
  1818. } else {
  1819. if (rt->live != -1) {
  1820. if ((ret = gen_get_stream_length(s, rt)) < 0)
  1821. goto fail;
  1822. }
  1823. if ((ret = gen_play(s, rt)) < 0)
  1824. goto fail;
  1825. if ((ret = gen_buffer_time(s, rt)) < 0)
  1826. goto fail;
  1827. }
  1828. } else if (!strcmp(tracked_method, "getStreamLength")) {
  1829. if (read_number_result(pkt, &rt->duration)) {
  1830. av_log(s, AV_LOG_WARNING, "Unexpected reply on getStreamLength()\n");
  1831. }
  1832. }
  1833. fail:
  1834. av_free(tracked_method);
  1835. return ret;
  1836. }
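/*
 * An onStatus invoke consists of the string "onStatus", a transaction id,
 * null and an info object. In AMF0 the string "onStatus" happens to occupy
 * exactly RTMP_HEADER (11) bytes (1 type byte + 2 length bytes + 8
 * characters), so the code below skips it by that offset and then steps over
 * the next two AMF values to reach the info object.
 */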
  1837. static int handle_invoke_status(URLContext *s, RTMPPacket *pkt)
  1838. {
  1839. RTMPContext *rt = s->priv_data;
  1840. const uint8_t *data_end = pkt->data + pkt->size;
  1841. const uint8_t *ptr = pkt->data + RTMP_HEADER;
  1842. uint8_t tmpstr[256];
  1843. int i, t;
  1844. for (i = 0; i < 2; i++) {
  1845. t = ff_amf_tag_size(ptr, data_end);
  1846. if (t < 0)
  1847. return 1;
  1848. ptr += t;
  1849. }
  1850. t = ff_amf_get_field_value(ptr, data_end, "level", tmpstr, sizeof(tmpstr));
  1851. if (!t && !strcmp(tmpstr, "error")) {
  1852. t = ff_amf_get_field_value(ptr, data_end,
  1853. "description", tmpstr, sizeof(tmpstr));
  1854. if (t || !tmpstr[0])
  1855. t = ff_amf_get_field_value(ptr, data_end, "code",
  1856. tmpstr, sizeof(tmpstr));
  1857. if (!t)
  1858. av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
  1859. return -1;
  1860. }
  1861. t = ff_amf_get_field_value(ptr, data_end, "code", tmpstr, sizeof(tmpstr));
  1862. if (!t && !strcmp(tmpstr, "NetStream.Play.Start")) rt->state = STATE_PLAYING;
  1863. if (!t && !strcmp(tmpstr, "NetStream.Play.Stop")) rt->state = STATE_STOPPED;
  1864. if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
  1865. if (!t && !strcmp(tmpstr, "NetStream.Publish.Start")) rt->state = STATE_PUBLISHING;
  1866. if (!t && !strcmp(tmpstr, "NetStream.Seek.Notify")) rt->state = STATE_PLAYING;
  1867. return 0;
  1868. }
  1869. static int handle_invoke(URLContext *s, RTMPPacket *pkt)
  1870. {
  1871. RTMPContext *rt = s->priv_data;
  1872. int ret = 0;
  1873. //TODO: check for the messages sent for wrong state?
  1874. if (ff_amf_match_string(pkt->data, pkt->size, "_error")) {
  1875. if ((ret = handle_invoke_error(s, pkt)) < 0)
  1876. return ret;
  1877. } else if (ff_amf_match_string(pkt->data, pkt->size, "_result")) {
  1878. if ((ret = handle_invoke_result(s, pkt)) < 0)
  1879. return ret;
  1880. } else if (ff_amf_match_string(pkt->data, pkt->size, "onStatus")) {
  1881. if ((ret = handle_invoke_status(s, pkt)) < 0)
  1882. return ret;
  1883. } else if (ff_amf_match_string(pkt->data, pkt->size, "onBWDone")) {
  1884. if ((ret = gen_check_bw(s, rt)) < 0)
  1885. return ret;
  1886. } else if (ff_amf_match_string(pkt->data, pkt->size, "releaseStream") ||
  1887. ff_amf_match_string(pkt->data, pkt->size, "FCPublish") ||
  1888. ff_amf_match_string(pkt->data, pkt->size, "publish") ||
  1889. ff_amf_match_string(pkt->data, pkt->size, "play") ||
  1890. ff_amf_match_string(pkt->data, pkt->size, "_checkbw") ||
  1891. ff_amf_match_string(pkt->data, pkt->size, "createStream")) {
  1892. if ((ret = send_invoke_response(s, pkt)) < 0)
  1893. return ret;
  1894. }
  1895. return ret;
  1896. }
  1897. static int update_offset(RTMPContext *rt, int size)
  1898. {
  1899. int old_flv_size;
  1900. // generate packet header and put data into buffer for FLV demuxer
  1901. if (rt->flv_off < rt->flv_size) {
  1902. // There is old unread data in the buffer, thus append at the end
  1903. old_flv_size = rt->flv_size;
  1904. rt->flv_size += size;
  1905. } else {
  1906. // All data has been read, write the new data at the start of the buffer
  1907. old_flv_size = 0;
  1908. rt->flv_size = size;
  1909. rt->flv_off = 0;
  1910. }
  1911. return old_flv_size;
  1912. }
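/*
 * Layout of each FLV tag appended to the buffer (sketch):
 *   1 byte    tag type (audio/video/script)
 *   3 bytes   payload size
 *   3+1 bytes timestamp (lower 24 bits plus extension byte)
 *   3 bytes   stream id (always 0)
 *   N bytes   payload
 *   4 bytes   size of the previous tag (payload size + 11)
 * i.e. 15 bytes of framing per tag, which is why update_offset() is asked
 * for size + 15 below.
 */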
  1913. static int append_flv_data(RTMPContext *rt, RTMPPacket *pkt, int skip)
  1914. {
  1915. int old_flv_size, ret;
  1916. PutByteContext pbc;
  1917. const uint8_t *data = pkt->data + skip;
  1918. const int size = pkt->size - skip;
  1919. uint32_t ts = pkt->timestamp;
  1920. if (pkt->type == RTMP_PT_AUDIO) {
  1921. rt->has_audio = 1;
  1922. } else if (pkt->type == RTMP_PT_VIDEO) {
  1923. rt->has_video = 1;
  1924. }
  1925. old_flv_size = update_offset(rt, size + 15);
  1926. if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
  1927. rt->flv_size = rt->flv_off = 0;
  1928. return ret;
  1929. }
  1930. bytestream2_init_writer(&pbc, rt->flv_data, rt->flv_size);
  1931. bytestream2_skip_p(&pbc, old_flv_size);
  1932. bytestream2_put_byte(&pbc, pkt->type);
  1933. bytestream2_put_be24(&pbc, size);
  1934. bytestream2_put_be24(&pbc, ts);
  1935. bytestream2_put_byte(&pbc, ts >> 24);
  1936. bytestream2_put_be24(&pbc, 0);
  1937. bytestream2_put_buffer(&pbc, data, size);
  1938. bytestream2_put_be32(&pbc, size + RTMP_HEADER);
  1939. return 0;
  1940. }
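/*
 * Illustrative shape of a NOTIFY packet from a publishing client:
 *   "@setDataFrame", "onMetaData", { duration: ..., videocodecid: ..., ... }
 * Servers relaying a stream typically send just "onMetaData" followed by the
 * mixed array, which is the case handled first below.
 */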
  1941. static int handle_notify(URLContext *s, RTMPPacket *pkt)
  1942. {
  1943. RTMPContext *rt = s->priv_data;
  1944. uint8_t commandbuffer[64];
  1945. char statusmsg[128];
  1946. int stringlen, ret, skip = 0;
  1947. GetByteContext gbc;
  1948. bytestream2_init(&gbc, pkt->data, pkt->size);
  1949. if (ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
  1950. &stringlen))
  1951. return AVERROR_INVALIDDATA;
  1952. if (!strcmp(commandbuffer, "onMetaData")) {
  1953. // metadata properties should be stored in a mixed array
  1954. if (bytestream2_get_byte(&gbc) == AMF_DATA_TYPE_MIXEDARRAY) {
  1955. // We have found a metadata array, so the FLV demuxer can determine
  1956. // the streams from it.
  1957. rt->received_metadata = 1;
  1958. // skip 32-bit max array index
  1959. bytestream2_skip(&gbc, 4);
  1960. while (bytestream2_get_bytes_left(&gbc) > 3) {
  1961. if (ff_amf_get_string(&gbc, statusmsg, sizeof(statusmsg),
  1962. &stringlen))
  1963. return AVERROR_INVALIDDATA;
  1964. // We do not care about the content of the property (yet).
  1965. stringlen = ff_amf_tag_size(gbc.buffer, gbc.buffer_end);
  1966. if (stringlen < 0)
  1967. return AVERROR_INVALIDDATA;
  1968. bytestream2_skip(&gbc, stringlen);
  1969. // The presence of the following properties indicates that the
  1970. // respective streams are present.
  1971. if (!strcmp(statusmsg, "videocodecid")) {
  1972. rt->has_video = 1;
  1973. }
  1974. if (!strcmp(statusmsg, "audiocodecid")) {
  1975. rt->has_audio = 1;
  1976. }
  1977. }
  1978. if (bytestream2_get_be24(&gbc) != AMF_END_OF_OBJECT)
  1979. return AVERROR_INVALIDDATA;
  1980. }
  1981. }
  1982. // Skip the @setDataFrame string and validate it is a notification
  1983. if (!strcmp(commandbuffer, "@setDataFrame")) {
  1984. skip = gbc.buffer - pkt->data;
  1985. ret = ff_amf_read_string(&gbc, statusmsg,
  1986. sizeof(statusmsg), &stringlen);
  1987. if (ret < 0)
  1988. return AVERROR_INVALIDDATA;
  1989. }
  1990. return append_flv_data(rt, pkt, skip);
  1991. }
  1992. /**
  1993. * Parse received packet and possibly perform some action depending on
  1994. * the packet contents.
  1995. * @return 0 for no errors, negative values for serious errors which prevent
  1996. * further communications, positive values for uncritical errors
  1997. */
  1998. static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
  1999. {
  2000. int ret;
  2001. #ifdef DEBUG
  2002. ff_rtmp_packet_dump(s, pkt);
  2003. #endif
  2004. switch (pkt->type) {
  2005. case RTMP_PT_BYTES_READ:
  2006. av_log(s, AV_LOG_TRACE, "received bytes read report\n");
  2007. break;
  2008. case RTMP_PT_CHUNK_SIZE:
  2009. if ((ret = handle_chunk_size(s, pkt)) < 0)
  2010. return ret;
  2011. break;
  2012. case RTMP_PT_PING:
  2013. if ((ret = handle_ping(s, pkt)) < 0)
  2014. return ret;
  2015. break;
  2016. case RTMP_PT_CLIENT_BW:
  2017. if ((ret = handle_client_bw(s, pkt)) < 0)
  2018. return ret;
  2019. break;
  2020. case RTMP_PT_SERVER_BW:
  2021. if ((ret = handle_server_bw(s, pkt)) < 0)
  2022. return ret;
  2023. break;
  2024. case RTMP_PT_INVOKE:
  2025. if ((ret = handle_invoke(s, pkt)) < 0)
  2026. return ret;
  2027. break;
  2028. case RTMP_PT_VIDEO:
  2029. case RTMP_PT_AUDIO:
  2030. case RTMP_PT_METADATA:
  2031. case RTMP_PT_NOTIFY:
  2032. /* Audio, Video and Metadata packets are parsed in get_packet() */
  2033. break;
  2034. default:
  2035. av_log(s, AV_LOG_VERBOSE, "Unknown packet type received 0x%02X\n", pkt->type);
  2036. break;
  2037. }
  2038. return 0;
  2039. }
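/*
 * An RTMP_PT_METADATA packet already contains complete FLV tags. The loop
 * below copies them into the FLV buffer while rebasing their timestamps onto
 * the RTMP packet timestamp: the first tag gets the packet timestamp and the
 * following tags keep their relative distances.
 */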
  2040. static int handle_metadata(RTMPContext *rt, RTMPPacket *pkt)
  2041. {
  2042. int ret, old_flv_size, type;
  2043. const uint8_t *next;
  2044. uint8_t *p;
  2045. uint32_t size;
  2046. uint32_t ts, cts, pts = 0;
  2047. old_flv_size = update_offset(rt, pkt->size);
  2048. if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0) {
  2049. rt->flv_size = rt->flv_off = 0;
  2050. return ret;
  2051. }
  2052. next = pkt->data;
  2053. p = rt->flv_data + old_flv_size;
  2054. /* copy data while rewriting timestamps */
  2055. ts = pkt->timestamp;
  2056. while (next - pkt->data < pkt->size - RTMP_HEADER) {
  2057. type = bytestream_get_byte(&next);
  2058. size = bytestream_get_be24(&next);
  2059. cts = bytestream_get_be24(&next);
  2060. cts |= bytestream_get_byte(&next) << 24;
  2061. if (!pts)
  2062. pts = cts;
  2063. ts += cts - pts;
  2064. pts = cts;
  2065. if (size + 3 + 4 > pkt->data + pkt->size - next)
  2066. break;
  2067. bytestream_put_byte(&p, type);
  2068. bytestream_put_be24(&p, size);
  2069. bytestream_put_be24(&p, ts);
  2070. bytestream_put_byte(&p, ts >> 24);
  2071. memcpy(p, next, size + 3 + 4);
  2072. p += size + 3;
  2073. bytestream_put_be32(&p, size + RTMP_HEADER);
  2074. next += size + 3 + 4;
  2075. }
  2076. if (p != rt->flv_data + rt->flv_size) {
  2077. av_log(NULL, AV_LOG_WARNING, "Incomplete flv packets in "
  2078. "RTMP_PT_METADATA packet\n");
  2079. rt->flv_size = p - rt->flv_data;
  2080. }
  2081. return 0;
  2082. }
  2083. /**
  2084. * Interact with the server by receiving and sending RTMP packets until
  2085. * there is some significant data (media data or expected status notification).
  2086. *
  2087. * @param s reading context
  2088. * @param for_header non-zero value tells function to work until it
  2089. * gets notification from the server that playing has been started,
  2090. * otherwise function will work until some media data is received (or
  2091. * an error happens)
  2092. * @return 0 for successful operation, negative value in case of error
  2093. */
  2094. static int get_packet(URLContext *s, int for_header)
  2095. {
  2096. RTMPContext *rt = s->priv_data;
  2097. int ret;
  2098. if (rt->state == STATE_STOPPED)
  2099. return AVERROR_EOF;
  2100. for (;;) {
  2101. RTMPPacket rpkt = { 0 };
  2102. if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
  2103. rt->in_chunk_size, &rt->prev_pkt[0],
  2104. &rt->nb_prev_pkt[0])) <= 0) {
  2105. if (ret == 0) {
  2106. return AVERROR(EAGAIN);
  2107. } else {
  2108. return AVERROR(EIO);
  2109. }
  2110. }
  2111. // Track timestamp for later use
  2112. rt->last_timestamp = rpkt.timestamp;
  2113. rt->bytes_read += ret;
  2114. if (rt->bytes_read - rt->last_bytes_read > rt->client_report_size) {
  2115. av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
  2116. if ((ret = gen_bytes_read(s, rt, rpkt.timestamp + 1)) < 0)
  2117. return ret;
  2118. rt->last_bytes_read = rt->bytes_read;
  2119. }
  2120. ret = rtmp_parse_result(s, rt, &rpkt);
  2121. // At this point we must check if we are in the seek state and continue
  2122. // with the next packet. handle_invoke will get us out of this state
  2123. // when the right message is encountered
  2124. if (rt->state == STATE_SEEKING) {
  2125. ff_rtmp_packet_destroy(&rpkt);
  2126. // We continue, let the natural flow of things happen:
  2127. // AVERROR(EAGAIN) or handle_invoke gets us out of here
  2128. continue;
  2129. }
  2130. if (ret < 0) {//serious error in current packet
  2131. ff_rtmp_packet_destroy(&rpkt);
  2132. return ret;
  2133. }
  2134. if (rt->do_reconnect && for_header) {
  2135. ff_rtmp_packet_destroy(&rpkt);
  2136. return 0;
  2137. }
  2138. if (rt->state == STATE_STOPPED) {
  2139. ff_rtmp_packet_destroy(&rpkt);
  2140. return AVERROR_EOF;
  2141. }
  2142. if (for_header && (rt->state == STATE_PLAYING ||
  2143. rt->state == STATE_PUBLISHING ||
  2144. rt->state == STATE_SENDING ||
  2145. rt->state == STATE_RECEIVING)) {
  2146. ff_rtmp_packet_destroy(&rpkt);
  2147. return 0;
  2148. }
  2149. if (!rpkt.size || !rt->is_input) {
  2150. ff_rtmp_packet_destroy(&rpkt);
  2151. continue;
  2152. }
  2153. if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO) {
  2154. ret = append_flv_data(rt, &rpkt, 0);
  2155. ff_rtmp_packet_destroy(&rpkt);
  2156. return ret;
  2157. } else if (rpkt.type == RTMP_PT_NOTIFY) {
  2158. ret = handle_notify(s, &rpkt);
  2159. ff_rtmp_packet_destroy(&rpkt);
  2160. return ret;
  2161. } else if (rpkt.type == RTMP_PT_METADATA) {
  2162. ret = handle_metadata(rt, &rpkt);
  2163. ff_rtmp_packet_destroy(&rpkt);
  2164. return ret;
  2165. }
  2166. ff_rtmp_packet_destroy(&rpkt);
  2167. }
  2168. }
  2169. static int rtmp_close(URLContext *h)
  2170. {
  2171. RTMPContext *rt = h->priv_data;
  2172. int ret = 0, i, j;
  2173. if (!rt->is_input) {
  2174. rt->flv_data = NULL;
  2175. if (rt->out_pkt.size)
  2176. ff_rtmp_packet_destroy(&rt->out_pkt);
  2177. if (rt->state > STATE_FCPUBLISH)
  2178. ret = gen_fcunpublish_stream(h, rt);
  2179. }
  2180. if (rt->state > STATE_HANDSHAKED)
  2181. ret = gen_delete_stream(h, rt);
  2182. for (i = 0; i < 2; i++) {
  2183. for (j = 0; j < rt->nb_prev_pkt[i]; j++)
  2184. ff_rtmp_packet_destroy(&rt->prev_pkt[i][j]);
  2185. av_freep(&rt->prev_pkt[i]);
  2186. }
  2187. free_tracked_methods(rt);
  2188. av_freep(&rt->flv_data);
  2189. ffurl_close(rt->stream);
  2190. return ret;
  2191. }
  2192. /**
  2193. * Insert a fake onMetadata packet into the FLV stream to notify the FLV
  2194. * demuxer about the duration of the stream.
  2195. *
  2196. * This should only be done if there was no real onMetadata packet sent by the
  2197. * server at the start of the stream and if we were able to retrieve a valid
  2198. * duration via a getStreamLength call.
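 *
 * The injected tag is 55 bytes: an 11-byte FLV tag header, 40 bytes of AMF
 * data ("onMetaData" plus a one-entry mixed array holding "duration") and
 * the 4-byte previous-tag-size field, as built step by step below.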
  2199. *
  2200. * @return 0 for successful operation, negative value in case of error
  2201. */
  2202. static int inject_fake_duration_metadata(RTMPContext *rt)
  2203. {
  2204. // We need to insert the metadata packet directly after the FLV
  2205. // header, i.e. we need to move all other already read data by the
  2206. // size of our fake metadata packet.
  2207. uint8_t* p;
  2208. // Keep old flv_data pointer
  2209. uint8_t* old_flv_data = rt->flv_data;
  2210. // Allocate a new flv_data buffer with enough space for the additional packet
  2211. if (!(rt->flv_data = av_malloc(rt->flv_size + 55))) {
  2212. rt->flv_data = old_flv_data;
  2213. return AVERROR(ENOMEM);
  2214. }
  2215. // Copy FLV header
  2216. memcpy(rt->flv_data, old_flv_data, 13);
  2217. // Copy remaining packets
  2218. memcpy(rt->flv_data + 13 + 55, old_flv_data + 13, rt->flv_size - 13);
  2219. // Increase the size by the injected packet
  2220. rt->flv_size += 55;
  2221. // Delete the old FLV data
  2222. av_freep(&old_flv_data);
  2223. p = rt->flv_data + 13;
  2224. bytestream_put_byte(&p, FLV_TAG_TYPE_META);
  2225. bytestream_put_be24(&p, 40); // size of data part (sum of all parts below)
  2226. bytestream_put_be24(&p, 0); // timestamp
  2227. bytestream_put_be32(&p, 0); // reserved
  2228. // first event name as a string
  2229. bytestream_put_byte(&p, AMF_DATA_TYPE_STRING);
  2230. // "onMetaData" as AMF string
  2231. bytestream_put_be16(&p, 10);
  2232. bytestream_put_buffer(&p, "onMetaData", 10);
  2233. // mixed array (hash) with size and string/type/data tuples
  2234. bytestream_put_byte(&p, AMF_DATA_TYPE_MIXEDARRAY);
  2235. bytestream_put_be32(&p, 1); // metadata_count
  2236. // "duration" as AMF string
  2237. bytestream_put_be16(&p, 8);
  2238. bytestream_put_buffer(&p, "duration", 8);
  2239. bytestream_put_byte(&p, AMF_DATA_TYPE_NUMBER);
  2240. bytestream_put_be64(&p, av_double2int(rt->duration));
  2241. // Finalise object
  2242. bytestream_put_be16(&p, 0); // Empty string
  2243. bytestream_put_byte(&p, AMF_END_OF_OBJECT);
  2244. bytestream_put_be32(&p, 40 + RTMP_HEADER); // size of data part (sum of all parts above)
  2245. return 0;
  2246. }
  2247. /**
  2248. * Open RTMP connection and verify that the stream can be played.
  2249. *
  2250. * URL syntax: rtmp://server[:port][/app][/playpath]
  2251. * where 'app' is the first one or two directories in the path
  2252. * (e.g. /ondemand/, /flash/live/, etc.)
  2253. * and 'playpath' is a file name (the rest of the path,
  2254. * may be prefixed with "mp4:")
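 * e.g. (illustrative) rtmp://example.com/live/mystream
 *      or rtmp://example.com/vod/mp4:dir/file.mp4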
  2255. */
  2256. static int rtmp_open(URLContext *s, const char *uri, int flags, AVDictionary **opts)
  2257. {
  2258. RTMPContext *rt = s->priv_data;
  2259. char proto[8], hostname[256], path[1024], auth[100], *fname;
  2260. char *old_app, *qmark, *n, fname_buffer[1024];
  2261. uint8_t buf[2048];
  2262. int port;
  2263. int ret;
  2264. if (rt->listen_timeout > 0)
  2265. rt->listen = 1;
  2266. rt->is_input = !(flags & AVIO_FLAG_WRITE);
  2267. av_url_split(proto, sizeof(proto), auth, sizeof(auth),
  2268. hostname, sizeof(hostname), &port,
  2269. path, sizeof(path), s->filename);
  2270. n = strchr(path, ' ');
  2271. if (n) {
  2272. av_log(s, AV_LOG_WARNING,
  2273. "Detected librtmp style URL parameters, these aren't supported "
  2274. "by the libavformat internal RTMP handler currently enabled. "
  2275. "See the documentation for the correct way to pass parameters.\n");
  2276. *n = '\0'; // Trim not supported part
  2277. }
  2278. if (auth[0]) {
  2279. char *ptr = strchr(auth, ':');
  2280. if (ptr) {
  2281. *ptr = '\0';
  2282. av_strlcpy(rt->username, auth, sizeof(rt->username));
  2283. av_strlcpy(rt->password, ptr + 1, sizeof(rt->password));
  2284. }
  2285. }
  2286. if (rt->listen && strcmp(proto, "rtmp")) {
  2287. av_log(s, AV_LOG_ERROR, "rtmp_listen not available for %s\n",
  2288. proto);
  2289. return AVERROR(EINVAL);
  2290. }
  2291. if (!strcmp(proto, "rtmpt") || !strcmp(proto, "rtmpts")) {
  2292. if (!strcmp(proto, "rtmpts"))
  2293. av_dict_set(opts, "ffrtmphttp_tls", "1", 1);
  2294. /* open the http tunneling connection */
  2295. ff_url_join(buf, sizeof(buf), "ffrtmphttp", NULL, hostname, port, NULL);
  2296. } else if (!strcmp(proto, "rtmps")) {
  2297. /* open the tls connection */
  2298. if (port < 0)
  2299. port = RTMPS_DEFAULT_PORT;
  2300. ff_url_join(buf, sizeof(buf), "tls", NULL, hostname, port, NULL);
  2301. } else if (!strcmp(proto, "rtmpe") || (!strcmp(proto, "rtmpte"))) {
  2302. if (!strcmp(proto, "rtmpte"))
  2303. av_dict_set(opts, "ffrtmpcrypt_tunneling", "1", 1);
  2304. /* open the encrypted connection */
  2305. ff_url_join(buf, sizeof(buf), "ffrtmpcrypt", NULL, hostname, port, NULL);
  2306. rt->encrypted = 1;
  2307. } else {
  2308. /* open the tcp connection */
  2309. if (port < 0)
  2310. port = RTMP_DEFAULT_PORT;
  2311. if (rt->listen)
  2312. ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port,
  2313. "?listen&listen_timeout=%d",
  2314. rt->listen_timeout * 1000);
  2315. else
  2316. ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);
  2317. }
  2318. reconnect:
  2319. if ((ret = ffurl_open_whitelist(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
  2320. &s->interrupt_callback, opts,
  2321. s->protocol_whitelist, s->protocol_blacklist, s)) < 0) {
  2322. av_log(s , AV_LOG_ERROR, "Cannot open connection %s\n", buf);
  2323. goto fail;
  2324. }
  2325. if (rt->swfverify) {
  2326. if ((ret = rtmp_calc_swfhash(s)) < 0)
  2327. goto fail;
  2328. }
  2329. rt->state = STATE_START;
  2330. if (!rt->listen && (ret = rtmp_handshake(s, rt)) < 0)
  2331. goto fail;
  2332. if (rt->listen && (ret = rtmp_server_handshake(s, rt)) < 0)
  2333. goto fail;
  2334. rt->out_chunk_size = 128;
  2335. rt->in_chunk_size = 128; // Probably overwritten later
  2336. rt->state = STATE_HANDSHAKED;
  2337. // Keep the application name when it has been defined by the user.
  2338. old_app = rt->app;
  2339. rt->app = av_malloc(APP_MAX_LENGTH);
  2340. if (!rt->app) {
  2341. ret = AVERROR(ENOMEM);
  2342. goto fail;
  2343. }
  2344. //extract "app" part from path
  2345. qmark = strchr(path, '?');
  2346. if (qmark && strstr(qmark, "slist=")) {
  2347. char* amp;
  2348. // After slist we have the playpath, the full path is used as app
  2349. av_strlcpy(rt->app, path + 1, APP_MAX_LENGTH);
  2350. fname = strstr(path, "slist=") + 6;
  2351. // Strip any further query parameters from fname
  2352. amp = strchr(fname, '&');
  2353. if (amp) {
  2354. av_strlcpy(fname_buffer, fname, FFMIN(amp - fname + 1,
  2355. sizeof(fname_buffer)));
  2356. fname = fname_buffer;
  2357. }
  2358. } else if (!strncmp(path, "/ondemand/", 10)) {
  2359. fname = path + 10;
  2360. memcpy(rt->app, "ondemand", 9);
  2361. } else {
  2362. char *next = *path ? path + 1 : path;
  2363. char *p = strchr(next, '/');
  2364. if (!p) {
  2365. if (old_app) {
  2366. // If name of application has been defined by the user, assume that
  2367. // playpath is provided in the URL
  2368. fname = next;
  2369. } else {
  2370. fname = NULL;
  2371. av_strlcpy(rt->app, next, APP_MAX_LENGTH);
  2372. }
  2373. } else {
  2374. // Make sure we do not mistake a playpath for an application instance
  2375. char *c = strchr(p + 1, ':');
  2376. fname = strchr(p + 1, '/');
  2377. if (!fname || (c && c < fname)) {
  2378. fname = p + 1;
  2379. av_strlcpy(rt->app, path + 1, FFMIN(p - path, APP_MAX_LENGTH));
  2380. } else {
  2381. fname++;
  2382. av_strlcpy(rt->app, path + 1, FFMIN(fname - path - 1, APP_MAX_LENGTH));
  2383. }
  2384. }
  2385. }
  2386. if (old_app) {
  2387. // The name of application has been defined by the user, override it.
  2388. if (strlen(old_app) >= APP_MAX_LENGTH) {
  2389. ret = AVERROR(EINVAL);
  2390. goto fail;
  2391. }
  2392. av_free(rt->app);
  2393. rt->app = old_app;
  2394. }
  2395. if (!rt->playpath) {
  2396. rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
  2397. if (!rt->playpath) {
  2398. ret = AVERROR(ENOMEM);
  2399. goto fail;
  2400. }
  2401. if (fname) {
  2402. int len = strlen(fname);
  2403. if (!strchr(fname, ':') && len >= 4 &&
  2404. (!strcmp(fname + len - 4, ".f4v") ||
  2405. !strcmp(fname + len - 4, ".mp4"))) {
  2406. memcpy(rt->playpath, "mp4:", 5);
  2407. } else {
  2408. if (len >= 4 && !strcmp(fname + len - 4, ".flv"))
  2409. fname[len - 4] = '\0';
  2410. rt->playpath[0] = 0;
  2411. }
  2412. av_strlcat(rt->playpath, fname, PLAYPATH_MAX_LENGTH);
  2413. } else {
  2414. rt->playpath[0] = '\0';
  2415. }
  2416. }
  2417. if (!rt->tcurl) {
  2418. rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
  2419. if (!rt->tcurl) {
  2420. ret = AVERROR(ENOMEM);
  2421. goto fail;
  2422. }
  2423. ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
  2424. port, "/%s", rt->app);
  2425. }
  2426. if (!rt->flashver) {
  2427. rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
  2428. if (!rt->flashver) {
  2429. ret = AVERROR(ENOMEM);
  2430. goto fail;
  2431. }
  2432. if (rt->is_input) {
  2433. snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
  2434. RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
  2435. RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
  2436. } else {
  2437. snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
  2438. "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
  2439. }
  2440. }
  2441. rt->client_report_size = 1048576;
  2442. rt->bytes_read = 0;
  2443. rt->has_audio = 0;
  2444. rt->has_video = 0;
  2445. rt->received_metadata = 0;
  2446. rt->last_bytes_read = 0;
  2447. rt->server_bw = 2500000;
  2448. rt->duration = 0;
  2449. av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
  2450. proto, path, rt->app, rt->playpath);
  2451. if (!rt->listen) {
  2452. if ((ret = gen_connect(s, rt)) < 0)
  2453. goto fail;
  2454. } else {
  2455. if ((ret = read_connect(s, s->priv_data)) < 0)
  2456. goto fail;
  2457. }
  2458. do {
  2459. ret = get_packet(s, 1);
  2460. } while (ret == AVERROR(EAGAIN));
  2461. if (ret < 0)
  2462. goto fail;
  2463. if (rt->do_reconnect) {
  2464. int i;
  2465. ffurl_close(rt->stream);
  2466. rt->stream = NULL;
  2467. rt->do_reconnect = 0;
  2468. rt->nb_invokes = 0;
  2469. for (i = 0; i < 2; i++)
  2470. memset(rt->prev_pkt[i], 0,
  2471. sizeof(**rt->prev_pkt) * rt->nb_prev_pkt[i]);
  2472. free_tracked_methods(rt);
  2473. goto reconnect;
  2474. }
  2475. if (rt->is_input) {
  2476. // generate FLV header for demuxer
  2477. rt->flv_size = 13;
  2478. if ((ret = av_reallocp(&rt->flv_data, rt->flv_size)) < 0)
  2479. goto fail;
  2480. rt->flv_off = 0;
  2481. memcpy(rt->flv_data, "FLV\1\0\0\0\0\011\0\0\0\0", rt->flv_size);
  2482. // Read packets until we reach the first A/V packet or read metadata.
  2483. // If there was a metadata packet in front of the A/V packets, we can
  2484. // build the FLV header from it. If we do not receive any metadata,
  2485. // the FLV demuxer will allocate the needed streams when their first
  2486. // audio or video packet arrives.
  2487. while (!rt->has_audio && !rt->has_video && !rt->received_metadata) {
  2488. if ((ret = get_packet(s, 0)) < 0)
  2489. goto fail;
  2490. }
  2491. // Either after we have read the metadata or (if there is none) the
  2492. // first packet of an A/V stream, we have a better knowledge about the
  2493. // streams, so set the FLV header accordingly.
  2494. if (rt->has_audio) {
  2495. rt->flv_data[4] |= FLV_HEADER_FLAG_HASAUDIO;
  2496. }
  2497. if (rt->has_video) {
  2498. rt->flv_data[4] |= FLV_HEADER_FLAG_HASVIDEO;
  2499. }
  2500. // If we received the first packet of an A/V stream and no metadata but
  2501. // the server returned a valid duration, create a fake metadata packet
  2502. // to inform the FLV decoder about the duration.
  2503. if (!rt->received_metadata && rt->duration > 0) {
  2504. if ((ret = inject_fake_duration_metadata(rt)) < 0)
  2505. goto fail;
  2506. }
  2507. } else {
  2508. rt->flv_size = 0;
  2509. rt->flv_data = NULL;
  2510. rt->flv_off = 0;
  2511. rt->skip_bytes = 13;
  2512. }
  2513. s->max_packet_size = rt->stream->max_packet_size;
  2514. s->is_streamed = 1;
  2515. return 0;
  2516. fail:
  2517. av_dict_free(opts);
  2518. rtmp_close(s);
  2519. return ret;
  2520. }
  2521. static int rtmp_read(URLContext *s, uint8_t *buf, int size)
  2522. {
  2523. RTMPContext *rt = s->priv_data;
  2524. int orig_size = size;
  2525. int ret;
  2526. while (size > 0) {
  2527. int data_left = rt->flv_size - rt->flv_off;
  2528. if (data_left >= size) {
  2529. memcpy(buf, rt->flv_data + rt->flv_off, size);
  2530. rt->flv_off += size;
  2531. return orig_size;
  2532. }
  2533. if (data_left > 0) {
  2534. memcpy(buf, rt->flv_data + rt->flv_off, data_left);
  2535. buf += data_left;
  2536. size -= data_left;
  2537. rt->flv_off = rt->flv_size;
  2538. return data_left;
  2539. }
  2540. if ((ret = get_packet(s, 0)) < 0)
  2541. return ret;
  2542. }
  2543. return orig_size;
  2544. }
  2545. static int64_t rtmp_seek(URLContext *s, int stream_index, int64_t timestamp,
  2546. int flags)
  2547. {
  2548. RTMPContext *rt = s->priv_data;
  2549. int ret;
  2550. av_log(s, AV_LOG_DEBUG,
  2551. "Seek on stream index %d at timestamp %"PRId64" with flags %08x\n",
  2552. stream_index, timestamp, flags);
  2553. if ((ret = gen_seek(s, rt, timestamp)) < 0) {
  2554. av_log(s, AV_LOG_ERROR,
  2555. "Unable to send seek command on stream index %d at timestamp "
  2556. "%"PRId64" with flags %08x\n",
  2557. stream_index, timestamp, flags);
  2558. return ret;
  2559. }
  2560. rt->flv_off = rt->flv_size;
  2561. rt->state = STATE_SEEKING;
  2562. return timestamp;
  2563. }
  2564. static int rtmp_pause(URLContext *s, int pause)
  2565. {
  2566. RTMPContext *rt = s->priv_data;
  2567. int ret;
  2568. av_log(s, AV_LOG_DEBUG, "Pause at timestamp %d\n",
  2569. rt->last_timestamp);
  2570. if ((ret = gen_pause(s, rt, pause, rt->last_timestamp)) < 0) {
  2571. av_log(s, AV_LOG_ERROR, "Unable to send pause command at timestamp %d\n",
  2572. rt->last_timestamp);
  2573. return ret;
  2574. }
  2575. return 0;
  2576. }
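/*
 * rtmp_write() receives an FLV stream from the muxer and repackages each FLV
 * tag as an RTMP packet: it first accumulates the 11-byte tag header (type,
 * size, timestamp), then the payload, and finally skips the 4-byte
 * previous-tag-size trailer via rt->skip_bytes. The initial 13-byte FLV file
 * header is skipped the same way (skip_bytes = 13, set in rtmp_open()).
 */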
  2577. static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
  2578. {
  2579. RTMPContext *rt = s->priv_data;
  2580. int size_temp = size;
  2581. int pktsize, pkttype, copy;
  2582. uint32_t ts;
  2583. const uint8_t *buf_temp = buf;
  2584. uint8_t c;
  2585. int ret;
  2586. do {
  2587. if (rt->skip_bytes) {
  2588. int skip = FFMIN(rt->skip_bytes, size_temp);
  2589. buf_temp += skip;
  2590. size_temp -= skip;
  2591. rt->skip_bytes -= skip;
  2592. continue;
  2593. }
  2594. if (rt->flv_header_bytes < RTMP_HEADER) {
  2595. const uint8_t *header = rt->flv_header;
  2596. int channel = RTMP_AUDIO_CHANNEL;
  2597. copy = FFMIN(RTMP_HEADER - rt->flv_header_bytes, size_temp);
  2598. bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
  2599. rt->flv_header_bytes += copy;
  2600. size_temp -= copy;
  2601. if (rt->flv_header_bytes < RTMP_HEADER)
  2602. break;
  2603. pkttype = bytestream_get_byte(&header);
  2604. pktsize = bytestream_get_be24(&header);
  2605. ts = bytestream_get_be24(&header);
  2606. ts |= bytestream_get_byte(&header) << 24;
  2607. bytestream_get_be24(&header);
  2608. rt->flv_size = pktsize;
  2609. if (pkttype == RTMP_PT_VIDEO)
  2610. channel = RTMP_VIDEO_CHANNEL;
  2611. if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
  2612. pkttype == RTMP_PT_NOTIFY) {
  2613. if ((ret = ff_rtmp_check_alloc_array(&rt->prev_pkt[1],
  2614. &rt->nb_prev_pkt[1],
  2615. channel)) < 0)
  2616. return ret;
  2617. // Force sending a full 12 bytes header by clearing the
  2618. // channel id, to make it not match a potential earlier
  2619. // packet in the same channel.
  2620. rt->prev_pkt[1][channel].channel_id = 0;
  2621. }
  2622. // This can be a big packet; it's better to send it right away
  2623. if ((ret = ff_rtmp_packet_create(&rt->out_pkt, channel,
  2624. pkttype, ts, pktsize)) < 0)
  2625. return ret;
  2626. rt->out_pkt.extra = rt->stream_id;
  2627. rt->flv_data = rt->out_pkt.data;
  2628. }
  2629. copy = FFMIN(rt->flv_size - rt->flv_off, size_temp);
  2630. bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, copy);
  2631. rt->flv_off += copy;
  2632. size_temp -= copy;
  2633. if (rt->flv_off == rt->flv_size) {
  2634. rt->skip_bytes = 4;
  2635. if (rt->out_pkt.type == RTMP_PT_NOTIFY) {
  2636. // For onMetaData and |RtmpSampleAccess packets, we want
  2637. // @setDataFrame prepended to the packet before it gets sent.
  2638. // However, not all RTMP_PT_NOTIFY packets (e.g., onTextData
  2639. // and onCuePoint) should get that prefix.
  2640. uint8_t commandbuffer[64];
  2641. int stringlen = 0;
  2642. GetByteContext gbc;
  2643. bytestream2_init(&gbc, rt->flv_data, rt->flv_size);
  2644. if (!ff_amf_read_string(&gbc, commandbuffer, sizeof(commandbuffer),
  2645. &stringlen)) {
  2646. if (!strcmp(commandbuffer, "onMetaData") ||
  2647. !strcmp(commandbuffer, "|RtmpSampleAccess")) {
  2648. uint8_t *ptr;
  2649. if ((ret = av_reallocp(&rt->out_pkt.data, rt->out_pkt.size + 16)) < 0) {
  2650. rt->flv_size = rt->flv_off = rt->flv_header_bytes = 0;
  2651. return ret;
  2652. }
  2653. memmove(rt->out_pkt.data + 16, rt->out_pkt.data, rt->out_pkt.size);
  2654. rt->out_pkt.size += 16;
  2655. ptr = rt->out_pkt.data;
  2656. ff_amf_write_string(&ptr, "@setDataFrame");
  2657. }
  2658. }
  2659. }
  2660. if ((ret = rtmp_send_packet(rt, &rt->out_pkt, 0)) < 0)
  2661. return ret;
  2662. rt->flv_size = 0;
  2663. rt->flv_off = 0;
  2664. rt->flv_header_bytes = 0;
  2665. rt->flv_nb_packets++;
  2666. }
  2667. } while (buf_temp - buf < size);
  2668. if (rt->flv_nb_packets < rt->flush_interval)
  2669. return size;
  2670. rt->flv_nb_packets = 0;
  2671. /* set stream into nonblocking mode */
  2672. rt->stream->flags |= AVIO_FLAG_NONBLOCK;
  2673. /* try to read one byte from the stream */
  2674. ret = ffurl_read(rt->stream, &c, 1);
  2675. /* switch the stream back into blocking mode */
  2676. rt->stream->flags &= ~AVIO_FLAG_NONBLOCK;
  2677. if (ret == AVERROR(EAGAIN)) {
  2678. /* no incoming data to handle */
  2679. return size;
  2680. } else if (ret < 0) {
  2681. return ret;
  2682. } else if (ret == 1) {
  2683. RTMPPacket rpkt = { 0 };
  2684. if ((ret = ff_rtmp_packet_read_internal(rt->stream, &rpkt,
  2685. rt->in_chunk_size,
  2686. &rt->prev_pkt[0],
  2687. &rt->nb_prev_pkt[0], c)) <= 0)
  2688. return ret;
  2689. if ((ret = rtmp_parse_result(s, rt, &rpkt)) < 0)
  2690. return ret;
  2691. ff_rtmp_packet_destroy(&rpkt);
  2692. }
  2693. return size;
  2694. }
  2695. #define OFFSET(x) offsetof(RTMPContext, x)
  2696. #define DEC AV_OPT_FLAG_DECODING_PARAM
  2697. #define ENC AV_OPT_FLAG_ENCODING_PARAM
  2698. static const AVOption rtmp_options[] = {
  2699. {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2700. {"rtmp_buffer", "Set buffer time in milliseconds. The default is 3000.", OFFSET(client_buffer_time), AV_OPT_TYPE_INT, {.i64 = 3000}, 0, INT_MAX, DEC|ENC},
  2701. {"rtmp_conn", "Append arbitrary AMF data to the Connect message", OFFSET(conn), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2702. {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2703. {"rtmp_flush_interval", "Number of packets flushed in the same request (RTMPT only).", OFFSET(flush_interval), AV_OPT_TYPE_INT, {.i64 = 10}, 0, INT_MAX, ENC},
  2704. {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {.i64 = -2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
  2705. {"any", "both", 0, AV_OPT_TYPE_CONST, {.i64 = -2}, 0, 0, DEC, "rtmp_live"},
  2706. {"live", "live stream", 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, DEC, "rtmp_live"},
  2707. {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, DEC, "rtmp_live"},
  2708. {"rtmp_pageurl", "URL of the web page in which the media was embedded. By default no value will be sent.", OFFSET(pageurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
  2709. {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2710. {"rtmp_subscribe", "Name of live stream to subscribe to. Defaults to rtmp_playpath.", OFFSET(subscribe), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
  2711. {"rtmp_swfhash", "SHA256 hash of the decompressed SWF file (32 bytes).", OFFSET(swfhash), AV_OPT_TYPE_BINARY, .flags = DEC},
  2712. {"rtmp_swfsize", "Size of the decompressed SWF file, required for SWFVerification.", OFFSET(swfsize), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC},
  2713. {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2714. {"rtmp_swfverify", "URL to player swf file, compute hash/size automatically.", OFFSET(swfverify), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC},
  2715. {"rtmp_tcurl", "URL of the target stream. Defaults to proto://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
  2716. {"rtmp_listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
  2717. {"listen", "Listen for incoming rtmp connections", OFFSET(listen), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
  2718. {"timeout", "Maximum timeout (in seconds) to wait for incoming connections. -1 is infinite. Implies -rtmp_listen 1", OFFSET(listen_timeout), AV_OPT_TYPE_INT, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "rtmp_listen" },
  2719. { NULL },
  2720. };
  2721. #define RTMP_PROTOCOL(flavor) \
  2722. static const AVClass flavor##_class = { \
  2723. .class_name = #flavor, \
  2724. .item_name = av_default_item_name, \
  2725. .option = rtmp_options, \
  2726. .version = LIBAVUTIL_VERSION_INT, \
  2727. }; \
  2728. \
  2729. const URLProtocol ff_##flavor##_protocol = { \
  2730. .name = #flavor, \
  2731. .url_open2 = rtmp_open, \
  2732. .url_read = rtmp_read, \
  2733. .url_read_seek = rtmp_seek, \
  2734. .url_read_pause = rtmp_pause, \
  2735. .url_write = rtmp_write, \
  2736. .url_close = rtmp_close, \
  2737. .priv_data_size = sizeof(RTMPContext), \
  2738. .flags = URL_PROTOCOL_FLAG_NETWORK, \
  2739. .priv_data_class= &flavor##_class, \
  2740. };
  2741. RTMP_PROTOCOL(rtmp)
  2742. RTMP_PROTOCOL(rtmpe)
  2743. RTMP_PROTOCOL(rtmps)
  2744. RTMP_PROTOCOL(rtmpt)
  2745. RTMP_PROTOCOL(rtmpte)
  2746. RTMP_PROTOCOL(rtmpts)