/*
 * RTMP network protocol
 * Copyright (c) 2009 Kostya Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * RTMP protocol
 */

#include "libavcodec/bytestream.h"
#include "libavutil/avstring.h"
#include "libavutil/intfloat.h"
#include "libavutil/lfg.h"
#include "libavutil/opt.h"
#include "libavutil/sha.h"
#include "avformat.h"
#include "internal.h"

#include "network.h"

#include "flv.h"
#include "rtmp.h"
#include "rtmppkt.h"
#include "url.h"

//#define DEBUG

#define APP_MAX_LENGTH 128
#define PLAYPATH_MAX_LENGTH 256
#define TCURL_MAX_LENGTH 512
#define FLASHVER_MAX_LENGTH 64
/** RTMP protocol handler state */
typedef enum {
    STATE_START,      ///< client has not done anything yet
    STATE_HANDSHAKED, ///< client has performed handshake
    STATE_RELEASING,  ///< client releasing stream before publishing it (for output)
    STATE_FCPUBLISH,  ///< client FCPublishing stream (for output)
    STATE_CONNECTING, ///< client connected to server successfully
    STATE_READY,      ///< client has sent all needed commands and waits for server reply
    STATE_PLAYING,    ///< client has started receiving multimedia data from server
    STATE_PUBLISHING, ///< client has started sending multimedia data to server (for output)
    STATE_STOPPED,    ///< the broadcast has been stopped
} ClientState;
/** protocol handler context */
typedef struct RTMPContext {
    const AVClass *class;
    URLContext*   stream;                     ///< TCP stream used in interactions with RTMP server
    RTMPPacket    prev_pkt[2][RTMP_CHANNELS]; ///< packet history used when reading and sending packets
    int           chunk_size;                 ///< size of the chunks RTMP packets are divided into
    int           is_input;                   ///< input/output flag
    char          *playpath;                  ///< stream identifier to play (with possible "mp4:" prefix)
    int           live;                       ///< 0: recorded, -1: live, -2: both
    char          *app;                       ///< name of application
    ClientState   state;                      ///< current state
    int           main_channel_id;            ///< an additional channel ID which is used for some invocations
    uint8_t*      flv_data;                   ///< buffer with data for demuxer
    int           flv_size;                   ///< current buffer size
    int           flv_off;                    ///< number of bytes read from current buffer
    RTMPPacket    out_pkt;                    ///< rtmp packet, created from flv a/v or metadata (for output)
    uint32_t      client_report_size;         ///< number of bytes after which client should report to server
    uint32_t      bytes_read;                 ///< number of bytes read from server
    uint32_t      last_bytes_read;            ///< number of bytes read last reported to server
    int           skip_bytes;                 ///< number of bytes to skip from the input FLV stream in the next write call
    uint8_t       flv_header[11];             ///< partial incoming flv packet header
    int           flv_header_bytes;           ///< number of initialized bytes in flv_header
    int           nb_invokes;                 ///< keeps track of invoke messages
    int           create_stream_invoke;       ///< invoke id for the create stream command
    char*         tcurl;                      ///< url of the target stream
    char*         flashver;                   ///< version of the flash plugin
    char*         swfurl;                     ///< url of the swf player
} RTMPContext;
#define PLAYER_KEY_OPEN_PART_LEN 30   ///< length of partial key used for first client digest signing
/** Client key used for digest signing */
static const uint8_t rtmp_player_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'P', 'l', 'a', 'y', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};

#define SERVER_KEY_OPEN_PART_LEN 36   ///< length of partial key used for first server digest signing
/** Key used for RTMP server digest signing */
static const uint8_t rtmp_server_key[] = {
    'G', 'e', 'n', 'u', 'i', 'n', 'e', ' ', 'A', 'd', 'o', 'b', 'e', ' ',
    'F', 'l', 'a', 's', 'h', ' ', 'M', 'e', 'd', 'i', 'a', ' ',
    'S', 'e', 'r', 'v', 'e', 'r', ' ', '0', '0', '1',

    0xF0, 0xEE, 0xC2, 0x4A, 0x80, 0x68, 0xBE, 0xE8, 0x2E, 0x00, 0xD0, 0xD1, 0x02,
    0x9E, 0x7E, 0x57, 0x6E, 0xEC, 0x5D, 0x2D, 0x29, 0x80, 0x6F, 0xAB, 0x93, 0xB8,
    0xE6, 0x36, 0xCF, 0xEB, 0x31, 0xAE
};
/**
 * Generate 'connect' call and send it to the server.
 */
static void gen_connect(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 4096);
    p = pkt.data;

    ff_amf_write_string(&p, "connect");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_object_start(&p);
    ff_amf_write_field_name(&p, "app");
    ff_amf_write_string(&p, rt->app);

    if (!rt->is_input) {
        ff_amf_write_field_name(&p, "type");
        ff_amf_write_string(&p, "nonprivate");
    }
    ff_amf_write_field_name(&p, "flashVer");
    ff_amf_write_string(&p, rt->flashver);

    if (rt->swfurl) {
        ff_amf_write_field_name(&p, "swfUrl");
        ff_amf_write_string(&p, rt->swfurl);
    }

    ff_amf_write_field_name(&p, "tcUrl");
    ff_amf_write_string(&p, rt->tcurl);
    if (rt->is_input) {
        ff_amf_write_field_name(&p, "fpad");
        ff_amf_write_bool(&p, 0);
        ff_amf_write_field_name(&p, "capabilities");
        ff_amf_write_number(&p, 15.0);

        /* Tell the server we support all the audio codecs except
         * SUPPORT_SND_INTEL (0x0008) and SUPPORT_SND_UNUSED (0x0010)
         * which are unused in the RTMP protocol implementation. */
        ff_amf_write_field_name(&p, "audioCodecs");
        ff_amf_write_number(&p, 4071.0);
        ff_amf_write_field_name(&p, "videoCodecs");
        ff_amf_write_number(&p, 252.0);
        ff_amf_write_field_name(&p, "videoFunction");
        ff_amf_write_number(&p, 1.0);
    }
    ff_amf_write_object_end(&p);

    pkt.data_size = p - pkt.data;

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate 'releaseStream' call and send it to the server. It should make
 * the server release some channel for media streams.
 */
static void gen_release_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          29 + strlen(rt->playpath));

    av_log(s, AV_LOG_DEBUG, "Releasing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "releaseStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate 'FCPublish' call and send it to the server. It should make
 * the server prepare for receiving media streams.
 */
static void gen_fcpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          25 + strlen(rt->playpath));

    av_log(s, AV_LOG_DEBUG, "FCPublish stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCPublish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate 'FCUnpublish' call and send it to the server. It should make
 * the server destroy the stream.
 */
static void gen_fcunpublish_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0,
                          27 + strlen(rt->playpath));

    av_log(s, AV_LOG_DEBUG, "UnPublishing stream...\n");
    p = pkt.data;
    ff_amf_write_string(&p, "FCUnpublish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate 'createStream' call and send it to the server. It should make
 * the server allocate some channel for media streams.
 */
static void gen_create_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Creating stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 25);

    p = pkt.data;
    ff_amf_write_string(&p, "createStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    rt->create_stream_invoke = rt->nb_invokes;

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate 'deleteStream' call and send it to the server. It should make
 * the server remove some channel for media streams.
 */
static void gen_delete_stream(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Deleting stream...\n");
    ff_rtmp_packet_create(&pkt, RTMP_SYSTEM_CHANNEL, RTMP_PT_INVOKE, 0, 34);

    p = pkt.data;
    ff_amf_write_string(&p, "deleteStream");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_number(&p, rt->main_channel_id);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate 'play' call and send it to the server, then ping the server
 * to start actual playing.
 */
static void gen_play(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Sending play command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_VIDEO_CHANNEL, RTMP_PT_INVOKE, 0,
                          29 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "play");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_number(&p, rt->live);

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);

    // set client buffer time disguised in ping packet
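    // (user control event 3 = SetBufferLength: 4-byte stream ID followed by
    //  the requested buffer length in milliseconds)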
    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, 1, 10);

    p = pkt.data;
    bytestream_put_be16(&p, 3);
    bytestream_put_be32(&p, 1);
    bytestream_put_be32(&p, 256); //TODO: what is a good value here?

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate 'publish' call and send it to the server.
 */
static void gen_publish(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    av_log(s, AV_LOG_DEBUG, "Sending publish command for '%s'\n", rt->playpath);
    ff_rtmp_packet_create(&pkt, RTMP_SOURCE_CHANNEL, RTMP_PT_INVOKE, 0,
                          30 + strlen(rt->playpath));
    pkt.extra = rt->main_channel_id;

    p = pkt.data;
    ff_amf_write_string(&p, "publish");
    ff_amf_write_number(&p, ++rt->nb_invokes);
    ff_amf_write_null(&p);
    ff_amf_write_string(&p, rt->playpath);
    ff_amf_write_string(&p, "live");

    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate ping reply and send it to the server.
 */
static void gen_pong(URLContext *s, RTMPContext *rt, RTMPPacket *ppkt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_PING, ppkt->timestamp + 1, 6);
    p = pkt.data;
    bytestream_put_be16(&p, 7);
    bytestream_put_be32(&p, AV_RB32(ppkt->data+2));
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate server bandwidth message and send it to the server.
 */
static void gen_server_bw(URLContext *s, RTMPContext *rt)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_SERVER_BW, 0, 4);
    p = pkt.data;
    bytestream_put_be32(&p, 2500000);
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
/**
 * Generate report on bytes read so far and send it to the server.
 */
static void gen_bytes_read(URLContext *s, RTMPContext *rt, uint32_t ts)
{
    RTMPPacket pkt;
    uint8_t *p;

    ff_rtmp_packet_create(&pkt, RTMP_NETWORK_CHANNEL, RTMP_PT_BYTES_READ, ts, 4);
    p = pkt.data;
    bytestream_put_be32(&p, rt->bytes_read);
    ff_rtmp_packet_write(rt->stream, &pkt, rt->chunk_size, rt->prev_pkt[1]);
    ff_rtmp_packet_destroy(&pkt);
}
//TODO: Move HMAC code somewhere. Eventually.
#define HMAC_IPAD_VAL 0x36
#define HMAC_OPAD_VAL 0x5C

/**
 * Calculate HMAC-SHA2 digest for RTMP handshake packets.
 *
 * @param src    input buffer
 * @param len    input buffer length (should be 1536)
 * @param gap    offset in buffer where 32 bytes should not be taken into account
 *               when calculating digest (since it will be used to store that digest)
 * @param key    digest key
 * @param keylen digest key length
 * @param dst    buffer where calculated digest will be stored (32 bytes)
 */
static void rtmp_calc_digest(const uint8_t *src, int len, int gap,
                             const uint8_t *key, int keylen, uint8_t *dst)
{
    struct AVSHA *sha;
    uint8_t hmac_buf[64+32] = {0};
    int i;

    sha = av_mallocz(av_sha_size);

    if (keylen < 64) {
        memcpy(hmac_buf, key, keylen);
    } else {
        av_sha_init(sha, 256);
        av_sha_update(sha, key, keylen);
        av_sha_final(sha, hmac_buf);
    }
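    // standard HMAC: dst = SHA-256((key ^ opad) || SHA-256((key ^ ipad) || msg))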
    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL;
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64);
    if (gap <= 0) {
        av_sha_update(sha, src, len);
    } else { //skip 32 bytes used for storing digest
        av_sha_update(sha, src, gap);
        av_sha_update(sha, src + gap + 32, len - gap - 32);
    }
    av_sha_final(sha, hmac_buf + 64);

    for (i = 0; i < 64; i++)
        hmac_buf[i] ^= HMAC_IPAD_VAL ^ HMAC_OPAD_VAL; //reuse XORed key for opad
    av_sha_init(sha, 256);
    av_sha_update(sha, hmac_buf, 64+32);
    av_sha_final(sha, dst);

    av_free(sha);
}
/**
 * Put HMAC-SHA2 digest of packet data (except for the bytes where this digest
 * will be stored) into that packet.
 *
 * @param buf handshake data (1536 bytes)
 * @return offset to the digest inside input data
 */
static int rtmp_handshake_imprint_with_digest(uint8_t *buf)
{
    int i, digest_pos = 0;

    for (i = 8; i < 12; i++)
        digest_pos += buf[i];
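    // the four bytes after the version field select a position inside the
    // 764-byte digest block: 728 = 764 - 4 (offset field) - 32 (digest),
    // and +12 skips the time, version and offset fields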
    digest_pos = (digest_pos % 728) + 12;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_player_key, PLAYER_KEY_OPEN_PART_LEN,
                     buf + digest_pos);
    return digest_pos;
}
/**
 * Verify that the received server response has the expected digest value.
 *
 * @param buf handshake data received from the server (1536 bytes)
 * @param off position to search digest offset from
 * @return digest position if the digest is valid, 0 otherwise
 */
static int rtmp_validate_digest(uint8_t *buf, int off)
{
    int i, digest_pos = 0;
    uint8_t digest[32];

    for (i = 0; i < 4; i++)
        digest_pos += buf[i + off];
    digest_pos = (digest_pos % 728) + off + 4;

    rtmp_calc_digest(buf, RTMP_HANDSHAKE_PACKET_SIZE, digest_pos,
                     rtmp_server_key, SERVER_KEY_OPEN_PART_LEN,
                     digest);
    if (!memcmp(digest, buf + digest_pos, 32))
        return digest_pos;
    return 0;
}
/**
 * Perform handshake with the server by means of exchanging pseudorandom data
 * signed with HMAC-SHA2 digest.
 *
 * @return 0 if handshake succeeds, negative value otherwise
 */
static int rtmp_handshake(URLContext *s, RTMPContext *rt)
{
    AVLFG rnd;
    uint8_t tosend    [RTMP_HANDSHAKE_PACKET_SIZE+1] = {
        3,                // unencrypted data
        0, 0, 0, 0,       // client uptime
        RTMP_CLIENT_VER1,
        RTMP_CLIENT_VER2,
        RTMP_CLIENT_VER3,
        RTMP_CLIENT_VER4,
    };
    uint8_t clientdata[RTMP_HANDSHAKE_PACKET_SIZE];
    uint8_t serverdata[RTMP_HANDSHAKE_PACKET_SIZE+1];
    int i;
    int server_pos, client_pos;
    uint8_t digest[32];

    av_log(s, AV_LOG_DEBUG, "Handshaking...\n");

    av_lfg_init(&rnd, 0xDEADC0DE);
    // generate handshake packet - 1536 bytes of pseudorandom data
    for (i = 9; i <= RTMP_HANDSHAKE_PACKET_SIZE; i++)
        tosend[i] = av_lfg_get(&rnd) >> 24;
    client_pos = rtmp_handshake_imprint_with_digest(tosend + 1);

    ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    i = ffurl_read_complete(rt->stream, serverdata, RTMP_HANDSHAKE_PACKET_SIZE + 1);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE + 1) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }
    i = ffurl_read_complete(rt->stream, clientdata, RTMP_HANDSHAKE_PACKET_SIZE);
    if (i != RTMP_HANDSHAKE_PACKET_SIZE) {
        av_log(s, AV_LOG_ERROR, "Cannot read RTMP handshake response\n");
        return -1;
    }

    av_log(s, AV_LOG_DEBUG, "Server version %d.%d.%d.%d\n",
           serverdata[5], serverdata[6], serverdata[7], serverdata[8]);

    if (rt->is_input && serverdata[5] >= 3) {
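        // servers use one of two digest layouts: offset field at byte 772
        // (key block first) or at byte 8 (digest block first); try both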
        server_pos = rtmp_validate_digest(serverdata + 1, 772);
        if (!server_pos) {
            server_pos = rtmp_validate_digest(serverdata + 1, 8);
            if (!server_pos) {
                av_log(s, AV_LOG_ERROR, "Server response validating failed\n");
                return -1;
            }
        }

        rtmp_calc_digest(tosend + 1 + client_pos, 32, 0,
                         rtmp_server_key, sizeof(rtmp_server_key),
                         digest);
        rtmp_calc_digest(clientdata, RTMP_HANDSHAKE_PACKET_SIZE-32, 0,
                         digest, 32,
                         digest);
        if (memcmp(digest, clientdata + RTMP_HANDSHAKE_PACKET_SIZE - 32, 32)) {
            av_log(s, AV_LOG_ERROR, "Signature mismatch\n");
            return -1;
        }

        for (i = 0; i < RTMP_HANDSHAKE_PACKET_SIZE; i++)
            tosend[i] = av_lfg_get(&rnd) >> 24;
        rtmp_calc_digest(serverdata + 1 + server_pos, 32, 0,
                         rtmp_player_key, sizeof(rtmp_player_key),
                         digest);
        rtmp_calc_digest(tosend, RTMP_HANDSHAKE_PACKET_SIZE - 32, 0,
                         digest, 32,
                         tosend + RTMP_HANDSHAKE_PACKET_SIZE - 32);

        // write reply back to the server
        ffurl_write(rt->stream, tosend, RTMP_HANDSHAKE_PACKET_SIZE);
    } else {
        ffurl_write(rt->stream, serverdata+1, RTMP_HANDSHAKE_PACKET_SIZE);
    }

    return 0;
}
/**
 * Parse received packet and possibly perform some action depending on
 * the packet contents.
 * @return 0 for no errors, negative values for serious errors which prevent
 *         further communications, positive values for uncritical errors
 */
static int rtmp_parse_result(URLContext *s, RTMPContext *rt, RTMPPacket *pkt)
{
    int i, t;
    const uint8_t *data_end = pkt->data + pkt->data_size;

#ifdef DEBUG
    ff_rtmp_packet_dump(s, pkt);
#endif

    switch (pkt->type) {
    case RTMP_PT_CHUNK_SIZE:
        if (pkt->data_size != 4) {
            av_log(s, AV_LOG_ERROR,
                   "Chunk size change packet is not 4 bytes long (%d)\n", pkt->data_size);
            return -1;
        }
        if (!rt->is_input)
            ff_rtmp_packet_write(rt->stream, pkt, rt->chunk_size, rt->prev_pkt[1]);
        rt->chunk_size = AV_RB32(pkt->data);
        if (rt->chunk_size <= 0) {
            av_log(s, AV_LOG_ERROR, "Incorrect chunk size %d\n", rt->chunk_size);
            return -1;
        }
        av_log(s, AV_LOG_DEBUG, "New chunk size = %d\n", rt->chunk_size);
        break;
    case RTMP_PT_PING:
        t = AV_RB16(pkt->data);
        if (t == 6)
            gen_pong(s, rt, pkt);
        break;
    case RTMP_PT_CLIENT_BW:
        if (pkt->data_size < 4) {
            av_log(s, AV_LOG_ERROR,
                   "Client bandwidth report packet is less than 4 bytes long (%d)\n",
                   pkt->data_size);
            return -1;
        }
        av_log(s, AV_LOG_DEBUG, "Client bandwidth = %d\n", AV_RB32(pkt->data));
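        // report bytes read back to the server every time half of the
        // advertised window has been received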
        rt->client_report_size = AV_RB32(pkt->data) >> 1;
        break;
    case RTMP_PT_INVOKE:
        //TODO: check for the messages sent for wrong state?
        if (!memcmp(pkt->data, "\002\000\006_error", 9)) {
            uint8_t tmpstr[256];

            if (!ff_amf_get_field_value(pkt->data + 9, data_end,
                                        "description", tmpstr, sizeof(tmpstr)))
                av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
            return -1;
        } else if (!memcmp(pkt->data, "\002\000\007_result", 10)) {
            switch (rt->state) {
            case STATE_HANDSHAKED:
                if (!rt->is_input) {
                    gen_release_stream(s, rt);
                    gen_fcpublish_stream(s, rt);
                    rt->state = STATE_RELEASING;
                } else {
                    gen_server_bw(s, rt);
                    rt->state = STATE_CONNECTING;
                }
                gen_create_stream(s, rt);
                break;
            case STATE_FCPUBLISH:
                rt->state = STATE_CONNECTING;
                break;
            case STATE_RELEASING:
                rt->state = STATE_FCPUBLISH;
                /* hack for Wowza Media Server, it does not send result for
                 * releaseStream and FCPublish calls */
                if (!pkt->data[10]) {
                    int pkt_id = av_int2double(AV_RB64(pkt->data + 11));
                    if (pkt_id == rt->create_stream_invoke)
                        rt->state = STATE_CONNECTING;
                }
                if (rt->state != STATE_CONNECTING)
                    break;
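                /* intentional fall-through: the result answered createStream,
                 * handle it like the STATE_CONNECTING case below */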
            case STATE_CONNECTING:
                //extract a number from the result
                if (pkt->data[10] || pkt->data[19] != 5 || pkt->data[20]) {
                    av_log(s, AV_LOG_WARNING, "Unexpected reply on connect()\n");
                } else {
                    rt->main_channel_id = av_int2double(AV_RB64(pkt->data + 21));
                }
                if (rt->is_input) {
                    gen_play(s, rt);
                } else {
                    gen_publish(s, rt);
                }
                rt->state = STATE_READY;
                break;
            }
        } else if (!memcmp(pkt->data, "\002\000\010onStatus", 11)) {
            const uint8_t* ptr = pkt->data + 11;
            uint8_t tmpstr[256];

            for (i = 0; i < 2; i++) {
                t = ff_amf_tag_size(ptr, data_end);
                if (t < 0)
                    return 1;
                ptr += t;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "level", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "error")) {
                if (!ff_amf_get_field_value(ptr, data_end,
                                            "description", tmpstr, sizeof(tmpstr)))
                    av_log(s, AV_LOG_ERROR, "Server error: %s\n", tmpstr);
                return -1;
            }
            t = ff_amf_get_field_value(ptr, data_end,
                                       "code", tmpstr, sizeof(tmpstr));
            if (!t && !strcmp(tmpstr, "NetStream.Play.Start"))           rt->state = STATE_PLAYING;
            if (!t && !strcmp(tmpstr, "NetStream.Play.Stop"))            rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Play.UnpublishNotify")) rt->state = STATE_STOPPED;
            if (!t && !strcmp(tmpstr, "NetStream.Publish.Start"))        rt->state = STATE_PUBLISHING;
        }
        break;
    }
    return 0;
}
/**
 * Interact with the server by receiving and sending RTMP packets until
 * there is some significant data (media data or expected status notification).
 *
 * @param s          reading context
 * @param for_header non-zero value tells function to work until it
 *                   gets notification from the server that playing has been started,
 *                   otherwise function will work until some media data is received (or
 *                   an error happens)
 * @return 0 for successful operation, negative value in case of error
 */
static int get_packet(URLContext *s, int for_header)
{
    RTMPContext *rt = s->priv_data;
    int ret;
    uint8_t *p;
    const uint8_t *next;
    uint32_t data_size;
    uint32_t ts, cts, pts = 0;

    if (rt->state == STATE_STOPPED)
        return AVERROR_EOF;

    for (;;) {
        RTMPPacket rpkt = { 0 };
        if ((ret = ff_rtmp_packet_read(rt->stream, &rpkt,
                                       rt->chunk_size, rt->prev_pkt[0])) <= 0) {
            if (ret == 0) {
                return AVERROR(EAGAIN);
            } else {
                return AVERROR(EIO);
            }
        }
        rt->bytes_read += ret;
        if (rt->bytes_read > rt->last_bytes_read + rt->client_report_size) {
            av_log(s, AV_LOG_DEBUG, "Sending bytes read report\n");
            gen_bytes_read(s, rt, rpkt.timestamp + 1);
            rt->last_bytes_read = rt->bytes_read;
        }

        ret = rtmp_parse_result(s, rt, &rpkt);
        if (ret < 0) { //serious error in current packet
            ff_rtmp_packet_destroy(&rpkt);
            return -1;
        }
        if (rt->state == STATE_STOPPED) {
            ff_rtmp_packet_destroy(&rpkt);
            return AVERROR_EOF;
        }
        if (for_header && (rt->state == STATE_PLAYING || rt->state == STATE_PUBLISHING)) {
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        if (!rpkt.data_size || !rt->is_input) {
            ff_rtmp_packet_destroy(&rpkt);
            continue;
        }
        if (rpkt.type == RTMP_PT_VIDEO || rpkt.type == RTMP_PT_AUDIO ||
           (rpkt.type == RTMP_PT_NOTIFY && !memcmp("\002\000\012onMetaData", rpkt.data, 13))) {
            ts = rpkt.timestamp;

            // generate packet header and put data into buffer for FLV demuxer
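            // (FLV tag layout: type (1 byte), data size (3), timestamp (3 plus
            //  1 extended byte), stream ID (3), payload, then a 4-byte
            //  PreviousTagSize field -- hence the 15 extra bytes)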
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size + 15;
            rt->flv_data = p = av_realloc(rt->flv_data, rt->flv_size);

            bytestream_put_byte(&p, rpkt.type);
            bytestream_put_be24(&p, rpkt.data_size);
            bytestream_put_be24(&p, ts);
            bytestream_put_byte(&p, ts >> 24);
            bytestream_put_be24(&p, 0);
            bytestream_put_buffer(&p, rpkt.data, rpkt.data_size);
            bytestream_put_be32(&p, 0);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        } else if (rpkt.type == RTMP_PT_METADATA) {
            // we got raw FLV data, make it available for FLV demuxer
            rt->flv_off  = 0;
            rt->flv_size = rpkt.data_size;
            rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);

            /* rewrite timestamps */
            next = rpkt.data;
            ts = rpkt.timestamp;
            while (next - rpkt.data < rpkt.data_size - 11) {
                next++;
                data_size = bytestream_get_be24(&next);
                p = next;
                cts = bytestream_get_be24(&next);
                cts |= bytestream_get_byte(&next) << 24;
                if (pts == 0)
                    pts = cts;
                ts += cts - pts;
                pts = cts;
                bytestream_put_be24(&p, ts);
                bytestream_put_byte(&p, ts >> 24);
                next += data_size + 3 + 4;
            }
            memcpy(rt->flv_data, rpkt.data, rpkt.data_size);
            ff_rtmp_packet_destroy(&rpkt);
            return 0;
        }
        ff_rtmp_packet_destroy(&rpkt);
    }
}
static int rtmp_close(URLContext *h)
{
    RTMPContext *rt = h->priv_data;

    if (!rt->is_input) {
        rt->flv_data = NULL;
        if (rt->out_pkt.data_size)
            ff_rtmp_packet_destroy(&rt->out_pkt);
        if (rt->state > STATE_FCPUBLISH)
            gen_fcunpublish_stream(h, rt);
    }
    if (rt->state > STATE_HANDSHAKED)
        gen_delete_stream(h, rt);

    av_freep(&rt->flv_data);
    ffurl_close(rt->stream);
    return 0;
}
/**
 * Open RTMP connection and verify that the stream can be played.
 *
 * URL syntax: rtmp://server[:port][/app][/playpath]
 *             where 'app' is the first one or two directories in the path
 *             (e.g. /ondemand/, /flash/live/, etc.)
 *             and 'playpath' is a file name (the rest of the path,
 *             possibly prefixed with "mp4:")
 */
static int rtmp_open(URLContext *s, const char *uri, int flags)
{
    RTMPContext *rt = s->priv_data;
    char proto[8], hostname[256], path[1024], *fname;
    char *old_app;
    uint8_t buf[2048];
    int port;
    int ret;

    rt->is_input = !(flags & AVIO_FLAG_WRITE);

    av_url_split(proto, sizeof(proto), NULL, 0, hostname, sizeof(hostname), &port,
                 path, sizeof(path), s->filename);

    if (port < 0)
        port = RTMP_DEFAULT_PORT;
    ff_url_join(buf, sizeof(buf), "tcp", NULL, hostname, port, NULL);

    if (ffurl_open(&rt->stream, buf, AVIO_FLAG_READ_WRITE,
                   &s->interrupt_callback, NULL) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot open connection %s\n", buf);
        goto fail;
    }

    rt->state = STATE_START;
    if (rtmp_handshake(s, rt))
        goto fail;

    rt->chunk_size = 128;
    rt->state = STATE_HANDSHAKED;

    // Keep the application name when it has been defined by the user.
    old_app = rt->app;

    rt->app = av_malloc(APP_MAX_LENGTH);
    if (!rt->app) {
        rtmp_close(s);
        return AVERROR(ENOMEM);
    }

    //extract "app" part from path
    if (!strncmp(path, "/ondemand/", 10)) {
        fname = path + 10;
        memcpy(rt->app, "ondemand", 9);
    } else {
        char *p = strchr(path + 1, '/');
        if (!p) {
            fname = path + 1;
            rt->app[0] = '\0';
        } else {
            char *c = strchr(p + 1, ':');
            fname = strchr(p + 1, '/');
            if (!fname || c < fname) {
                fname = p + 1;
                av_strlcpy(rt->app, path + 1, p - path);
            } else {
                fname++;
                av_strlcpy(rt->app, path + 1, fname - path - 1);
            }
        }
    }
    if (old_app) {
        // The application name has been defined by the user, override it.
        av_free(rt->app);
        rt->app = old_app;
    }
    if (!rt->playpath) {
        rt->playpath = av_malloc(PLAYPATH_MAX_LENGTH);
        if (!rt->playpath) {
            rtmp_close(s);
            return AVERROR(ENOMEM);
        }

        if (!strchr(fname, ':') &&
            (!strcmp(fname + strlen(fname) - 4, ".f4v") ||
             !strcmp(fname + strlen(fname) - 4, ".mp4"))) {
            memcpy(rt->playpath, "mp4:", 5);
        } else {
            rt->playpath[0] = 0;
        }
        strncat(rt->playpath, fname, PLAYPATH_MAX_LENGTH - 5);
    }

    if (!rt->tcurl) {
        rt->tcurl = av_malloc(TCURL_MAX_LENGTH);
        ff_url_join(rt->tcurl, TCURL_MAX_LENGTH, proto, NULL, hostname,
                    port, "/%s", rt->app);
    }

    if (!rt->flashver) {
        rt->flashver = av_malloc(FLASHVER_MAX_LENGTH);
        if (rt->is_input) {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH, "%s %d,%d,%d,%d",
                     RTMP_CLIENT_PLATFORM, RTMP_CLIENT_VER1, RTMP_CLIENT_VER2,
                     RTMP_CLIENT_VER3, RTMP_CLIENT_VER4);
        } else {
            snprintf(rt->flashver, FLASHVER_MAX_LENGTH,
                     "FMLE/3.0 (compatible; %s)", LIBAVFORMAT_IDENT);
        }
    }

    rt->client_report_size = 1048576;
    rt->bytes_read = 0;
    rt->last_bytes_read = 0;

    av_log(s, AV_LOG_DEBUG, "Proto = %s, path = %s, app = %s, fname = %s\n",
           proto, path, rt->app, rt->playpath);
    gen_connect(s, rt);
    do {
        ret = get_packet(s, 1);
    } while (ret == AVERROR(EAGAIN));
    if (ret < 0)
        goto fail;
    if (rt->is_input) {
        // generate FLV header for demuxer
        rt->flv_size = 13;
        rt->flv_data = av_realloc(rt->flv_data, rt->flv_size);
        rt->flv_off  = 0;
        memcpy(rt->flv_data, "FLV\1\5\0\0\0\011\0\0\0\0", rt->flv_size);
    } else {
        rt->flv_size = 0;
        rt->flv_data = NULL;
        rt->flv_off  = 0;
        rt->skip_bytes = 13;
    }

    s->max_packet_size = rt->stream->max_packet_size;
    s->is_streamed = 1;
    return 0;

fail:
    rtmp_close(s);
    return AVERROR(EIO);
}
static int rtmp_read(URLContext *s, uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int orig_size = size;
    int ret;
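    // serve the demuxer from the buffered FLV data first, refilling the
    // buffer from the network once it has been drained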
    while (size > 0) {
        int data_left = rt->flv_size - rt->flv_off;

        if (data_left >= size) {
            memcpy(buf, rt->flv_data + rt->flv_off, size);
            rt->flv_off += size;
            return orig_size;
        }
        if (data_left > 0) {
            memcpy(buf, rt->flv_data + rt->flv_off, data_left);
            buf  += data_left;
            size -= data_left;
            rt->flv_off = rt->flv_size;
            return data_left;
        }
        if ((ret = get_packet(s, 0)) < 0)
            return ret;
    }
    return orig_size;
}
static int rtmp_write(URLContext *s, const uint8_t *buf, int size)
{
    RTMPContext *rt = s->priv_data;
    int size_temp = size;
    int pktsize, pkttype;
    uint32_t ts;
    const uint8_t *buf_temp = buf;

    do {
        if (rt->skip_bytes) {
            int skip = FFMIN(rt->skip_bytes, size_temp);
            buf_temp       += skip;
            size_temp      -= skip;
            rt->skip_bytes -= skip;
            continue;
        }
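        // accumulate the 11-byte FLV tag header first to learn the packet
        // type, payload size and timestamp of the next RTMP packet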
        if (rt->flv_header_bytes < 11) {
            const uint8_t *header = rt->flv_header;
            int copy = FFMIN(11 - rt->flv_header_bytes, size_temp);

            bytestream_get_buffer(&buf_temp, rt->flv_header + rt->flv_header_bytes, copy);
            rt->flv_header_bytes += copy;
            size_temp            -= copy;
            if (rt->flv_header_bytes < 11)
                break;

            pkttype = bytestream_get_byte(&header);
            pktsize = bytestream_get_be24(&header);
            ts = bytestream_get_be24(&header);
            ts |= bytestream_get_byte(&header) << 24;
            bytestream_get_be24(&header);
            rt->flv_size = pktsize;

            //force 12bytes header
            if (((pkttype == RTMP_PT_VIDEO || pkttype == RTMP_PT_AUDIO) && ts == 0) ||
                pkttype == RTMP_PT_NOTIFY) {
                if (pkttype == RTMP_PT_NOTIFY)
                    pktsize += 16;
                rt->prev_pkt[1][RTMP_SOURCE_CHANNEL].channel_id = 0;
            }

            //this can be a big packet, it's better to send it right here
            ff_rtmp_packet_create(&rt->out_pkt, RTMP_SOURCE_CHANNEL, pkttype, ts, pktsize);
            rt->out_pkt.extra = rt->main_channel_id;
            rt->flv_data = rt->out_pkt.data;

            if (pkttype == RTMP_PT_NOTIFY)
                ff_amf_write_string(&rt->flv_data, "@setDataFrame");
        }

        if (rt->flv_size - rt->flv_off > size_temp) {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, size_temp);
            rt->flv_off += size_temp;
            size_temp = 0;
        } else {
            bytestream_get_buffer(&buf_temp, rt->flv_data + rt->flv_off, rt->flv_size - rt->flv_off);
            size_temp   -= rt->flv_size - rt->flv_off;
            rt->flv_off += rt->flv_size - rt->flv_off;
        }

        if (rt->flv_off == rt->flv_size) {
            rt->skip_bytes = 4;

            ff_rtmp_packet_write(rt->stream, &rt->out_pkt, rt->chunk_size, rt->prev_pkt[1]);
            ff_rtmp_packet_destroy(&rt->out_pkt);
            rt->flv_size = 0;
            rt->flv_off = 0;
            rt->flv_header_bytes = 0;
        }
    } while (buf_temp - buf < size);
    return size;
}
#define OFFSET(x) offsetof(RTMPContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
#define ENC AV_OPT_FLAG_ENCODING_PARAM

static const AVOption rtmp_options[] = {
    {"rtmp_app", "Name of application to connect to on the RTMP server", OFFSET(app), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_flashver", "Version of the Flash plugin used to run the SWF player.", OFFSET(flashver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_live", "Specify that the media is a live stream.", OFFSET(live), AV_OPT_TYPE_INT, {-2}, INT_MIN, INT_MAX, DEC, "rtmp_live"},
    {"any", "both", 0, AV_OPT_TYPE_CONST, {-2}, 0, 0, DEC, "rtmp_live"},
    {"live", "live stream", 0, AV_OPT_TYPE_CONST, {-1}, 0, 0, DEC, "rtmp_live"},
    {"recorded", "recorded stream", 0, AV_OPT_TYPE_CONST, {0}, 0, 0, DEC, "rtmp_live"},
    {"rtmp_playpath", "Stream identifier to play or to publish", OFFSET(playpath), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_swfurl", "URL of the SWF player. By default no value will be sent", OFFSET(swfurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    {"rtmp_tcurl", "URL of the target stream. Defaults to rtmp://host[:port]/app.", OFFSET(tcurl), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, DEC|ENC},
    { NULL },
};

static const AVClass rtmp_class = {
    .class_name = "rtmp",
    .item_name  = av_default_item_name,
    .option     = rtmp_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

URLProtocol ff_rtmp_protocol = {
    .name            = "rtmp",
    .url_open        = rtmp_open,
    .url_read        = rtmp_read,
    .url_write       = rtmp_write,
    .url_close       = rtmp_close,
    .priv_data_size  = sizeof(RTMPContext),
    .flags           = URL_PROTOCOL_FLAG_NETWORK,
    .priv_data_class = &rtmp_class,
};