/*
 * RemotelyAnywhere Screen Capture decoder
 *
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

#include <zlib.h>

#define KBND MKTAG('K', 'B', 'N', 'D')
#define FINT MKTAG('F', 'I', 'N', 'T')
#define INIT MKTAG('I', 'N', 'I', 'T')
#define BNDL MKTAG('B', 'N', 'D', 'L')
#define KFRM MKTAG('K', 'F', 'R', 'M')
#define DLTA MKTAG('D', 'L', 'T', 'A')
#define MOUS MKTAG('M', 'O', 'U', 'S')
#define MPOS MKTAG('M', 'P', 'O', 'S')
#define MOVE MKTAG('M', 'O', 'V', 'E')
#define EMPT MKTAG('E', 'M', 'P', 'T')

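/* Decoder state: two persistent reference planes (frame1/frame2), the most
 * recently decoded cursor image and its position, and a single zlib inflate
 * context that is reset before each compressed chunk. */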
typedef struct RASCContext {
    AVClass        *class;
    int             skip_cursor;
    GetByteContext  gb;
    uint8_t        *delta;
    int             delta_size;
    uint8_t        *cursor;
    int             cursor_size;
    unsigned        cursor_w;
    unsigned        cursor_h;
    unsigned        cursor_x;
    unsigned        cursor_y;
    int             stride;
    int             bpp;
    z_stream        zstream;
    AVFrame        *frame;
    AVFrame        *frame1;
    AVFrame        *frame2;
} RASCContext;

static void clear_plane(AVCodecContext *avctx, AVFrame *frame)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst = frame->data[0];

    if (!dst)
        return;

    for (int y = 0; y < avctx->height; y++) {
        memset(dst, 0, avctx->width * s->bpp);
        dst += frame->linesize[0];
    }
}

static void copy_plane(AVCodecContext *avctx, AVFrame *src, AVFrame *dst)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *srcp = src->data[0];
    uint8_t *dstp = dst->data[0];

    for (int y = 0; y < avctx->height; y++) {
        memcpy(dstp, srcp, s->stride);
        srcp += src->linesize[0];
        dstp += dst->linesize[0];
    }
}

static int init_frames(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    int ret;

    av_frame_unref(s->frame1);
    av_frame_unref(s->frame2);
    if ((ret = ff_get_buffer(avctx, s->frame1, 0)) < 0)
        return ret;

    if ((ret = ff_get_buffer(avctx, s->frame2, 0)) < 0)
        return ret;

    clear_plane(avctx, s->frame2);
    clear_plane(avctx, s->frame1);

    return 0;
}

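/* FINT/INIT chunk: read frame width, height and bit depth, pick the matching
 * pixel format (PAL8 / RGB555LE / BGR0), reallocate the two reference frames
 * and, for 8-bit content, load the 256-entry palette. */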
static int decode_fint(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, fmt;
    int ret;

    if (bytestream2_peek_le32(gb) != 0x65) {
        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        clear_plane(avctx, s->frame2);
        clear_plane(avctx, s->frame1);
        return 0;
    }
    bytestream2_skip(gb, 8);
    w   = bytestream2_get_le32(gb);
    h   = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 30);
    fmt = bytestream2_get_le16(gb);
    bytestream2_skip(gb, 24);

    switch (fmt) {
    case  8: s->stride = FFALIGN(w, 4);
             s->bpp    = 1;
             fmt       = AV_PIX_FMT_PAL8; break;
    case 16: s->stride = w * 2;
             s->bpp    = 2;
             fmt       = AV_PIX_FMT_RGB555LE; break;
    case 32: s->stride = w * 4;
             s->bpp    = 4;
             fmt       = AV_PIX_FMT_BGR0; break;
    default: return AVERROR_INVALIDDATA;
    }

    ret = ff_set_dimensions(avctx, w, h);
    if (ret < 0)
        return ret;
    avctx->width   = w;
    avctx->height  = h;
    avctx->pix_fmt = fmt;

    ret = init_frames(avctx);
    if (ret < 0)
        return ret;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        uint32_t *pal = (uint32_t *)s->frame2->data[1];

        for (int i = 0; i < 256; i++)
            pal[i] = bytestream2_get_le32(gb) | 0xFF000000u;
    }

    return 0;
}

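/* Inflate one zlib-compressed chunk payload into the s->delta scratch
 * buffer; the caller parses the uncompressed bytes from there. */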
static int decode_zlib(AVCodecContext *avctx, AVPacket *avpkt,
                       unsigned size, unsigned uncompressed_size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int zret;

    zret = inflateReset(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    av_fast_padded_malloc(&s->delta, &s->delta_size, uncompressed_size);
    if (!s->delta)
        return AVERROR(ENOMEM);

    s->zstream.next_in   = avpkt->data + bytestream2_tell(gb);
    s->zstream.avail_in  = FFMIN(size, bytestream2_get_bytes_left(gb));
    s->zstream.next_out  = s->delta;
    s->zstream.avail_out = s->delta_size;

    zret = inflate(&s->zstream, Z_FINISH);
    if (zret != Z_STREAM_END) {
        av_log(avctx, AV_LOG_ERROR,
               "Inflate failed with return code: %d.\n", zret);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}

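/* MOVE chunk: a list of rectangle operations (copy between the reference
 * frames, clear, or move within frame2), optionally zlib-compressed.
 * Rows are addressed bottom-up, so the row pointers walk backwards. */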
static int decode_move(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext mc;
    unsigned pos, compression, nb_moves;
    unsigned uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    nb_moves = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 8);
    compression = bytestream2_get_le32(gb);

    if (nb_moves > INT32_MAX / 16 || nb_moves > avctx->width * avctx->height)
        return AVERROR_INVALIDDATA;

    uncompressed_size = 16 * nb_moves;

    if (compression == 1) {
        ret = decode_zlib(avctx, avpkt,
                          size - (bytestream2_tell(gb) - pos),
                          uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&mc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        bytestream2_init(&mc, avpkt->data + bytestream2_tell(gb),
                         bytestream2_get_bytes_left(gb));
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (bytestream2_get_bytes_left(&mc) < uncompressed_size)
        return AVERROR_INVALIDDATA;

    for (int i = 0; i < nb_moves; i++) {
        int type, start_x, start_y, end_x, end_y, mov_x, mov_y;
        uint8_t *e2, *b1, *b2;
        int w, h;

        type    = bytestream2_get_le16(&mc);
        start_x = bytestream2_get_le16(&mc);
        start_y = bytestream2_get_le16(&mc);
        end_x   = bytestream2_get_le16(&mc);
        end_y   = bytestream2_get_le16(&mc);
        mov_x   = bytestream2_get_le16(&mc);
        mov_y   = bytestream2_get_le16(&mc);
        bytestream2_skip(&mc, 2);

        if (start_x >= avctx->width || start_y >= avctx->height ||
            end_x >= avctx->width || end_y >= avctx->height ||
            mov_x >= avctx->width || mov_y >= avctx->height) {
            continue;
        }

        if (start_x >= end_x || start_y >= end_y)
            continue;

        w = end_x - start_x;
        h = end_y - start_y;

        if (mov_x + w > avctx->width || mov_y + h > avctx->height)
            continue;

        if (!s->frame2->data[0] || !s->frame1->data[0])
            return AVERROR_INVALIDDATA;

        b1 = s->frame1->data[0] + s->frame1->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        b2 = s->frame2->data[0] + s->frame2->linesize[0] * (start_y + h - 1) + start_x * s->bpp;
        e2 = s->frame2->data[0] + s->frame2->linesize[0] * (mov_y + h - 1) + mov_x * s->bpp;

        if (type == 2) {
            for (int j = 0; j < h; j++) {
                memcpy(b1, b2, w * s->bpp);
                b1 -= s->frame1->linesize[0];
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 1) {
            for (int j = 0; j < h; j++) {
                memset(b2, 0, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else if (type == 0) {
            uint8_t *buffer;

            av_fast_padded_malloc(&s->delta, &s->delta_size, w * h * s->bpp);
            buffer = s->delta;
            if (!buffer)
                return AVERROR(ENOMEM);

            for (int j = 0; j < h; j++) {
                memcpy(buffer + j * w * s->bpp, e2, w * s->bpp);
                e2 -= s->frame2->linesize[0];
            }

            for (int j = 0; j < h; j++) {
                memcpy(b2, buffer + j * w * s->bpp, w * s->bpp);
                b2 -= s->frame2->linesize[0];
            }
        } else {
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

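/* Advance to the next (bottom-up) row once the current row of the delta
 * rectangle has been fully consumed, and count down the run length. */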
#define NEXT_LINE                      \
    if (cx >= w * s->bpp) {            \
        cx = 0;                        \
        cy--;                          \
        b1 -= s->frame1->linesize[0];  \
        b2 -= s->frame2->linesize[0];  \
    }                                  \
    len--;

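/* DLTA chunk: run-length coded update of a rectangle. Each opcode either
 * skips pixels, swaps pixels between frame1 and frame2, or copies frame2
 * into frame1 and writes new byte or 32-bit values into frame2. */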
static int decode_dlta(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    GetByteContext dc;
    unsigned uncompressed_size, pos;
    unsigned x, y, w, h;
    int ret, cx, cy, compression;
    uint8_t *b1, *b2;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);
    x = bytestream2_get_le32(gb);
    y = bytestream2_get_le32(gb);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);

    if (x >= avctx->width || y >= avctx->height ||
        w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (x + w > avctx->width || y + h > avctx->height)
        return AVERROR_INVALIDDATA;

    bytestream2_skip(gb, 4);
    compression = bytestream2_get_le32(gb);

    if (compression == 1) {
        if (w * h * s->bpp * 3 < uncompressed_size)
            return AVERROR_INVALIDDATA;
        ret = decode_zlib(avctx, avpkt, size, uncompressed_size);
        if (ret < 0)
            return ret;
        bytestream2_init(&dc, s->delta, uncompressed_size);
    } else if (compression == 0) {
        if (bytestream2_get_bytes_left(gb) < uncompressed_size)
            return AVERROR_INVALIDDATA;
        bytestream2_init(&dc, avpkt->data + bytestream2_tell(gb),
                         uncompressed_size);
    } else if (compression == 2) {
        avpriv_request_sample(avctx, "compression %d", compression);
        return AVERROR_PATCHWELCOME;
    } else {
        return AVERROR_INVALIDDATA;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    b1 = s->frame1->data[0] + s->frame1->linesize[0] * (y + h - 1) + x * s->bpp;
    b2 = s->frame2->data[0] + s->frame2->linesize[0] * (y + h - 1) + x * s->bpp;
    cx = 0, cy = h;
    while (bytestream2_get_bytes_left(&dc) > 0) {
        int type = bytestream2_get_byte(&dc);
        int len  = bytestream2_get_byte(&dc);
        unsigned fill;

        switch (type) {
        case 1:
            while (len > 0 && cy > 0) {
                cx++;
                NEXT_LINE
            }
            break;
        case 2:
            while (len > 0 && cy > 0) {
                int v0 = b1[cx];
                int v1 = b2[cx];

                b2[cx] = v0;
                b1[cx] = v1;
                cx++;
                NEXT_LINE
            }
            break;
        case 3:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_byte(&dc);
                b1[cx] = b2[cx];
                b2[cx] = fill;
                cx++;
                NEXT_LINE
            }
            break;
        case 4:
            fill = bytestream2_get_byte(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx++;
                NEXT_LINE
            }
            break;
        case 7:
            fill = bytestream2_get_le32(&dc);
            while (len > 0 && cy > 0) {
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 10:
            while (len > 0 && cy > 0) {
                cx += 4;
                NEXT_LINE
            }
            break;
        case 12:
            while (len > 0 && cy > 0) {
                unsigned v0, v1;

                v0 = AV_RL32(b2 + cx);
                v1 = AV_RL32(b1 + cx);
                AV_WL32(b2 + cx, v1);
                AV_WL32(b1 + cx, v0);
                cx += 4;
                NEXT_LINE
            }
            break;
        case 13:
            while (len > 0 && cy > 0) {
                fill = bytestream2_get_le32(&dc);
                AV_WL32(b1 + cx, AV_RL32(b2 + cx));
                AV_WL32(b2 + cx, fill);
                cx += 4;
                NEXT_LINE
            }
            break;
        default:
            avpriv_request_sample(avctx, "runlen %d", type);
            return AVERROR_INVALIDDATA;
        }
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

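/* KFRM chunk: a zlib stream holding two full bottom-up images, inflated
 * row by row into frame2 and then frame1. */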
static int decode_kfrm(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    uint8_t *dst;
    unsigned pos;
    int zret, ret;

    pos = bytestream2_tell(gb);
    if (bytestream2_peek_le32(gb) == 0x65) {
        ret = decode_fint(avctx, avpkt, size);
        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0])
        return AVERROR_INVALIDDATA;

    zret = inflateReset(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate reset error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    s->zstream.next_in  = avpkt->data + bytestream2_tell(gb);
    s->zstream.avail_in = bytestream2_get_bytes_left(gb);

    dst = s->frame2->data[0] + (avctx->height - 1) * s->frame2->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        s->zstream.next_out  = dst;
        s->zstream.avail_out = s->stride;

        zret = inflate(&s->zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame2->linesize[0];
    }

    dst = s->frame1->data[0] + (avctx->height - 1) * s->frame1->linesize[0];
    for (int i = 0; i < avctx->height; i++) {
        s->zstream.next_out  = dst;
        s->zstream.avail_out = s->stride;

        zret = inflate(&s->zstream, Z_SYNC_FLUSH);
        if (zret != Z_OK && zret != Z_STREAM_END) {
            av_log(avctx, AV_LOG_ERROR,
                   "Inflate failed with return code: %d.\n", zret);
            return AVERROR_INVALIDDATA;
        }

        dst -= s->frame1->linesize[0];
    }

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

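/* MOUS chunk: zlib-compressed cursor image, 3 bytes per pixel, read
 * bottom-up when drawn; kept in s->cursor until replaced. */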
static int decode_mous(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned w, h, pos, uncompressed_size;
    int ret;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    w = bytestream2_get_le32(gb);
    h = bytestream2_get_le32(gb);
    bytestream2_skip(gb, 12);
    uncompressed_size = bytestream2_get_le32(gb);

    if (w > avctx->width || h > avctx->height)
        return AVERROR_INVALIDDATA;

    if (uncompressed_size != 3 * w * h)
        return AVERROR_INVALIDDATA;

    av_fast_padded_malloc(&s->cursor, &s->cursor_size, uncompressed_size);
    if (!s->cursor)
        return AVERROR(ENOMEM);

    ret = decode_zlib(avctx, avpkt,
                      size - (bytestream2_tell(gb) - pos),
                      uncompressed_size);
    if (ret < 0)
        return ret;
    memcpy(s->cursor, s->delta, uncompressed_size);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    s->cursor_w = w;
    s->cursor_h = h;

    return 0;
}

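/* MPOS chunk: new cursor position. */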
static int decode_mpos(AVCodecContext *avctx,
                       AVPacket *avpkt, unsigned size)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    unsigned pos;

    pos = bytestream2_tell(gb);
    bytestream2_skip(gb, 8);
    s->cursor_x = bytestream2_get_le32(gb);
    s->cursor_y = bytestream2_get_le32(gb);

    bytestream2_skip(gb, size - (bytestream2_tell(gb) - pos));

    return 0;
}

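/* Blit the cursor into the output frame. Pixels matching the first stored
 * cursor pixel (s->cursor[0..2]) are treated as transparent; for PAL8
 * output each cursor pixel is mapped to the nearest palette entry. */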
static void draw_cursor(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    uint8_t *dst, *pal;

    if (!s->cursor)
        return;

    if (s->cursor_x >= avctx->width || s->cursor_y >= avctx->height)
        return;

    if (s->cursor_x + s->cursor_w > avctx->width ||
        s->cursor_y + s->cursor_h > avctx->height)
        return;

    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        pal = s->frame->data[1];
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];
                int best = INT_MAX;
                int index = 0;
                int dist;

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + (s->cursor_x + j);
                for (int k = 0; k < 256; k++) {
                    int pr = pal[k * 4 + 0];
                    int pg = pal[k * 4 + 1];
                    int pb = pal[k * 4 + 2];

                    dist = FFABS(cr - pr) + FFABS(cg - pg) + FFABS(cb - pb);
                    if (dist < best) {
                        best = dist;
                        index = k;
                    }
                }
                dst[0] = index;
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_RGB555LE) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                cr >>= 3; cg >>= 3; cb >>= 3;
                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 2 * (s->cursor_x + j);
                AV_WL16(dst, cr | cg << 5 | cb << 10);
            }
        }
    } else if (avctx->pix_fmt == AV_PIX_FMT_BGR0) {
        for (int i = 0; i < s->cursor_h; i++) {
            for (int j = 0; j < s->cursor_w; j++) {
                int cr = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 0];
                int cg = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 1];
                int cb = s->cursor[3 * s->cursor_w * (s->cursor_h - i - 1) + 3 * j + 2];

                if (cr == s->cursor[0] && cg == s->cursor[1] && cb == s->cursor[2])
                    continue;

                dst = s->frame->data[0] + s->frame->linesize[0] * (s->cursor_y + i) + 4 * (s->cursor_x + j);
                dst[0] = cb;
                dst[1] = cg;
                dst[2] = cr;
            }
        }
    }
}

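/* A packet is a sequence of chunks, each tagged with a FourCC and a size.
 * KBND/BNDL wrap the real chunk tag; KBND marks the packet as a keyframe.
 * After all chunks are processed, frame2 is copied into the output frame. */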
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    RASCContext *s = avctx->priv_data;
    GetByteContext *gb = &s->gb;
    int ret = 0, intra = 0;
    AVFrame *frame = data;

    bytestream2_init(gb, avpkt->data, avpkt->size);

    if (bytestream2_peek_le32(gb) == EMPT)
        return avpkt->size;

    s->frame = frame;

    while (bytestream2_get_bytes_left(gb) > 0) {
        unsigned type, size = 0;

        if (bytestream2_get_bytes_left(gb) < 8)
            return AVERROR_INVALIDDATA;

        type = bytestream2_get_le32(gb);
        if (type == KBND || type == BNDL) {
            intra = type == KBND;
            type = bytestream2_get_le32(gb);
        }

        size = bytestream2_get_le32(gb);
        if (bytestream2_get_bytes_left(gb) < size)
            return AVERROR_INVALIDDATA;

        switch (type) {
        case FINT:
        case INIT:
            ret = decode_fint(avctx, avpkt, size);
            break;
        case KFRM:
            ret = decode_kfrm(avctx, avpkt, size);
            break;
        case DLTA:
            ret = decode_dlta(avctx, avpkt, size);
            break;
        case MOVE:
            ret = decode_move(avctx, avpkt, size);
            break;
        case MOUS:
            ret = decode_mous(avctx, avpkt, size);
            break;
        case MPOS:
            ret = decode_mpos(avctx, avpkt, size);
            break;
        default:
            bytestream2_skip(gb, size);
        }

        if (ret < 0)
            return ret;
    }

    if (!s->frame2->data[0] || !s->frame1->data[0])
        return AVERROR_INVALIDDATA;

    if ((ret = ff_get_buffer(avctx, s->frame, 0)) < 0)
        return ret;

    copy_plane(avctx, s->frame2, s->frame);
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
        memcpy(s->frame->data[1], s->frame2->data[1], 1024);
    if (!s->skip_cursor)
        draw_cursor(avctx);

    s->frame->key_frame = intra;
    s->frame->pict_type = intra ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

    *got_frame = 1;

    return avpkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;
    int zret;

    s->zstream.zalloc = Z_NULL;
    s->zstream.zfree  = Z_NULL;
    s->zstream.opaque = Z_NULL;
    zret = inflateInit(&s->zstream);
    if (zret != Z_OK) {
        av_log(avctx, AV_LOG_ERROR, "Inflate init error: %d\n", zret);
        return AVERROR_EXTERNAL;
    }

    s->frame1 = av_frame_alloc();
    s->frame2 = av_frame_alloc();
    if (!s->frame1 || !s->frame2)
        return AVERROR(ENOMEM);

    return 0;
}

static av_cold int decode_close(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    av_freep(&s->cursor);
    s->cursor_size = 0;
    av_freep(&s->delta);
    s->delta_size = 0;
    av_frame_free(&s->frame1);
    av_frame_free(&s->frame2);
    inflateEnd(&s->zstream);

    return 0;
}

static void decode_flush(AVCodecContext *avctx)
{
    RASCContext *s = avctx->priv_data;

    clear_plane(avctx, s->frame1);
    clear_plane(avctx, s->frame2);
}

static const AVOption options[] = {
    { "skip_cursor", "skip the cursor", offsetof(RASCContext, skip_cursor), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_DECODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM },
    { NULL },
};

static const AVClass rasc_decoder_class = {
    .class_name = "rasc decoder",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_rasc_decoder = {
    .name           = "rasc",
    .long_name      = NULL_IF_CONFIG_SMALL("RemotelyAnywhere Screen Capture"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RASC,
    .priv_data_size = sizeof(RASCContext),
    .init           = decode_init,
    .close          = decode_close,
    .decode         = decode_frame,
    .flush          = decode_flush,
    .capabilities   = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE |
                      FF_CODEC_CAP_INIT_CLEANUP,
    .priv_class     = &rasc_decoder_class,
};