/*
 * VMware Screen Codec (VMnc) decoder
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file
 * VMware Screen Codec (VMnc) decoder
 * As Alex Beregszaszi discovered, this is effectively an RFB data dump
 */
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"

enum EncTypes {
    MAGIC_WMVd = 0x574D5664,
    MAGIC_WMVe,
    MAGIC_WMVf,
    MAGIC_WMVg,
    MAGIC_WMVh,
    MAGIC_WMVi,
    MAGIC_WMVj
};

enum HexTile_Flags {
    HT_RAW = 1, // tile is raw
    HT_BKG = 2, // background color is present
    HT_FG  = 4, // foreground color is present
    HT_SUB = 8, // subrects are present
    HT_CLR = 16 // each subrect has own color
};
/*
 * Decoder context
 */
typedef struct VmncContext {
    AVCodecContext *avctx;
    AVFrame pic;

    int bpp;
    int bpp2;
    int bigendian;
    uint8_t pal[768];
    int width, height;

    /* cursor data */
    int cur_w, cur_h;
    int cur_x, cur_y;
    int cur_hx, cur_hy;
    uint8_t *curbits, *curmask;
    uint8_t *screendta;
} VmncContext;
/* read pixel value from stream */
static av_always_inline int vmnc_get_pixel(const uint8_t *buf, int bpp, int be) {
    switch(bpp * 2 + be) {
    case 2:
    case 3: return *buf;
    case 4: return AV_RL16(buf);
    case 5: return AV_RB16(buf);
    case 8: return AV_RL32(buf);
    case 9: return AV_RB32(buf);
    default: return 0;
    }
}
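
/*
 * Load cursor shape: the payload carries cur_w * cur_h pixels of cursor
 * image data followed by cur_w * cur_h pixels of cursor mask, both stored
 * in the frame pixel format.
 */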
static void load_cursor(VmncContext *c, const uint8_t *src)
{
    int i, j, p;
    const int bpp = c->bpp2;
    uint8_t  *dst8  = c->curbits;
    uint16_t *dst16 = (uint16_t*)c->curbits;
    uint32_t *dst32 = (uint32_t*)c->curbits;

    for(j = 0; j < c->cur_h; j++) {
        for(i = 0; i < c->cur_w; i++) {
            p = vmnc_get_pixel(src, bpp, c->bigendian);
            src += bpp;
            if(bpp == 1) *dst8++  = p;
            if(bpp == 2) *dst16++ = p;
            if(bpp == 4) *dst32++ = p;
        }
    }
    dst8  = c->curmask;
    dst16 = (uint16_t*)c->curmask;
    dst32 = (uint32_t*)c->curmask;
    for(j = 0; j < c->cur_h; j++) {
        for(i = 0; i < c->cur_w; i++) {
            p = vmnc_get_pixel(src, bpp, c->bigendian);
            src += bpp;
            if(bpp == 1) *dst8++  = p;
            if(bpp == 2) *dst16++ = p;
            if(bpp == 4) *dst32++ = p;
        }
    }
}
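
/*
 * Paint the cursor onto the frame: the visible part of the cursor is first
 * clipped against the frame borders, then every covered pixel is combined as
 * (pixel & cursor_bits) ^ cursor_mask.
 */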
static void put_cursor(uint8_t *dst, int stride, VmncContext *c, int dx, int dy)
{
    int i, j;
    int w, h, x, y;
    w = c->cur_w;
    if(c->width  < c->cur_x + c->cur_w) w = c->width  - c->cur_x;
    h = c->cur_h;
    if(c->height < c->cur_y + c->cur_h) h = c->height - c->cur_y;
    x = c->cur_x;
    y = c->cur_y;
    if(x < 0) {
        w += x;
        x = 0;
    }
    if(y < 0) {
        h += y;
        y = 0;
    }
    if((w < 1) || (h < 1)) return;

    dst += x * c->bpp2 + y * stride;
    if(c->bpp2 == 1) {
        uint8_t *cd = c->curbits, *msk = c->curmask;
        for(j = 0; j < h; j++) {
            for(i = 0; i < w; i++)
                dst[i] = (dst[i] & cd[i]) ^ msk[i];
            msk += c->cur_w;
            cd  += c->cur_w;
            dst += stride;
        }
    } else if(c->bpp2 == 2) {
        uint16_t *cd = (uint16_t*)c->curbits, *msk = (uint16_t*)c->curmask;
        uint16_t *dst2;
        for(j = 0; j < h; j++) {
            dst2 = (uint16_t*)dst;
            for(i = 0; i < w; i++)
                dst2[i] = (dst2[i] & cd[i]) ^ msk[i];
            msk += c->cur_w;
            cd  += c->cur_w;
            dst += stride;
        }
    } else if(c->bpp2 == 4) {
        uint32_t *cd = (uint32_t*)c->curbits, *msk = (uint32_t*)c->curmask;
        uint32_t *dst2;
        for(j = 0; j < h; j++) {
            dst2 = (uint32_t*)dst;
            for(i = 0; i < w; i++)
                dst2[i] = (dst2[i] & cd[i]) ^ msk[i];
            msk += c->cur_w;
            cd  += c->cur_w;
            dst += stride;
        }
    }
}
/* fill rectangle with given color */
static av_always_inline void paint_rect(uint8_t *dst, int dx, int dy, int w, int h, int color, int bpp, int stride)
{
    int i, j;
    dst += dx * bpp + dy * stride;
    if(bpp == 1){
        for(j = 0; j < h; j++) {
            memset(dst, color, w);
            dst += stride;
        }
    }else if(bpp == 2){
        uint16_t *dst2;
        for(j = 0; j < h; j++) {
            dst2 = (uint16_t*)dst;
            for(i = 0; i < w; i++) {
                *dst2++ = color;
            }
            dst += stride;
        }
    }else if(bpp == 4){
        uint32_t *dst2;
        for(j = 0; j < h; j++) {
            dst2 = (uint32_t*)dst;
            for(i = 0; i < w; i++) {
                dst2[i] = color;
            }
            dst += stride;
        }
    }
}
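
/* copy a rectangle of raw pixels from the bitstream, converting endianness via vmnc_get_pixel() */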
static av_always_inline void paint_raw(uint8_t *dst, int w, int h, const uint8_t *src, int bpp, int be, int stride)
{
    int i, j, p;
    for(j = 0; j < h; j++) {
        for(i = 0; i < w; i++) {
            p = vmnc_get_pixel(src, bpp, be);
            src += bpp;
            switch(bpp){
            case 1:
                dst[i] = p;
                break;
            case 2:
                ((uint16_t*)dst)[i] = p;
                break;
            case 4:
                ((uint32_t*)dst)[i] = p;
                break;
            }
        }
        dst += stride;
    }
}
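
/*
 * Decode an RFB hextile-encoded rectangle: the area is processed in 16x16
 * tiles, each starting with a flags byte. HT_RAW tiles carry raw pixels;
 * otherwise the tile is filled with the (possibly updated) background color
 * and, if HT_SUB is set, overdrawn with subrectangles whose position and
 * size are packed into two nibble pairs (x/y and width-1/height-1), each
 * optionally preceded by its own color when HT_CLR is set.
 */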
static int decode_hextile(VmncContext *c, uint8_t *dst, const uint8_t *src, int ssize, int w, int h, int stride)
{
    int i, j, k;
    int bg = 0, fg = 0, rects, color, flags, xy, wh;
    const int bpp = c->bpp2;
    uint8_t *dst2;
    int bw = 16, bh = 16;
    const uint8_t *ssrc = src;

    for(j = 0; j < h; j += 16) {
        dst2 = dst;
        bw = 16;
        if(j + 16 > h) bh = h - j;
        for(i = 0; i < w; i += 16, dst2 += 16 * bpp) {
            if(src - ssrc >= ssize) {
                av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n");
                return -1;
            }
            if(i + 16 > w) bw = w - i;
            flags = *src++;
            if(flags & HT_RAW) {
                if(src - ssrc > ssize - bw * bh * bpp) {
                    av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n");
                    return -1;
                }
                paint_raw(dst2, bw, bh, src, bpp, c->bigendian, stride);
                src += bw * bh * bpp;
            } else {
                if(flags & HT_BKG) {
                    bg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp;
                }
                if(flags & HT_FG) {
                    fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp;
                }
                rects = 0;
                if(flags & HT_SUB)
                    rects = *src++;
                color = !!(flags & HT_CLR);
                paint_rect(dst2, 0, 0, bw, bh, bg, bpp, stride);

                if(src - ssrc > ssize - rects * (color * bpp + 2)) {
                    av_log(c->avctx, AV_LOG_ERROR, "Premature end of data!\n");
                    return -1;
                }
                for(k = 0; k < rects; k++) {
                    if(color) {
                        fg = vmnc_get_pixel(src, bpp, c->bigendian); src += bpp;
                    }
                    xy = *src++;
                    wh = *src++;
                    paint_rect(dst2, xy >> 4, xy & 0xF, (wh >> 4) + 1, (wh & 0xF) + 1, fg, bpp, stride);
                }
            }
        }
        dst += stride * 16;
    }
    return src - ssrc;
}
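
/*
 * Decode one packet. After two bytes that are skipped, the packet carries a
 * big-endian 16-bit chunk count; each chunk starts with a 12-byte header
 * (x, y, width, height as 16-bit big-endian values plus a 32-bit big-endian
 * encoding type) followed by encoding-specific data. Cursor state is tracked
 * separately so the screen under the cursor can be restored before decoding
 * and the cursor repainted afterwards.
 */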
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    VmncContext * const c = avctx->priv_data;
    uint8_t *outptr;
    const uint8_t *src = buf;
    int dx, dy, w, h, depth, enc, chunks, res, size_left;

    c->pic.reference = 3;
    c->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE | FF_BUFFER_HINTS_REUSABLE;
    if(avctx->reget_buffer(avctx, &c->pic) < 0){
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
        return -1;
    }

    c->pic.key_frame = 0;
    c->pic.pict_type = AV_PICTURE_TYPE_P;

    //restore screen after cursor
    if(c->screendta) {
        int i;
        w = c->cur_w;
        if(c->width  < c->cur_x + w) w = c->width  - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(outptr, c->screendta + i * c->cur_w * c->bpp2, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
        }
    }
    src += 2;
    chunks = AV_RB16(src); src += 2;
    while(chunks--) {
        if(buf_size - (src - buf) < 12) {
            av_log(avctx, AV_LOG_ERROR, "Premature end of data!\n");
            return -1;
        }
        dx  = AV_RB16(src); src += 2;
        dy  = AV_RB16(src); src += 2;
        w   = AV_RB16(src); src += 2;
        h   = AV_RB16(src); src += 2;
        enc = AV_RB32(src); src += 4;
        outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
        size_left = buf_size - (src - buf);
        switch(enc) {
        case MAGIC_WMVd: // cursor
            if (w*(int64_t)h*c->bpp2 > INT_MAX/2 - 2) {
                av_log(avctx, AV_LOG_ERROR, "dimensions too large\n");
                return AVERROR_INVALIDDATA;
            }
            if(size_left < 2 + w * h * c->bpp2 * 2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", 2 + w * h * c->bpp2 * 2, size_left);
                return -1;
            }
            src += 2;
            c->cur_w  = w;
            c->cur_h  = h;
            c->cur_hx = dx;
            c->cur_hy = dy;
            if((c->cur_hx > c->cur_w) || (c->cur_hy > c->cur_h)) {
                av_log(avctx, AV_LOG_ERROR, "Cursor hot spot is not in image: %ix%i of %ix%i cursor size\n", c->cur_hx, c->cur_hy, c->cur_w, c->cur_h);
                c->cur_hx = c->cur_hy = 0;
            }
            c->curbits   = av_realloc(c->curbits,   c->cur_w * c->cur_h * c->bpp2);
            c->curmask   = av_realloc(c->curmask,   c->cur_w * c->cur_h * c->bpp2);
            c->screendta = av_realloc(c->screendta, c->cur_w * c->cur_h * c->bpp2);
            load_cursor(c, src);
            src += w * h * c->bpp2 * 2;
            break;
        case MAGIC_WMVe: // unknown
            src += 2;
            break;
        case MAGIC_WMVf: // update cursor position
            c->cur_x = dx - c->cur_hx;
            c->cur_y = dy - c->cur_hy;
            break;
        case MAGIC_WMVg: // unknown
            src += 10;
            break;
        case MAGIC_WMVh: // unknown
            src += 4;
            break;
        case MAGIC_WMVi: // ServerInitialization struct
            c->pic.key_frame = 1;
            c->pic.pict_type = AV_PICTURE_TYPE_I;
            depth = *src++;
            if(depth != c->bpp) {
                av_log(avctx, AV_LOG_INFO, "Depth mismatch. Container %i bpp, Frame data: %i bpp\n", c->bpp, depth);
            }
            src++;
            c->bigendian = *src++;
            if(c->bigendian & (~1)) {
                av_log(avctx, AV_LOG_INFO, "Invalid header: bigendian flag = %i\n", c->bigendian);
                return -1;
            }
            //skip the rest of pixel format data
            src += 13;
            break;
        case MAGIC_WMVj: // unknown
            src += 2;
            break;
        case 0x00000000: // raw rectangle data
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            if(size_left < w * h * c->bpp2) {
                av_log(avctx, AV_LOG_ERROR, "Premature end of data! (need %i got %i)\n", w * h * c->bpp2, size_left);
                return -1;
            }
            paint_raw(outptr, w, h, src, c->bpp2, c->bigendian, c->pic.linesize[0]);
            src += w * h * c->bpp2;
            break;
        case 0x00000005: // HexTile encoded rectangle
            if((dx + w > c->width) || (dy + h > c->height)) {
                av_log(avctx, AV_LOG_ERROR, "Incorrect frame size: %ix%i+%ix%i of %ix%i\n", w, h, dx, dy, c->width, c->height);
                return -1;
            }
            res = decode_hextile(c, outptr, src, size_left, w, h, c->pic.linesize[0]);
            if(res < 0)
                return -1;
            src += res;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported block type 0x%08X\n", enc);
            chunks = 0; // leave chunks decoding loop
        }
    }
    if(c->screendta){
        int i;
        //save screen data before painting cursor
        w = c->cur_w;
        if(c->width  < c->cur_x + w) w = c->width  - c->cur_x;
        h = c->cur_h;
        if(c->height < c->cur_y + h) h = c->height - c->cur_y;
        dx = c->cur_x;
        if(dx < 0) {
            w += dx;
            dx = 0;
        }
        dy = c->cur_y;
        if(dy < 0) {
            h += dy;
            dy = 0;
        }
        if((w > 0) && (h > 0)) {
            outptr = c->pic.data[0] + dx * c->bpp2 + dy * c->pic.linesize[0];
            for(i = 0; i < h; i++) {
                memcpy(c->screendta + i * c->cur_w * c->bpp2, outptr, w * c->bpp2);
                outptr += c->pic.linesize[0];
            }
            outptr = c->pic.data[0];
            put_cursor(outptr, c->pic.linesize[0], c, c->cur_x, c->cur_y);
        }
    }
    *got_frame = 1;
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return buf_size;
}
/*
 *
 * Init VMnc decoder
 *
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    VmncContext * const c = avctx->priv_data;

    c->avctx  = avctx;
    c->width  = avctx->width;
    c->height = avctx->height;
    c->bpp    = avctx->bits_per_coded_sample;
    c->bpp2   = c->bpp / 8;
    avcodec_get_frame_defaults(&c->pic);

    switch(c->bpp){
    case 8:
        avctx->pix_fmt = AV_PIX_FMT_PAL8;
        break;
    case 16:
        avctx->pix_fmt = AV_PIX_FMT_RGB555;
        break;
    case 32:
        avctx->pix_fmt = AV_PIX_FMT_RGB32;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported bitdepth %i\n", c->bpp);
        return AVERROR_INVALIDDATA;
    }

    return 0;
}
/*
 *
 * Uninit VMnc decoder
 *
 */
static av_cold int decode_end(AVCodecContext *avctx)
{
    VmncContext * const c = avctx->priv_data;

    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    av_free(c->curbits);
    av_free(c->curmask);
    av_free(c->screendta);
    return 0;
}
AVCodec ff_vmnc_decoder = {
    .name           = "vmnc",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_VMNC,
    .priv_data_size = sizeof(VmncContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("VMware Screen Codec / VMware Video"),
};