/*
 * Go2Webinar decoder
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Go2Webinar decoder
 */

#include <inttypes.h>
#include <zlib.h>

#include "libavutil/intreadwrite.h"

#include "avcodec.h"
#include "blockdsp.h"
#include "bytestream.h"
#include "get_bits.h"
#include "idctdsp.h"
#include "internal.h"
#include "jpegtables.h"
#include "mjpeg.h"

enum ChunkType {
    DISPLAY_INFO = 0xC8,
    TILE_DATA,
    CURSOR_POS,
    CURSOR_SHAPE,
    CHUNK_CC,
    CHUNK_CD
};

enum Compression {
    COMPR_EPIC_J_B  = 2,
    COMPR_KEMPF_J_B,
};
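
/*
 * Default luma/chroma quantisation matrices.  These appear to be the JPEG
 * example tables from ITU-T T.81 Annex K scaled to roughly the common
 * "quality 75" level.
 */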
static const uint8_t luma_quant[64] = {
     8,  6,  5,  8, 12, 20, 26, 31,
     6,  6,  7, 10, 13, 29, 30, 28,
     7,  7,  8, 12, 20, 29, 35, 28,
     7,  9, 11, 15, 26, 44, 40, 31,
     9, 11, 19, 28, 34, 55, 52, 39,
    12, 18, 28, 32, 41, 52, 57, 46,
    25, 32, 39, 44, 52, 61, 60, 51,
    36, 46, 48, 49, 56, 50, 52, 50
};

static const uint8_t chroma_quant[64] = {
     9,  9, 12, 24, 50, 50, 50, 50,
     9, 11, 13, 33, 50, 50, 50, 50,
    12, 13, 28, 50, 50, 50, 50, 50,
    24, 33, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
};

typedef struct JPGContext {
    BlockDSPContext bdsp;
    IDCTDSPContext  idsp;
    ScanTable       scantable;
    VLC             dc_vlc[2], ac_vlc[2];
    int             prev_dc[3];
    DECLARE_ALIGNED(16, int16_t, block)[6][64];

    uint8_t *buf;
} JPGContext;

typedef struct G2MContext {
    JPGContext jc;
    int        version;

    int compression;
    int width, height, bpp;
    int orig_width, orig_height;
    int tile_width, tile_height;
    int tiles_x, tiles_y, tile_x, tile_y;

    int got_header;

    uint8_t *framebuf;
    int      framebuf_stride, old_width, old_height;

    uint8_t *synth_tile, *jpeg_tile;
    int      tile_stride, old_tile_w, old_tile_h;

    uint8_t *kempf_buf, *kempf_flags;

    uint8_t *cursor;
    int      cursor_stride;
    int      cursor_fmt;
    int      cursor_w, cursor_h, cursor_x, cursor_y;
    int      cursor_hot_x, cursor_hot_y;
} G2MContext;
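
/*
 * Build VLC tables from the standard JPEG Huffman descriptors (the
 * avpriv_mjpeg_* bits/values tables).  AC symbols are stored with an extra
 * +16 so that the high nibble of a decoded symbol is already "run + 1";
 * the end-of-block code is remapped to 0x1000, which pushes the coefficient
 * index past 63 and terminates the block in jpg_decode_block().
 */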
static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
                             const uint8_t *val_table, int nb_codes,
                             int is_ac)
{
    uint8_t  huff_size[256] = { 0 };
    uint16_t huff_code[256];
    uint16_t huff_sym[256];
    int i;

    ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);

    for (i = 0; i < 256; i++)
        huff_sym[i] = i + 16 * is_ac;

    if (is_ac)
        huff_sym[0] = 16 * 256;

    return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
                              huff_code, 2, 2, huff_sym, 2, 2, 0);
}

static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
{
    int ret;

    ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
                    avpriv_mjpeg_val_dc, 12, 0);
    if (ret)
        return ret;
    ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
                    avpriv_mjpeg_val_dc, 12, 0);
    if (ret)
        return ret;
    ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
                    avpriv_mjpeg_val_ac_luminance, 251, 1);
    if (ret)
        return ret;
    ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
                    avpriv_mjpeg_val_ac_chrominance, 251, 1);
    if (ret)
        return ret;

    ff_blockdsp_init(&c->bdsp, avctx);
    ff_idctdsp_init(&c->idsp, avctx);
    ff_init_scantable(c->idsp.idct_permutation, &c->scantable,
                      ff_zigzag_direct);

    return 0;
}

static av_cold void jpg_free_context(JPGContext *ctx)
{
    int i;

    for (i = 0; i < 2; i++) {
        ff_free_vlc(&ctx->dc_vlc[i]);
        ff_free_vlc(&ctx->ac_vlc[i]);
    }

    av_freep(&ctx->buf);
}
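
/*
 * Remove JPEG byte stuffing: a 0x00 byte that follows 0xFF in the
 * entropy-coded data is dropped so the payload can be bit-read directly.
 */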
static void jpg_unescape(const uint8_t *src, int src_size,
                         uint8_t *dst, int *dst_size)
{
    const uint8_t *src_end = src + src_size;
    uint8_t *dst_start = dst;

    while (src < src_end) {
        uint8_t x = *src++;

        *dst++ = x;

        if (x == 0xFF && !*src)
            src++;
    }
    *dst_size = dst - dst_start;
}
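
/*
 * Decode one 8x8 block: a DC delta against the per-plane predictor in
 * prev_dc[], then run/level AC coefficients.  Dequantisation is done on the
 * fly with the tables above and the coefficients are stored through the
 * IDCT scantable permutation.
 */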
static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
                            int plane, int16_t *block)
{
    int dc, val, pos;
    const int is_chroma = !!plane;
    const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;

    c->bdsp.clear_block(block);
    dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
    if (dc < 0)
        return AVERROR_INVALIDDATA;
    if (dc)
        dc = get_xbits(gb, dc);
    dc                = dc * qmat[0] + c->prev_dc[plane];
    block[0]          = dc;
    c->prev_dc[plane] = dc;

    pos = 0;
    while (pos < 63) {
        val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
        if (val < 0)
            return AVERROR_INVALIDDATA;
        pos += val >> 4;
        val &= 0xF;
        if (pos > 63)
            return val ? AVERROR_INVALIDDATA : 0;
        if (val) {
            int nbits = val;

            val                                 = get_xbits(gb, nbits);
            val                                *= qmat[ff_zigzag_direct[pos]];
            block[c->scantable.permutated[pos]] = val;
        }
    }
    return 0;
}
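
/*
 * Fixed-point BT.601 YUV -> RGB conversion: the constants are the usual
 * coefficients scaled by 65536 (e.g. 91881 ~= 1.402 * 65536), with +32768
 * added for rounding before the shift.
 */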
static inline void yuv2rgb(uint8_t *out, int Y, int U, int V)
{
    out[0] = av_clip_uint8(Y + (             91881 * V + 32768 >> 16));
    out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
    out[2] = av_clip_uint8(Y + (116130 * U             + 32768 >> 16));
}
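
/*
 * Decode a JPEG-coded region (4:2:0, 16x16 macroblocks of four luma and two
 * chroma blocks) straight into packed RGB24.  An optional mask with one flag
 * per 8x8 luma block selects which blocks are actually coded; num_mbs is the
 * budget of coded luma blocks, and swapuv exchanges the two chroma planes.
 */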
static int jpg_decode_data(JPGContext *c, int width, int height,
                           const uint8_t *src, int src_size,
                           uint8_t *dst, int dst_stride,
                           const uint8_t *mask, int mask_stride, int num_mbs,
                           int swapuv)
{
    GetBitContext gb;
    int mb_w, mb_h, mb_x, mb_y, i, j;
    int bx, by;
    int unesc_size;
    int ret;

    if ((ret = av_reallocp(&c->buf,
                           src_size + FF_INPUT_BUFFER_PADDING_SIZE)) < 0)
        return ret;
    jpg_unescape(src, src_size, c->buf, &unesc_size);
    memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    init_get_bits(&gb, c->buf, unesc_size * 8);

    width = FFALIGN(width, 16);
    mb_w  = width >> 4;
    mb_h  = (height + 15) >> 4;

    if (!num_mbs)
        num_mbs = mb_w * mb_h * 4;

    for (i = 0; i < 3; i++)
        c->prev_dc[i] = 1024;
    bx =
    by = 0;
    c->bdsp.clear_blocks(c->block[0]);
    for (mb_y = 0; mb_y < mb_h; mb_y++) {
        for (mb_x = 0; mb_x < mb_w; mb_x++) {
            if (mask && !mask[mb_x * 2] && !mask[mb_x * 2 + 1] &&
                !mask[mb_x * 2 + mask_stride] &&
                !mask[mb_x * 2 + 1 + mask_stride]) {
                bx += 16;
                continue;
            }
            for (j = 0; j < 2; j++) {
                for (i = 0; i < 2; i++) {
                    if (mask && !mask[mb_x * 2 + i + j * mask_stride])
                        continue;
                    num_mbs--;
                    if ((ret = jpg_decode_block(c, &gb, 0,
                                                c->block[i + j * 2])) != 0)
                        return ret;
                    c->idsp.idct(c->block[i + j * 2]);
                }
            }
            for (i = 1; i < 3; i++) {
                if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
                    return ret;
                c->idsp.idct(c->block[i + 3]);
            }

            for (j = 0; j < 16; j++) {
                uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
                for (i = 0; i < 16; i++) {
                    int Y, U, V;

                    Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
                    U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
                    V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
                    yuv2rgb(out + i * 3, Y, U, V);
                }
            }

            if (!num_mbs)
                return 0;
            bx += 16;
        }
        bx  = 0;
        by += 16;
        if (mask)
            mask += mask_stride * 2;
    }
    return 0;
}
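
/*
 * Overlay the palettised part of a "Kempf" tile on the destination.  Each
 * row starts with a flag byte: a non-zero flag leaves the row untouched,
 * otherwise every pixel is a palette index of 1/2/4/8 bits (depending on the
 * palette size), and the special index tidx means "take this pixel from the
 * JPEG tile" instead of the palette.
 */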
static void kempf_restore_buf(const uint8_t *src, int len,
                              uint8_t *dst, int stride,
                              const uint8_t *jpeg_tile, int tile_stride,
                              int width, int height,
                              const uint8_t *pal, int npal, int tidx)
{
    GetBitContext gb;
    int i, j, nb, col;

    init_get_bits(&gb, src, len * 8);

    if (npal <= 2)       nb = 1;
    else if (npal <= 4)  nb = 2;
    else if (npal <= 16) nb = 4;
    else                 nb = 8;

    for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
        if (get_bits(&gb, 8))
            continue;
        for (i = 0; i < width; i++) {
            col = get_bits(&gb, nb);
            if (col != tidx)
                memcpy(dst + i * 3, pal + col * 3, 3);
            else
                memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
        }
    }
}
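
/*
 * Decode one tile with "Kempf" compression.  The top three bits of the first
 * byte select the sub-type: 0 fills the tile with a single colour, 1 is a
 * plain JPEG tile, 2 is purely palettised, and the remaining sub-types mix a
 * palette (with a transparent colour) and JPEG data for the 8x8 blocks
 * flagged as coded in the block bitmap.
 */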
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
                             const uint8_t *src, int src_size)
{
    int width, height;
    int hdr, zsize, npal, tidx = -1, ret;
    int i, j;
    const uint8_t *src_end = src + src_size;
    uint8_t pal[768], transp[3];
    uLongf dlen = (c->tile_width + 1) * c->tile_height;
    int sub_type;
    int nblocks, cblocks, bstride;
    int bits, bitbuf, coded;
    uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
                   tile_y * c->tile_height * c->framebuf_stride;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    width  = FFMIN(c->width  - tile_x * c->tile_width,  c->tile_width);
    height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);

    hdr      = *src++;
    sub_type = hdr >> 5;
    if (sub_type == 0) {
        int j;
        memcpy(transp, src, 3);
        src += 3;
        for (j = 0; j < height; j++, dst += c->framebuf_stride)
            for (i = 0; i < width; i++)
                memcpy(dst + i * 3, transp, 3);
        return 0;
    } else if (sub_type == 1) {
        return jpg_decode_data(&c->jc, width, height, src, src_end - src,
                               dst, c->framebuf_stride, NULL, 0, 0, 0);
    }

    if (sub_type != 2) {
        memcpy(transp, src, 3);
        src += 3;
    }
    npal = *src++ + 1;
    memcpy(pal, src, npal * 3);
    src += npal * 3;
    if (sub_type != 2) {
        for (i = 0; i < npal; i++) {
            if (!memcmp(pal + i * 3, transp, 3)) {
                tidx = i;
                break;
            }
        }
    }

    if (src_end - src < 2)
        return 0;
    zsize = (src[0] << 8) | src[1];
    src  += 2;
    if (src_end - src < zsize)
        return AVERROR_INVALIDDATA;
    ret = uncompress(c->kempf_buf, &dlen, src, zsize);
    if (ret)
        return AVERROR_INVALIDDATA;
    src += zsize;

    if (sub_type == 2) {
        kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                          NULL, 0, width, height, pal, npal, tidx);
        return 0;
    }

    nblocks = *src++ + 1;
    cblocks = 0;
    bstride = FFALIGN(width, 16) >> 3;
    // blocks are coded LSB and we need normal bitreader for JPEG data
    bits = 0;
    for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
        for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
            if (!bits) {
                bitbuf = *src++;
                bits   = 8;
            }
            coded = bitbuf & 1;
            bits--;
            bitbuf >>= 1;
            cblocks += coded;
            if (cblocks > nblocks)
                return AVERROR_INVALIDDATA;
            c->kempf_flags[j * 2     +  i * 2      * bstride] =
            c->kempf_flags[j * 2 + 1 +  i * 2      * bstride] =
            c->kempf_flags[j * 2     + (i * 2 + 1) * bstride] =
            c->kempf_flags[j * 2 + 1 + (i * 2 + 1) * bstride] = coded;
        }
    }

    memset(c->jpeg_tile, 0, c->tile_stride * height);
    jpg_decode_data(&c->jc, width, height, src, src_end - src,
                    c->jpeg_tile, c->tile_stride,
                    c->kempf_flags, bstride, nblocks * 4, 0);

    kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                      c->jpeg_tile, c->tile_stride,
                      width, height, pal, npal, tidx);

    return 0;
}
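
/*
 * (Re)allocate the frame buffer and the per-tile scratch buffers whenever
 * they are missing or the stored old dimensions are smaller than the current
 * ones.  All buffers are zero-initialised.
 */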
static int g2m_init_buffers(G2MContext *c)
{
    int aligned_height;

    if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
        c->framebuf_stride = FFALIGN(c->width * 3, 16);
        aligned_height     = FFALIGN(c->height,    16);
        av_free(c->framebuf);
        c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
        if (!c->framebuf)
            return AVERROR(ENOMEM);
    }
    if (!c->synth_tile || !c->jpeg_tile ||
        c->old_tile_w < c->tile_width ||
        c->old_tile_h < c->tile_height) {
        c->tile_stride = FFALIGN(c->tile_width * 3, 16);
        aligned_height = FFALIGN(c->tile_height,    16);
        av_free(c->synth_tile);
        av_free(c->jpeg_tile);
        av_free(c->kempf_buf);
        av_free(c->kempf_flags);
        c->synth_tile  = av_mallocz(c->tile_stride      * aligned_height);
        c->jpeg_tile   = av_mallocz(c->tile_stride      * aligned_height);
        c->kempf_buf   = av_mallocz((c->tile_width + 1) * aligned_height
                                    + FF_INPUT_BUFFER_PADDING_SIZE);
        c->kempf_flags = av_mallocz(c->tile_width       * aligned_height);
        if (!c->synth_tile || !c->jpeg_tile ||
            !c->kempf_buf || !c->kempf_flags)
            return AVERROR(ENOMEM);
    }
    return 0;
}
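
/*
 * Parse a CURSOR_SHAPE chunk: a 32-bit payload size, one-byte width, height,
 * hotspot x/y and a format byte.  Format 1 is a pair of 1bpp bitmaps
 * (presumably the classic colour/mask cursor planes) expanded to 4 bytes per
 * pixel; format 32 is full 32-bit colour preceded by a monochrome copy that
 * is skipped.  The cursor is stored with the alpha value in the first byte
 * of each pixel, the layout g2m_paint_cursor() expects.
 */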
static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
                           GetByteContext *gb)
{
    int i, j, k;
    uint8_t *dst;
    uint32_t bits;
    uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
    uint32_t cursor_hot_x, cursor_hot_y;
    int cursor_fmt, err;

    cur_size     = bytestream2_get_be32(gb);
    cursor_w     = bytestream2_get_byte(gb);
    cursor_h     = bytestream2_get_byte(gb);
    cursor_hot_x = bytestream2_get_byte(gb);
    cursor_hot_y = bytestream2_get_byte(gb);
    cursor_fmt   = bytestream2_get_byte(gb);

    cursor_stride = FFALIGN(cursor_w, 32) * 4;

    if (cursor_w < 1 || cursor_w > 256 ||
        cursor_h < 1 || cursor_h > 256) {
        av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %"PRIu32"x%"PRIu32"\n",
               cursor_w, cursor_h);
        return AVERROR_INVALIDDATA;
    }
    if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
        av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %"PRIu32",%"PRIu32"\n",
               cursor_hot_x, cursor_hot_y);
        cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
        cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);
    }
    if (cur_size - 9 > bytestream2_get_bytes_left(gb) ||
        c->cursor_w * c->cursor_h / 4 > cur_size) {
        av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"/%u\n",
               cur_size, bytestream2_get_bytes_left(gb));
        return AVERROR_INVALIDDATA;
    }
    if (cursor_fmt != 1 && cursor_fmt != 32) {
        avpriv_report_missing_feature(avctx, "Cursor format %d",
                                      cursor_fmt);
        return AVERROR_PATCHWELCOME;
    }

    if ((err = av_reallocp(&c->cursor, cursor_stride * cursor_h)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n");
        return err;
    }

    c->cursor_w      = cursor_w;
    c->cursor_h      = cursor_h;
    c->cursor_hot_x  = cursor_hot_x;
    c->cursor_hot_y  = cursor_hot_y;
    c->cursor_fmt    = cursor_fmt;
    c->cursor_stride = cursor_stride;

    dst = c->cursor;
    switch (c->cursor_fmt) {
    case 1: // old monochrome
        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i += 32) {
                bits = bytestream2_get_be32(gb);
                for (k = 0; k < 32; k++) {
                    dst[0] = !!(bits & 0x80000000);
                    dst   += 4;
                    bits <<= 1;
                }
            }
            dst += c->cursor_stride - c->cursor_w * 4;
        }

        dst = c->cursor;
        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i += 32) {
                bits = bytestream2_get_be32(gb);
                for (k = 0; k < 32; k++) {
                    int mask_bit = !!(bits & 0x80000000);
                    switch (dst[0] * 2 + mask_bit) {
                    case 0:
                        dst[0] = 0xFF;
                        dst[1] = 0x00;
                        dst[2] = 0x00;
                        dst[3] = 0x00;
                        break;
                    case 1:
                        dst[0] = 0xFF;
                        dst[1] = 0xFF;
                        dst[2] = 0xFF;
                        dst[3] = 0xFF;
                        break;
                    default:
                        dst[0] = 0x00;
                        dst[1] = 0x00;
                        dst[2] = 0x00;
                        dst[3] = 0x00;
                    }
                    dst   += 4;
                    bits <<= 1;
                }
            }
            dst += c->cursor_stride - c->cursor_w * 4;
        }
        break;
    case 32: // full colour
        /* skip monochrome version of the cursor and decode RGBA instead */
        bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i++) {
                int val = bytestream2_get_be32(gb);
                *dst++ = val >>  0;
                *dst++ = val >>  8;
                *dst++ = val >> 16;
                *dst++ = val >> 24;
            }
            dst += c->cursor_stride - c->cursor_w * 4;
        }
        break;
    default:
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}
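
/*
 * Blend the cursor over the RGB24 frame.  Each cursor pixel carries its
 * alpha in the first byte; APPLY_ALPHA() is the usual approximate 8-bit
 * blend, (src * (256 - alpha) + new * alpha) >> 8.
 */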
#define APPLY_ALPHA(src, new, alpha) \
    src = (src * (256 - alpha) + new * alpha) >> 8

static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
{
    int i, j;
    int x, y, w, h;
    const uint8_t *cursor;

    if (!c->cursor)
        return;

    x      = c->cursor_x - c->cursor_hot_x;
    y      = c->cursor_y - c->cursor_hot_y;
    cursor = c->cursor;
    w      = c->cursor_w;
    h      = c->cursor_h;

    if (x + w > c->width)
        w = c->width - x;
    if (y + h > c->height)
        h = c->height - y;
    if (x < 0) {
        w      +=  x;
        cursor += -x * 4;
    } else {
        dst += x * 3;
    }
    if (y < 0) {
        h      +=  y;
        cursor += -y * c->cursor_stride;
    } else {
        dst += y * stride;
    }
    if (w < 0 || h < 0)
        return;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            uint8_t alpha = cursor[i * 4];

            APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
            APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
            APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
        }
        dst    += stride;
        cursor += c->cursor_stride;
    }
}
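
/*
 * A G2M frame is a sequence of chunks, each prefixed by a 32-bit
 * little-endian size (which includes the type byte) and a type byte.
 * DISPLAY_INFO carries the frame and tile dimensions, the compression method
 * and the pixel format; TILE_DATA carries one tile; CURSOR_POS and
 * CURSOR_SHAPE update the pointer overlay.  The decoder keeps the canvas in
 * c->framebuf and copies it into the output frame once all chunks are parsed.
 */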
static int g2m_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_picture_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    G2MContext *c      = avctx->priv_data;
    AVFrame *pic       = data;
    GetByteContext bc, tbc;
    int magic;
    int got_header = 0;
    uint32_t chunk_size, r_mask, g_mask, b_mask;
    int chunk_type, chunk_start;
    int i;
    int ret;

    if (buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least 12 bytes, got %d instead\n",
               buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&bc, buf, buf_size);

    magic = bytestream2_get_be32(&bc);
    if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
        (magic & 0xF) < 2 || (magic & 0xF) > 5) {
        av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
        return AVERROR_INVALIDDATA;
    }

    if ((magic & 0xF) < 4) {
        av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n");
        return AVERROR(ENOSYS);
    }

    while (bytestream2_get_bytes_left(&bc) > 5) {
        chunk_size  = bytestream2_get_le32(&bc) - 1;
        chunk_type  = bytestream2_get_byte(&bc);
        chunk_start = bytestream2_tell(&bc);
        if (chunk_size > bytestream2_get_bytes_left(&bc)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %"PRIu32" type %02X\n",
                   chunk_size, chunk_type);
            break;
        }
        switch (chunk_type) {
        case DISPLAY_INFO:
            c->got_header = 0;
            if (chunk_size < 21) {
                av_log(avctx, AV_LOG_ERROR, "Invalid display info size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            c->width  = bytestream2_get_be32(&bc);
            c->height = bytestream2_get_be32(&bc);
            if (c->width  < 16 || c->width  > c->orig_width ||
                c->height < 16 || c->height > c->orig_height) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid frame dimensions %dx%d\n",
                       c->width, c->height);
                ret = AVERROR_INVALIDDATA;
                goto header_fail;
            }
            if (c->width != avctx->width || c->height != avctx->height) {
                ret = ff_set_dimensions(avctx, c->width, c->height);
                if (ret < 0)
                    return ret;
            }
            c->compression = bytestream2_get_be32(&bc);
            if (c->compression != 2 && c->compression != 3) {
                av_log(avctx, AV_LOG_ERROR,
                       "Unknown compression method %d\n",
                       c->compression);
                return AVERROR_PATCHWELCOME;
            }
            c->tile_width  = bytestream2_get_be32(&bc);
            c->tile_height = bytestream2_get_be32(&bc);
            if (!c->tile_width || !c->tile_height ||
                ((c->tile_width | c->tile_height) & 0xF)) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid tile dimensions %dx%d\n",
                       c->tile_width, c->tile_height);
                ret = AVERROR_INVALIDDATA;
                goto header_fail;
            }
            c->tiles_x = (c->width  + c->tile_width  - 1) / c->tile_width;
            c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
            c->bpp = bytestream2_get_byte(&bc);
            if (c->bpp == 32) {
                if (bytestream2_get_bytes_left(&bc) < 16 ||
                    (chunk_size - 21) < 16) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Display info: missing bitmasks!\n");
                    return AVERROR_INVALIDDATA;
                }
                r_mask = bytestream2_get_be32(&bc);
                g_mask = bytestream2_get_be32(&bc);
                b_mask = bytestream2_get_be32(&bc);
                if (r_mask != 0xFF0000 || g_mask != 0xFF00 || b_mask != 0xFF) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Invalid or unsupported bitmasks: R=%"PRIX32", G=%"PRIX32", B=%"PRIX32"\n",
                           r_mask, g_mask, b_mask);
                    return AVERROR_PATCHWELCOME;
                }
            } else {
                avpriv_request_sample(avctx, "bpp=%d", c->bpp);
                return AVERROR_PATCHWELCOME;
            }
            if (g2m_init_buffers(c)) {
                ret = AVERROR(ENOMEM);
                goto header_fail;
            }
            got_header = 1;
            break;
        case TILE_DATA:
            if (!c->tiles_x || !c->tiles_y) {
                av_log(avctx, AV_LOG_WARNING,
                       "No display info - skipping tile\n");
                break;
            }
            if (chunk_size < 2) {
                av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            c->tile_x = bytestream2_get_byte(&bc);
            c->tile_y = bytestream2_get_byte(&bc);
            if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid tile pos %d,%d (in %dx%d grid)\n",
                       c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
                break;
            }
            ret = 0;
            switch (c->compression) {
            case COMPR_EPIC_J_B:
                av_log(avctx, AV_LOG_ERROR,
                       "ePIC j-b compression is not implemented yet\n");
                return AVERROR(ENOSYS);
            case COMPR_KEMPF_J_B:
                ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
                                        buf + bytestream2_tell(&bc),
                                        chunk_size - 2);
                break;
            }
            if (ret && c->framebuf)
                av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
                       c->tile_x, c->tile_y);
            break;
        case CURSOR_POS:
            if (chunk_size < 5) {
                av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            c->cursor_x = bytestream2_get_be16(&bc);
            c->cursor_y = bytestream2_get_be16(&bc);
            break;
        case CURSOR_SHAPE:
            if (chunk_size < 8) {
                av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
                             chunk_size - 4);
            g2m_load_cursor(avctx, c, &tbc);
            break;
        case CHUNK_CC:
        case CHUNK_CD:
            break;
        default:
            av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02d\n",
                   chunk_type);
        }

        /* navigate to next chunk */
        bytestream2_skip(&bc, chunk_start + chunk_size - bytestream2_tell(&bc));
    }
    if (got_header)
        c->got_header = 1;

    if (c->width && c->height) {
        if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }

        pic->key_frame = got_header;
        pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

        for (i = 0; i < avctx->height; i++)
            memcpy(pic->data[0] + i * pic->linesize[0],
                   c->framebuf + i * c->framebuf_stride,
                   c->width * 3);
        g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);

        *got_picture_ptr = 1;
    }

    return buf_size;

header_fail:
    c->width   =
    c->height  = 0;
    c->tiles_x =
    c->tiles_y = 0;
    return ret;
}

static av_cold int g2m_decode_init(AVCodecContext *avctx)
{
    G2MContext *const c = avctx->priv_data;
    int ret;

    if ((ret = jpg_init(avctx, &c->jc)) != 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
        jpg_free_context(&c->jc);
        return AVERROR(ENOMEM);
    }

    avctx->pix_fmt = AV_PIX_FMT_RGB24;

    // store original sizes and check against those if resize happens
    c->orig_width  = avctx->width;
    c->orig_height = avctx->height;

    return 0;
}

static av_cold int g2m_decode_end(AVCodecContext *avctx)
{
    G2MContext *const c = avctx->priv_data;

    jpg_free_context(&c->jc);

    av_freep(&c->kempf_buf);
    av_freep(&c->kempf_flags);
    av_freep(&c->synth_tile);
    av_freep(&c->jpeg_tile);
    av_freep(&c->cursor);
    av_freep(&c->framebuf);

    return 0;
}

AVCodec ff_g2m_decoder = {
    .name           = "g2m",
    .long_name      = NULL_IF_CONFIG_SMALL("Go2Meeting"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_G2M,
    .priv_data_size = sizeof(G2MContext),
    .init           = g2m_decode_init,
    .close          = g2m_decode_end,
    .decode         = g2m_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
};