You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

922 lines
29KB

  1. /*
  2. * Go2Webinar decoder
  3. * Copyright (c) 2012 Konstantin Shishkov
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Go2Webinar decoder
  24. */
  25. #include <inttypes.h>
  26. #include <zlib.h>
  27. #include "libavutil/intreadwrite.h"
  28. #include "avcodec.h"
  29. #include "blockdsp.h"
  30. #include "bytestream.h"
  31. #include "idctdsp.h"
  32. #include "get_bits.h"
  33. #include "internal.h"
  34. #include "mjpeg.h"
  35. enum ChunkType {
  36. DISPLAY_INFO = 0xC8,
  37. TILE_DATA,
  38. CURSOR_POS,
  39. CURSOR_SHAPE,
  40. CHUNK_CC,
  41. CHUNK_CD
  42. };
  43. enum Compression {
  44. COMPR_EPIC_J_B = 2,
  45. COMPR_KEMPF_J_B,
  46. };
/* Fixed luma quantisation matrix, in raster order (indexed through
 * ff_zigzag_direct during decoding). */
static const uint8_t luma_quant[64] = {
     8,  6,  5,  8, 12, 20, 26, 31,
     6,  6,  7, 10, 13, 29, 30, 28,
     7,  7,  8, 12, 20, 29, 35, 28,
     7,  9, 11, 15, 26, 44, 40, 31,
     9, 11, 19, 28, 34, 55, 52, 39,
    12, 18, 28, 32, 41, 52, 57, 46,
    25, 32, 39, 44, 52, 61, 60, 51,
    36, 46, 48, 49, 56, 50, 52, 50
};
/* Fixed chroma quantisation matrix, in raster order (values are capped
 * at 50 for most of the table). */
static const uint8_t chroma_quant[64] = {
     9,  9, 12, 24, 50, 50, 50, 50,
     9, 11, 13, 33, 50, 50, 50, 50,
    12, 13, 28, 50, 50, 50, 50, 50,
    24, 33, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
};
/* State for the embedded baseline-JPEG-like decoder. */
typedef struct JPGContext {
    BlockDSPContext bdsp;          // block clearing helpers
    IDCTDSPContext  idsp;          // IDCT implementation
    ScanTable       scantable;     // permuted zigzag scan for the IDCT

    VLC dc_vlc[2], ac_vlc[2];      // [0] = luma, [1] = chroma Huffman tables
    int prev_dc[3];                // DC predictors, one per plane (Y, U, V)
    DECLARE_ALIGNED(16, int16_t, block)[6][64]; // 4 luma + 2 chroma blocks of a macroblock

    uint8_t *buf;                  // scratch buffer for unescaped JPEG data
} JPGContext;
/* Decoder context for one G2M stream. */
typedef struct G2MContext {
    JPGContext jc;                 // embedded JPEG decoder state
    int        version;            // NOTE(review): not referenced elsewhere in this file
    int        compression;        // COMPR_* value from DISPLAY_INFO
    int        width, height, bpp; // current display dimensions and depth
    int        orig_width, orig_height; // container dimensions, used as upper bound

    int        tile_width, tile_height; // tile size (multiple of 16)
    int        tiles_x, tiles_y, tile_x, tile_y; // grid size and current tile position

    int        got_header;         // set after a valid DISPLAY_INFO chunk

    uint8_t    *framebuf;          // persistent RGB24 output frame
    int        framebuf_stride, old_width, old_height; // NOTE(review): old_* are
                                   // never updated in the visible code — confirm

    uint8_t    *synth_tile, *jpeg_tile; // tile scratch buffers (synth_tile is
                                   // allocated but unused in the visible code)
    int        tile_stride, old_tile_w, old_tile_h;

    uint8_t    *kempf_buf, *kempf_flags; // zlib output / per-8x8 coded-block flags

    uint8_t    *cursor;            // cursor image, 4 bytes per pixel (alpha first)
    int        cursor_stride;
    int        cursor_fmt;         // 1 = monochrome, 32 = full colour
    int        cursor_w, cursor_h, cursor_x, cursor_y;
    int        cursor_hot_x, cursor_hot_y; // hotspot offset inside the cursor image
} G2MContext;
  96. static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
  97. const uint8_t *val_table, int nb_codes,
  98. int is_ac)
  99. {
  100. uint8_t huff_size[256] = { 0 };
  101. uint16_t huff_code[256];
  102. uint16_t huff_sym[256];
  103. int i;
  104. ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
  105. for (i = 0; i < 256; i++)
  106. huff_sym[i] = i + 16 * is_ac;
  107. if (is_ac)
  108. huff_sym[0] = 16 * 256;
  109. return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
  110. huff_code, 2, 2, huff_sym, 2, 2, 0);
  111. }
  112. static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
  113. {
  114. int ret;
  115. ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
  116. avpriv_mjpeg_val_dc, 12, 0);
  117. if (ret)
  118. return ret;
  119. ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
  120. avpriv_mjpeg_val_dc, 12, 0);
  121. if (ret)
  122. return ret;
  123. ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
  124. avpriv_mjpeg_val_ac_luminance, 251, 1);
  125. if (ret)
  126. return ret;
  127. ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
  128. avpriv_mjpeg_val_ac_chrominance, 251, 1);
  129. if (ret)
  130. return ret;
  131. ff_blockdsp_init(&c->bdsp, avctx);
  132. ff_idctdsp_init(&c->idsp, avctx);
  133. ff_init_scantable(c->idsp.idct_permutation, &c->scantable,
  134. ff_zigzag_direct);
  135. return 0;
  136. }
  137. static av_cold void jpg_free_context(JPGContext *ctx)
  138. {
  139. int i;
  140. for (i = 0; i < 2; i++) {
  141. ff_free_vlc(&ctx->dc_vlc[i]);
  142. ff_free_vlc(&ctx->ac_vlc[i]);
  143. }
  144. av_freep(&ctx->buf);
  145. }
  146. static void jpg_unescape(const uint8_t *src, int src_size,
  147. uint8_t *dst, int *dst_size)
  148. {
  149. const uint8_t *src_end = src + src_size;
  150. uint8_t *dst_start = dst;
  151. while (src < src_end) {
  152. uint8_t x = *src++;
  153. *dst++ = x;
  154. if (x == 0xFF && !*src)
  155. src++;
  156. }
  157. *dst_size = dst - dst_start;
  158. }
/**
 * Decode one 8x8 block of DCT coefficients into `block`.
 *
 * The DC value is coded as a delta against the previous block of the
 * same plane (predictor kept in c->prev_dc); AC coefficients are coded
 * as (run, size) pairs in zigzag order and dequantised with the luma or
 * chroma table depending on `plane`.
 *
 * @return 0 on success or end-of-block, AVERROR_INVALIDDATA on an
 *         invalid VLC code or a run overflowing coefficient 63
 */
static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
                            int plane, int16_t *block)
{
    int dc, val, pos;
    const int is_chroma = !!plane;
    const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;

    c->bdsp.clear_block(block);
    dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
    if (dc < 0)
        return AVERROR_INVALIDDATA;
    if (dc)
        dc = get_xbits(gb, dc); /* signed magnitude of the DC delta */
    dc = dc * qmat[0] + c->prev_dc[plane];
    block[0] = dc;
    c->prev_dc[plane] = dc;

    pos = 0;
    while (pos < 63) {
        val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
        if (val < 0)
            return AVERROR_INVALIDDATA;
        pos += val >> 4; /* high bits: zero run (EOB symbol maps to 16*256, run 256) */
        val &= 0xF;      /* low nibble: coefficient size in bits */
        if (pos > 63)
            /* run past the end: valid only for the EOB symbol (val == 0) */
            return val ? AVERROR_INVALIDDATA : 0;
        if (val) {
            int nbits = val;

            val = get_xbits(gb, nbits);
            val *= qmat[ff_zigzag_direct[pos]]; /* quant table is in raster order */
            block[c->scantable.permutated[pos]] = val;
        }
    }
    return 0;
}
  192. static inline void yuv2rgb(uint8_t *out, int Y, int U, int V)
  193. {
  194. out[0] = av_clip_uint8(Y + ( 91881 * V + 32768 >> 16));
  195. out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
  196. out[2] = av_clip_uint8(Y + (116130 * U + 32768 >> 16));
  197. }
/**
 * Decode JPEG-compressed data into an RGB24 destination.
 *
 * Data is organised in 16x16 macroblocks of four luma and two (4:2:0
 * subsampled) chroma 8x8 blocks. When `mask` is set it holds one flag
 * per 8x8 sub-block (stride `mask_stride`, two entries per macroblock
 * horizontally); fully-unmasked macroblocks are skipped entirely.
 * Decoding stops early after `num_mbs` coded luma blocks (0 = decode
 * everything).
 *
 * @param swapuv  XORed into the chroma block index to swap U and V
 * @return 0 on success, negative AVERROR on decode failure
 */
static int jpg_decode_data(JPGContext *c, int width, int height,
                           const uint8_t *src, int src_size,
                           uint8_t *dst, int dst_stride,
                           const uint8_t *mask, int mask_stride, int num_mbs,
                           int swapuv)
{
    GetBitContext gb;
    int mb_w, mb_h, mb_x, mb_y, i, j;
    int bx, by;
    int unesc_size;
    int ret;

    /* unescape into a padded scratch buffer before bitreading */
    if ((ret = av_reallocp(&c->buf,
                           src_size + FF_INPUT_BUFFER_PADDING_SIZE)) < 0)
        return ret;
    jpg_unescape(src, src_size, c->buf, &unesc_size);
    memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    init_get_bits(&gb, c->buf, unesc_size * 8);

    width = FFALIGN(width, 16);
    mb_w  = width >> 4;
    mb_h  = (height + 15) >> 4;

    if (!num_mbs)
        num_mbs = mb_w * mb_h * 4; /* four luma blocks per macroblock */

    for (i = 0; i < 3; i++)
        c->prev_dc[i] = 1024; /* initial DC predictor for each plane */
    bx = by = 0;
    c->bdsp.clear_blocks(c->block[0]);
    for (mb_y = 0; mb_y < mb_h; mb_y++) {
        for (mb_x = 0; mb_x < mb_w; mb_x++) {
            /* skip macroblocks whose four sub-block flags are all zero */
            if (mask && !mask[mb_x * 2] && !mask[mb_x * 2 + 1] &&
                !mask[mb_x * 2 + mask_stride] &&
                !mask[mb_x * 2 + 1 + mask_stride]) {
                bx += 16;
                continue;
            }
            /* four luma blocks (2x2) */
            for (j = 0; j < 2; j++) {
                for (i = 0; i < 2; i++) {
                    if (mask && !mask[mb_x * 2 + i + j * mask_stride])
                        continue;
                    num_mbs--;
                    if ((ret = jpg_decode_block(c, &gb, 0,
                                                c->block[i + j * 2])) != 0)
                        return ret;
                    c->idsp.idct(c->block[i + j * 2]);
                }
            }
            /* two chroma blocks (U in block[4], V in block[5]) */
            for (i = 1; i < 3; i++) {
                if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
                    return ret;
                c->idsp.idct(c->block[i + 3]);
            }
            /* convert the 16x16 macroblock to RGB24 */
            for (j = 0; j < 16; j++) {
                uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
                for (i = 0; i < 16; i++) {
                    int Y, U, V;

                    Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
                    U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
                    V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
                    yuv2rgb(out + i * 3, Y, U, V);
                }
            }
            if (!num_mbs)
                return 0; /* decoded all requested blocks */
            bx += 16;
        }
        bx = 0;
        by += 16;
        if (mask)
            mask += mask_stride * 2; /* two mask rows per macroblock row */
    }
    return 0;
}
/**
 * Paint palettised pixel data over a tile, mixing in JPEG pixels.
 *
 * Each output row is preceded by an 8-bit flag: nonzero means the row
 * is left untouched. Otherwise every pixel is an `nb`-bit palette index
 * (nb derived from the palette size); index `tidx` marks "transparent"
 * and takes the co-located pixel from `jpeg_tile` instead.
 */
static void kempf_restore_buf(const uint8_t *src, int len,
                              uint8_t *dst, int stride,
                              const uint8_t *jpeg_tile, int tile_stride,
                              int width, int height,
                              const uint8_t *pal, int npal, int tidx)
{
    GetBitContext gb;
    int i, j, nb, col;

    init_get_bits(&gb, src, len * 8);

    /* bits per palette index, rounded up to 1/2/4/8 */
    if (npal <= 2)       nb = 1;
    else if (npal <= 4)  nb = 2;
    else if (npal <= 16) nb = 4;
    else                 nb = 8;

    for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
        if (get_bits(&gb, 8))
            continue; /* nonzero row flag: keep existing row contents */
        for (i = 0; i < width; i++) {
            col = get_bits(&gb, nb);
            if (col != tidx)
                memcpy(dst + i * 3, pal + col * 3, 3);
            else
                memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
        }
    }
}
/**
 * Decode one Kempf j-b compressed tile into the frame buffer.
 *
 * Tile sub-types (header bits 5-7):
 *   0 - solid colour fill
 *   1 - plain JPEG data
 *   2 - palettised data only (zlib-compressed)
 *   other - palettised data with a transparent colour backed by
 *           partially-coded JPEG blocks
 *
 * @return 0 on success, negative AVERROR on invalid data
 */
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
                             const uint8_t *src, int src_size)
{
    int width, height;
    int hdr, zsize, npal, tidx = -1, ret;
    int i, j;
    const uint8_t *src_end = src + src_size;
    uint8_t pal[768], transp[3];
    uLongf dlen = (c->tile_width + 1) * c->tile_height; /* 1 row flag byte + pixels per row */
    int sub_type;
    int nblocks, cblocks, bstride;
    int bits, bitbuf, coded;
    uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
                   tile_y * c->tile_height * c->framebuf_stride;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    /* edge tiles may be smaller than the nominal tile size */
    width  = FFMIN(c->width  - tile_x * c->tile_width,  c->tile_width);
    height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);

    hdr      = *src++;
    sub_type = hdr >> 5;
    if (sub_type == 0) {
        /* solid fill with a single RGB colour */
        int j;
        memcpy(transp, src, 3);
        src += 3;
        for (j = 0; j < height; j++, dst += c->framebuf_stride)
            for (i = 0; i < width; i++)
                memcpy(dst + i * 3, transp, 3);
        return 0;
    } else if (sub_type == 1) {
        /* whole tile is JPEG data */
        return jpg_decode_data(&c->jc, width, height, src, src_end - src,
                               dst, c->framebuf_stride, NULL, 0, 0, 0);
    }

    if (sub_type != 2) {
        /* transparent colour marking JPEG-backed pixels */
        memcpy(transp, src, 3);
        src += 3;
    }
    npal = *src++ + 1;
    if (src_end - src < npal * 3)
        return AVERROR_INVALIDDATA;
    memcpy(pal, src, npal * 3);
    src += npal * 3;
    if (sub_type != 2) {
        /* find the palette slot matching the transparent colour */
        for (i = 0; i < npal; i++) {
            if (!memcmp(pal + i * 3, transp, 3)) {
                tidx = i;
                break;
            }
        }
    }

    if (src_end - src < 2)
        return 0;
    zsize = (src[0] << 8) | src[1];
    src += 2;
    if (src_end - src < zsize + (sub_type != 2))
        return AVERROR_INVALIDDATA;
    /* inflate the palettised pixel stream */
    ret = uncompress(c->kempf_buf, &dlen, src, zsize);
    if (ret)
        return AVERROR_INVALIDDATA;
    src += zsize;

    if (sub_type == 2) {
        /* palette-only tile: no JPEG backing */
        kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                          NULL, 0, width, height, pal, npal, tidx);
        return 0;
    }

    nblocks = *src++ + 1;
    cblocks = 0;
    bstride = FFALIGN(width, 16) >> 3; /* flags per 8x8 block row */
    // blocks are coded LSB and we need normal bitreader for JPEG data
    bits = 0;
    for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
        for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
            if (!bits) {
                if (src >= src_end)
                    return AVERROR_INVALIDDATA;
                bitbuf = *src++;
                bits = 8;
            }
            coded = bitbuf & 1;
            bits--;
            bitbuf >>= 1;
            cblocks += coded;
            if (cblocks > nblocks)
                return AVERROR_INVALIDDATA;
            /* one flag bit covers a whole 16x16 macroblock = 4 8x8 entries */
            c->kempf_flags[j * 2 + i * 2 * bstride] =
            c->kempf_flags[j * 2 + 1 + i * 2 * bstride] =
            c->kempf_flags[j * 2 + (i * 2 + 1) * bstride] =
            c->kempf_flags[j * 2 + 1 + (i * 2 + 1) * bstride] = coded;
        }
    }

    memset(c->jpeg_tile, 0, c->tile_stride * height);
    jpg_decode_data(&c->jc, width, height, src, src_end - src,
                    c->jpeg_tile, c->tile_stride,
                    c->kempf_flags, bstride, nblocks * 4, 0);

    /* merge palette pixels with the decoded JPEG blocks */
    kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                      c->jpeg_tile, c->tile_stride,
                      width, height, pal, npal, tidx);

    return 0;
}
/**
 * (Re)allocate the frame buffer and per-tile scratch buffers when the
 * frame or tile dimensions grow.
 *
 * NOTE(review): c->old_width/old_height and old_tile_w/old_tile_h are
 * compared here but never updated in the visible code, so these
 * branches re-run on every size change — confirm against the full file.
 *
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 */
static int g2m_init_buffers(G2MContext *c)
{
    int aligned_height;

    if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
        c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3; /* RGB24 */
        aligned_height = c->height + 15;
        av_free(c->framebuf);
        c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
        if (!c->framebuf)
            return AVERROR(ENOMEM);
    }
    if (!c->synth_tile || !c->jpeg_tile ||
        c->old_tile_w < c->tile_width ||
        c->old_tile_h < c->tile_height) {
        c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
        aligned_height = FFALIGN(c->tile_height, 16);
        av_free(c->synth_tile);
        av_free(c->jpeg_tile);
        av_free(c->kempf_buf);
        av_free(c->kempf_flags);
        c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
        c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
        /* kempf_buf holds inflated rows of 1 flag byte + tile_width indices */
        c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height
                                  + FF_INPUT_BUFFER_PADDING_SIZE);
        c->kempf_flags = av_mallocz( c->tile_width * aligned_height);
        if (!c->synth_tile || !c->jpeg_tile ||
            !c->kempf_buf || !c->kempf_flags)
            return AVERROR(ENOMEM);
    }
    return 0;
}
  423. static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
  424. GetByteContext *gb)
  425. {
  426. int i, j, k;
  427. uint8_t *dst;
  428. uint32_t bits;
  429. uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
  430. uint32_t cursor_hot_x, cursor_hot_y;
  431. int cursor_fmt, err;
  432. cur_size = bytestream2_get_be32(gb);
  433. cursor_w = bytestream2_get_byte(gb);
  434. cursor_h = bytestream2_get_byte(gb);
  435. cursor_hot_x = bytestream2_get_byte(gb);
  436. cursor_hot_y = bytestream2_get_byte(gb);
  437. cursor_fmt = bytestream2_get_byte(gb);
  438. cursor_stride = FFALIGN(cursor_w, cursor_fmt==1 ? 32 : 1) * 4;
  439. if (cursor_w < 1 || cursor_w > 256 ||
  440. cursor_h < 1 || cursor_h > 256) {
  441. av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %"PRIu32"x%"PRIu32"\n",
  442. cursor_w, cursor_h);
  443. return AVERROR_INVALIDDATA;
  444. }
  445. if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
  446. av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %"PRIu32",%"PRIu32"\n",
  447. cursor_hot_x, cursor_hot_y);
  448. cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
  449. cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);
  450. }
  451. if (cur_size - 9 > bytestream2_get_bytes_left(gb) ||
  452. c->cursor_w * c->cursor_h / 4 > cur_size) {
  453. av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"/%u\n",
  454. cur_size, bytestream2_get_bytes_left(gb));
  455. return AVERROR_INVALIDDATA;
  456. }
  457. if (cursor_fmt != 1 && cursor_fmt != 32) {
  458. avpriv_report_missing_feature(avctx, "Cursor format %d",
  459. cursor_fmt);
  460. return AVERROR_PATCHWELCOME;
  461. }
  462. if ((err = av_reallocp(&c->cursor, cursor_stride * cursor_h)) < 0) {
  463. av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n");
  464. return err;
  465. }
  466. c->cursor_w = cursor_w;
  467. c->cursor_h = cursor_h;
  468. c->cursor_hot_x = cursor_hot_x;
  469. c->cursor_hot_y = cursor_hot_y;
  470. c->cursor_fmt = cursor_fmt;
  471. c->cursor_stride = cursor_stride;
  472. dst = c->cursor;
  473. switch (c->cursor_fmt) {
  474. case 1: // old monochrome
  475. for (j = 0; j < c->cursor_h; j++) {
  476. for (i = 0; i < c->cursor_w; i += 32) {
  477. bits = bytestream2_get_be32(gb);
  478. for (k = 0; k < 32; k++) {
  479. dst[0] = !!(bits & 0x80000000);
  480. dst += 4;
  481. bits <<= 1;
  482. }
  483. }
  484. }
  485. dst = c->cursor;
  486. for (j = 0; j < c->cursor_h; j++) {
  487. for (i = 0; i < c->cursor_w; i += 32) {
  488. bits = bytestream2_get_be32(gb);
  489. for (k = 0; k < 32; k++) {
  490. int mask_bit = !!(bits & 0x80000000);
  491. switch (dst[0] * 2 + mask_bit) {
  492. case 0:
  493. dst[0] = 0xFF;
  494. dst[1] = 0x00;
  495. dst[2] = 0x00;
  496. dst[3] = 0x00;
  497. break;
  498. case 1:
  499. dst[0] = 0xFF;
  500. dst[1] = 0xFF;
  501. dst[2] = 0xFF;
  502. dst[3] = 0xFF;
  503. break;
  504. default:
  505. dst[0] = 0x00;
  506. dst[1] = 0x00;
  507. dst[2] = 0x00;
  508. dst[3] = 0x00;
  509. }
  510. dst += 4;
  511. bits <<= 1;
  512. }
  513. }
  514. }
  515. break;
  516. case 32: // full colour
  517. /* skip monochrome version of the cursor and decode RGBA instead */
  518. bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
  519. for (j = 0; j < c->cursor_h; j++) {
  520. for (i = 0; i < c->cursor_w; i++) {
  521. int val = bytestream2_get_be32(gb);
  522. *dst++ = val >> 0;
  523. *dst++ = val >> 8;
  524. *dst++ = val >> 16;
  525. *dst++ = val >> 24;
  526. }
  527. }
  528. break;
  529. default:
  530. return AVERROR_PATCHWELCOME;
  531. }
  532. return 0;
  533. }
/* Blend `new` over `src` with an 8-bit alpha (256 = fully new). */
#define APPLY_ALPHA(src, new, alpha) \
    src = (src * (256 - alpha) + new * alpha) >> 8

/**
 * Alpha-blend the cursor image onto the output frame at its current
 * position, clipping the cursor rectangle against the frame borders.
 */
static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
{
    int i, j;
    int x, y, w, h;
    const uint8_t *cursor;

    if (!c->cursor)
        return;

    /* top-left corner of the cursor image in frame coordinates */
    x = c->cursor_x - c->cursor_hot_x;
    y = c->cursor_y - c->cursor_hot_y;

    cursor = c->cursor;
    w = c->cursor_w;
    h = c->cursor_h;

    /* clip right/bottom against the frame */
    if (x + w > c->width)
        w = c->width - x;
    if (y + h > c->height)
        h = c->height - y;
    /* clip left/top: shrink the rect and advance into the cursor image */
    if (x < 0) {
        w += x;
        cursor += -x * 4;
    } else {
        dst += x * 3;
    }
    if (y < 0) {
        h += y;
        cursor += -y * c->cursor_stride;
    } else {
        dst += y * stride;
    }
    if (w < 0 || h < 0)
        return; /* entirely off-screen */

    /* cursor pixels are alpha, then three colour components */
    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            uint8_t alpha = cursor[i * 4];

            APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
            APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
            APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
        }
        dst += stride;
        cursor += c->cursor_stride;
    }
}
/**
 * Decode one G2M packet: parse the chunk sequence, update the persistent
 * frame buffer and cursor state, and emit a frame if enough state exists.
 *
 * @return consumed byte count on success, negative AVERROR otherwise
 */
static int g2m_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_picture_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    G2MContext *c = avctx->priv_data;
    AVFrame *pic = data;
    GetByteContext bc, tbc;
    int magic;
    int got_header = 0;
    uint32_t chunk_size, r_mask, g_mask, b_mask;
    int chunk_type, chunk_start;
    int i;
    int ret;

    if (buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least 12 bytes, got %d instead\n",
               buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&bc, buf, buf_size);

    /* magic is 'G2M' + '0' with the low nibble carrying the version */
    magic = bytestream2_get_be32(&bc);
    if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
        (magic & 0xF) < 2 || (magic & 0xF) > 4) {
        av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
        return AVERROR_INVALIDDATA;
    }

    if ((magic & 0xF) != 4) {
        av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n");
        return AVERROR(ENOSYS);
    }

    /* iterate over the chunk sequence; each chunk is a LE32 size (which
     * includes the type byte, hence the -1), a type byte and a payload */
    while (bytestream2_get_bytes_left(&bc) > 5) {
        chunk_size = bytestream2_get_le32(&bc) - 1;
        chunk_type = bytestream2_get_byte(&bc);
        chunk_start = bytestream2_tell(&bc);
        if (chunk_size > bytestream2_get_bytes_left(&bc)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %"PRIu32" type %02X\n",
                   chunk_size, chunk_type);
            break;
        }
        switch (chunk_type) {
        case DISPLAY_INFO:
            /* header invalidated until this chunk parses completely */
            got_header =
            c->got_header = 0;
            if (chunk_size < 21) {
                av_log(avctx, AV_LOG_ERROR, "Invalid display info size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            c->width = bytestream2_get_be32(&bc);
            c->height = bytestream2_get_be32(&bc);
            if (c->width < 16 || c->width > c->orig_width ||
                c->height < 16 || c->height > c->orig_height) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid frame dimensions %dx%d\n",
                       c->width, c->height);
                ret = AVERROR_INVALIDDATA;
                goto header_fail;
            }
            if (c->width != avctx->width || c->height != avctx->height) {
                ret = ff_set_dimensions(avctx, c->width, c->height);
                if (ret < 0)
                    goto header_fail;
            }
            c->compression = bytestream2_get_be32(&bc);
            if (c->compression != 2 && c->compression != 3) {
                av_log(avctx, AV_LOG_ERROR,
                       "Unknown compression method %d\n",
                       c->compression);
                ret = AVERROR_PATCHWELCOME;
                goto header_fail;
            }
            c->tile_width = bytestream2_get_be32(&bc);
            c->tile_height = bytestream2_get_be32(&bc);
            /* tiles must be non-empty multiples of 16 with a bounded area */
            if (c->tile_width <= 0 || c->tile_height <= 0 ||
                ((c->tile_width | c->tile_height) & 0xF) ||
                c->tile_width * (uint64_t)c->tile_height >= INT_MAX / 4
            ) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid tile dimensions %dx%d\n",
                       c->tile_width, c->tile_height);
                ret = AVERROR_INVALIDDATA;
                goto header_fail;
            }
            c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width;
            c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
            c->bpp = bytestream2_get_byte(&bc);
            if (c->bpp == 32) {
                /* only 8:8:8 BGR ordering is supported */
                if (bytestream2_get_bytes_left(&bc) < 16 ||
                    (chunk_size - 21) < 16) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Display info: missing bitmasks!\n");
                    ret = AVERROR_INVALIDDATA;
                    goto header_fail;
                }
                r_mask = bytestream2_get_be32(&bc);
                g_mask = bytestream2_get_be32(&bc);
                b_mask = bytestream2_get_be32(&bc);
                if (r_mask != 0xFF0000 || g_mask != 0xFF00 || b_mask != 0xFF) {
                    av_log(avctx, AV_LOG_ERROR,
                           "Invalid or unsupported bitmasks: R=%"PRIX32", G=%"PRIX32", B=%"PRIX32"\n",
                           r_mask, g_mask, b_mask);
                    ret = AVERROR_PATCHWELCOME;
                    goto header_fail;
                }
            } else {
                avpriv_request_sample(avctx, "bpp=%d", c->bpp);
                ret = AVERROR_PATCHWELCOME;
                goto header_fail;
            }
            if (g2m_init_buffers(c)) {
                ret = AVERROR(ENOMEM);
                goto header_fail;
            }
            got_header = 1;
            break;
        case TILE_DATA:
            if (!c->tiles_x || !c->tiles_y) {
                av_log(avctx, AV_LOG_WARNING,
                       "No display info - skipping tile\n");
                break;
            }
            if (chunk_size < 2) {
                av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            c->tile_x = bytestream2_get_byte(&bc);
            c->tile_y = bytestream2_get_byte(&bc);
            if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid tile pos %d,%d (in %dx%d grid)\n",
                       c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
                break;
            }
            ret = 0;
            switch (c->compression) {
            case COMPR_EPIC_J_B:
                av_log(avctx, AV_LOG_ERROR,
                       "ePIC j-b compression is not implemented yet\n");
                return AVERROR(ENOSYS);
            case COMPR_KEMPF_J_B:
                ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
                                        buf + bytestream2_tell(&bc),
                                        chunk_size - 2);
                break;
            }
            /* tile errors are logged but do not abort the frame */
            if (ret && c->framebuf)
                av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
                       c->tile_x, c->tile_y);
            break;
        case CURSOR_POS:
            if (chunk_size < 5) {
                av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            c->cursor_x = bytestream2_get_be16(&bc);
            c->cursor_y = bytestream2_get_be16(&bc);
            break;
        case CURSOR_SHAPE:
            if (chunk_size < 8) {
                av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"\n",
                       chunk_size);
                break;
            }
            /* parse the cursor from a bounded sub-reader */
            bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
                             chunk_size - 4);
            g2m_load_cursor(avctx, c, &tbc);
            break;
        case CHUNK_CC:
        case CHUNK_CD:
            break;
        default:
            av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02d\n",
                   chunk_type);
        }

        /* navigate to next chunk */
        bytestream2_skip(&bc, chunk_start + chunk_size - bytestream2_tell(&bc));
    }

    if (got_header)
        c->got_header = 1;

    if (c->width && c->height && c->framebuf) {
        if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
            return ret;

        /* a frame with a fresh DISPLAY_INFO is a keyframe */
        pic->key_frame = got_header;
        pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

        for (i = 0; i < avctx->height; i++)
            memcpy(pic->data[0] + i * pic->linesize[0],
                   c->framebuf + i * c->framebuf_stride,
                   c->width * 3);
        g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);

        *got_picture_ptr = 1;
    }

    return buf_size;

header_fail:
    /* invalidate all geometry so later chunks cannot use stale values */
    c->width =
    c->height = 0;
    c->tiles_x =
    c->tiles_y = 0;
    return ret;
}
  779. static av_cold int g2m_decode_init(AVCodecContext *avctx)
  780. {
  781. G2MContext *const c = avctx->priv_data;
  782. int ret;
  783. if ((ret = jpg_init(avctx, &c->jc)) != 0) {
  784. av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
  785. jpg_free_context(&c->jc);
  786. return AVERROR(ENOMEM);
  787. }
  788. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  789. // store original sizes and check against those if resize happens
  790. c->orig_width = avctx->width;
  791. c->orig_height = avctx->height;
  792. return 0;
  793. }
  794. static av_cold int g2m_decode_end(AVCodecContext *avctx)
  795. {
  796. G2MContext *const c = avctx->priv_data;
  797. jpg_free_context(&c->jc);
  798. av_freep(&c->kempf_buf);
  799. av_freep(&c->kempf_flags);
  800. av_freep(&c->synth_tile);
  801. av_freep(&c->jpeg_tile);
  802. av_freep(&c->cursor);
  803. av_freep(&c->framebuf);
  804. return 0;
  805. }
/* Codec registration entry for the Go2Meeting/Go2Webinar decoder. */
AVCodec ff_g2m_decoder = {
    .name           = "g2m",
    .long_name      = NULL_IF_CONFIG_SMALL("Go2Meeting"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_G2M,
    .priv_data_size = sizeof(G2MContext),
    .init           = g2m_decode_init,
    .close          = g2m_decode_end,
    .decode         = g2m_decode_frame,
    .capabilities   = CODEC_CAP_DR1, // decoder supports direct rendering (get_buffer)
};