/*
 * Go2Webinar decoder
 * Copyright (c) 2012 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Go2Webinar decoder
 */

#include <zlib.h>

#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "bytestream.h"
#include "dsputil.h"
#include "get_bits.h"
#include "internal.h"
#include "mjpeg.h"

enum ChunkType {
    FRAME_INFO = 0xC8,
    TILE_DATA,
    CURSOR_POS,
    CURSOR_SHAPE,
    CHUNK_CC,
    CHUNK_CD
};

enum Compression {
    COMPR_EPIC_J_B = 2,
    COMPR_KEMPF_J_B,
};

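/*
 * Fixed quantisation matrices for the embedded JPEG tiles. They appear to
 * be the standard JPEG Annex K tables scaled with the usual IJG formula to
 * roughly quality 75.
 */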
static const uint8_t luma_quant[64] = {
     8,  6,  5,  8, 12, 20, 26, 31,
     6,  6,  7, 10, 13, 29, 30, 28,
     7,  7,  8, 12, 20, 29, 35, 28,
     7,  9, 11, 15, 26, 44, 40, 31,
     9, 11, 19, 28, 34, 55, 52, 39,
    12, 18, 28, 32, 41, 52, 57, 46,
    25, 32, 39, 44, 52, 61, 60, 51,
    36, 46, 48, 49, 56, 50, 52, 50
};

static const uint8_t chroma_quant[64] = {
     9,  9, 12, 24, 50, 50, 50, 50,
     9, 11, 13, 33, 50, 50, 50, 50,
    12, 13, 28, 50, 50, 50, 50, 50,
    24, 33, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
    50, 50, 50, 50, 50, 50, 50, 50,
};

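/* state of the embedded baseline JPEG decoder used for tile data */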
typedef struct JPGContext {
    DSPContext dsp;
    ScanTable  scantable;
    VLC        dc_vlc[2], ac_vlc[2];
    int        prev_dc[3];
    DECLARE_ALIGNED(16, int16_t, block)[6][64];

    uint8_t   *buf;
} JPGContext;

typedef struct G2MContext {
    JPGContext jc;
    int        version;

    int        compression;
    int        width, height, bpp;
    int        tile_width, tile_height;
    int        tiles_x, tiles_y, tile_x, tile_y;

    int        got_header;

    uint8_t   *framebuf;
    int        framebuf_stride, old_width, old_height;

    uint8_t   *synth_tile, *jpeg_tile;
    int        tile_stride, old_tile_w, old_tile_h;

    uint8_t   *kempf_buf, *kempf_flags;

    uint8_t   *cursor;
    int        cursor_stride;
    int        cursor_fmt;
    int        cursor_w, cursor_h, cursor_x, cursor_y;
    int        cursor_hot_x, cursor_hot_y;
} G2MContext;

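/*
 * Build a VLC table from the MJPEG bit-length/value tables. For AC tables
 * every symbol gets its run length increased by one, so jpg_decode_block()
 * can track the scan position as an absolute coefficient index, and the
 * end-of-block code is remapped to 16 * 256 so it terminates the loop.
 */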
static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
                             const uint8_t *val_table, int nb_codes,
                             int is_ac)
{
    uint8_t  huff_size[256] = { 0 };
    uint16_t huff_code[256];
    uint16_t huff_sym[256];
    int i;

    ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);

    for (i = 0; i < 256; i++)
        huff_sym[i] = i + 16 * is_ac;

    if (is_ac)
        huff_sym[0] = 16 * 256;

    return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
                              huff_code, 2, 2, huff_sym, 2, 2, 0);
}

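/* set up the standard MJPEG Huffman tables, the IDCT and the scan order */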
static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
{
    int ret;

    ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
                    avpriv_mjpeg_val_dc, 12, 0);
    if (ret)
        return ret;
    ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
                    avpriv_mjpeg_val_dc, 12, 0);
    if (ret)
        return ret;
    ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
                    avpriv_mjpeg_val_ac_luminance, 251, 1);
    if (ret)
        return ret;
    ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
                    avpriv_mjpeg_val_ac_chrominance, 251, 1);
    if (ret)
        return ret;

    ff_dsputil_init(&c->dsp, avctx);
    ff_init_scantable(c->dsp.idct_permutation, &c->scantable,
                      ff_zigzag_direct);

    return 0;
}

static av_cold void jpg_free_context(JPGContext *ctx)
{
    int i;

    for (i = 0; i < 2; i++) {
        ff_free_vlc(&ctx->dc_vlc[i]);
        ff_free_vlc(&ctx->ac_vlc[i]);
    }

    av_freep(&ctx->buf);
}

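/* undo JPEG marker escaping: a 0xFF 0x00 byte pair becomes a plain 0xFF */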
static void jpg_unescape(const uint8_t *src, int src_size,
                         uint8_t *dst, int *dst_size)
{
    const uint8_t *src_end = src + src_size;
    uint8_t *dst_start = dst;

    while (src < src_end) {
        uint8_t x = *src++;

        *dst++ = x;

        if (x == 0xFF && !*src)
            src++;
    }
    *dst_size = dst - dst_start;
}

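/*
 * Decode and dequantise one 8x8 coefficient block: a DC delta relative to
 * the previous block of the same plane, followed by (run, size) coded AC
 * coefficients stored in scan order.
 */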
static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
                            int plane, int16_t *block)
{
    int dc, val, pos;
    const int is_chroma = !!plane;
    const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;

    c->dsp.clear_block(block);
    dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
    if (dc < 0)
        return AVERROR_INVALIDDATA;
    if (dc)
        dc = get_xbits(gb, dc);
    dc                = dc * qmat[0] + c->prev_dc[plane];
    block[0]          = dc;
    c->prev_dc[plane] = dc;

    pos = 0;
    while (pos < 63) {
        val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
        if (val < 0)
            return AVERROR_INVALIDDATA;
        pos += val >> 4;
        val &= 0xF;
        if (pos > 63)
            return val ? AVERROR_INVALIDDATA : 0;
        if (val) {
            int nbits = val;

            val                                 = get_xbits(gb, nbits);
            val                                *= qmat[ff_zigzag_direct[pos]];
            block[c->scantable.permutated[pos]] = val;
        }
    }
    return 0;
}

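/* BT.601 YUV -> packed RGB conversion with 16.16 fixed-point coefficients */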
static inline void yuv2rgb(uint8_t *out, int Y, int U, int V)
{
    out[0] = av_clip_uint8(Y + (             91881 * V + 32768 >> 16));
    out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
    out[2] = av_clip_uint8(Y + (116130 * U             + 32768 >> 16));
}

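/*
 * Decode JPEG-coded image data into an RGB24 buffer, one 16x16 macroblock
 * (four luma and two chroma blocks, 4:2:0) at a time. When a block mask is
 * given, only the macroblocks flagged in it are decoded; num_mbs limits how
 * many coded macroblocks are consumed.
 */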
static int jpg_decode_data(JPGContext *c, int width, int height,
                           const uint8_t *src, int src_size,
                           uint8_t *dst, int dst_stride,
                           const uint8_t *mask, int mask_stride, int num_mbs,
                           int swapuv)
{
    GetBitContext gb;
    uint8_t *tmp;
    int mb_w, mb_h, mb_x, mb_y, i, j;
    int bx, by;
    int unesc_size;
    int ret;

    tmp = av_realloc(c->buf, src_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!tmp)
        return AVERROR(ENOMEM);
    c->buf = tmp;
    jpg_unescape(src, src_size, c->buf, &unesc_size);
    memset(c->buf + unesc_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
    init_get_bits(&gb, c->buf, unesc_size * 8);

    width = FFALIGN(width, 16);
    mb_w  = width >> 4;
    mb_h  = (height + 15) >> 4;

    if (!num_mbs)
        num_mbs = mb_w * mb_h;

    for (i = 0; i < 3; i++)
        c->prev_dc[i] = 1024;
    bx = by = 0;
    for (mb_y = 0; mb_y < mb_h; mb_y++) {
        for (mb_x = 0; mb_x < mb_w; mb_x++) {
            if (mask && !mask[mb_x]) {
                bx += 16;
                continue;
            }
            for (j = 0; j < 2; j++) {
                for (i = 0; i < 2; i++) {
                    if ((ret = jpg_decode_block(c, &gb, 0,
                                                c->block[i + j * 2])) != 0)
                        return ret;
                    c->dsp.idct(c->block[i + j * 2]);
                }
            }
            for (i = 1; i < 3; i++) {
                if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
                    return ret;
                c->dsp.idct(c->block[i + 3]);
            }

            for (j = 0; j < 16; j++) {
                uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
                for (i = 0; i < 16; i++) {
                    int Y, U, V;

                    Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
                    U = c->block[4 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
                    V = c->block[5 ^ swapuv][(i >> 1) + (j >> 1) * 8] - 128;
                    yuv2rgb(out + i * 3, Y, U, V);
                }
            }

            if (!--num_mbs)
                return 0;
            bx += 16;
        }
        bx  = 0;
        by += 16;
        if (mask)
            mask += mask_stride;
    }
    return 0;
}

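/*
 * Paint the palettised overlay on top of the destination tile. Each row
 * starts with a flag byte; a non-zero value seems to mean the row is left
 * untouched. Otherwise every pixel carries a palette index, and the index
 * matching the transparent colour (tidx) lets the JPEG tile data show
 * through instead.
 */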
static void kempf_restore_buf(const uint8_t *src, int len,
                              uint8_t *dst, int stride,
                              const uint8_t *jpeg_tile, int tile_stride,
                              int width, int height,
                              const uint8_t *pal, int npal, int tidx)
{
    GetBitContext gb;
    int i, j, nb, col;

    init_get_bits(&gb, src, len * 8);

    if (npal <= 2)       nb = 1;
    else if (npal <= 4)  nb = 2;
    else if (npal <= 16) nb = 4;
    else                 nb = 8;

    for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
        if (get_bits(&gb, 8))
            continue;
        for (i = 0; i < width; i++) {
            col = get_bits(&gb, nb);
            if (col != tidx)
                memcpy(dst + i * 3, pal + col * 3, 3);
            else
                memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
        }
    }
}

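/*
 * Decode one tile in "Kempf" compression mode. Sub-type 0 is a solid colour
 * fill and sub-type 1 is plain JPEG data; the remaining sub-types carry a
 * palette, a zlib-compressed pixel overlay and, unless the tile is purely
 * palettised (sub-type 2), a block mask selecting which 16x16 blocks also
 * contain JPEG data.
 */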
static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
                             const uint8_t *src, int src_size)
{
    int width, height;
    int hdr, zsize, npal, tidx = -1, ret;
    int i, j;
    const uint8_t *src_end = src + src_size;
    uint8_t pal[768], transp[3];
    uLongf dlen = (c->tile_width + 1) * c->tile_height;
    int sub_type;
    int nblocks, cblocks, bstride;
    int bits, bitbuf, coded;
    uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
                   tile_y * c->tile_height * c->framebuf_stride;

    if (src_size < 2)
        return AVERROR_INVALIDDATA;

    width  = FFMIN(c->width  - tile_x * c->tile_width,  c->tile_width);
    height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);

    hdr      = *src++;
    sub_type = hdr >> 5;
    if (sub_type == 0) {
        int j;
        memcpy(transp, src, 3);
        src += 3;
        for (j = 0; j < height; j++, dst += c->framebuf_stride)
            for (i = 0; i < width; i++)
                memcpy(dst + i * 3, transp, 3);
        return 0;
    } else if (sub_type == 1) {
        return jpg_decode_data(&c->jc, width, height, src, src_end - src,
                               dst, c->framebuf_stride, NULL, 0, 0, 0);
    }

    if (sub_type != 2) {
        memcpy(transp, src, 3);
        src += 3;
    }
    npal = *src++ + 1;
    memcpy(pal, src, npal * 3);
    src += npal * 3;
    if (sub_type != 2) {
        for (i = 0; i < npal; i++) {
            if (!memcmp(pal + i * 3, transp, 3)) {
                tidx = i;
                break;
            }
        }
    }

    if (src_end - src < 2)
        return 0;
    zsize = (src[0] << 8) | src[1];
    src  += 2;

    if (src_end - src < zsize)
        return AVERROR_INVALIDDATA;

    ret = uncompress(c->kempf_buf, &dlen, src, zsize);
    if (ret)
        return AVERROR_INVALIDDATA;
    src += zsize;

    if (sub_type == 2) {
        kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                          NULL, 0, width, height, pal, npal, tidx);
        return 0;
    }

    nblocks = *src++ + 1;
    cblocks = 0;
    bstride = FFALIGN(width, 16) >> 4;
    // blocks are coded LSB and we need normal bitreader for JPEG data
    bits = 0;
    for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
        for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
            if (!bits) {
                bitbuf = *src++;
                bits   = 8;
            }
            coded    = bitbuf & 1;
            bits--;
            bitbuf >>= 1;
            cblocks += coded;
            if (cblocks > nblocks)
                return AVERROR_INVALIDDATA;
            c->kempf_flags[j + i * bstride] = coded;
        }
    }

    memset(c->jpeg_tile, 0, c->tile_stride * height);
    jpg_decode_data(&c->jc, width, height, src, src_end - src,
                    c->jpeg_tile, c->tile_stride,
                    c->kempf_flags, bstride, nblocks, 0);

    kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
                      c->jpeg_tile, c->tile_stride,
                      width, height, pal, npal, tidx);

    return 0;
}

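/* (re)allocate the frame buffer and the per-tile scratch buffers when they grow */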
static int g2m_init_buffers(G2MContext *c)
{
    int aligned_height;

    if (!c->framebuf || c->old_width < c->width ||
        c->old_height < c->height) {
        c->framebuf_stride = FFALIGN(c->width * 3, 16);
        aligned_height     = FFALIGN(c->height,    16);
        av_free(c->framebuf);
        c->framebuf = av_mallocz(c->framebuf_stride * aligned_height);
        if (!c->framebuf)
            return AVERROR(ENOMEM);
    }
    if (!c->synth_tile || !c->jpeg_tile ||
        c->old_tile_w < c->tile_width ||
        c->old_tile_h < c->tile_height) {
        c->tile_stride = FFALIGN(c->tile_width * 3, 16);
        aligned_height = FFALIGN(c->tile_height,    16);
        av_free(c->synth_tile);
        av_free(c->jpeg_tile);
        c->synth_tile  = av_mallocz(c->tile_stride * aligned_height);
        c->jpeg_tile   = av_mallocz(c->tile_stride * aligned_height);
        c->kempf_buf   = av_mallocz((c->tile_width + 1) * aligned_height
                                    + FF_INPUT_BUFFER_PADDING_SIZE);
        c->kempf_flags = av_mallocz(c->tile_width * aligned_height);
        if (!c->synth_tile || !c->jpeg_tile ||
            !c->kempf_buf || !c->kempf_flags)
            return AVERROR(ENOMEM);
    }
    return 0;
}

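/*
 * Read the cursor bitmap into a 4-bytes-per-pixel buffer whose first byte is
 * used as alpha by g2m_paint_cursor(). Format 1 is a two-plane monochrome
 * cursor expanded to opaque black, opaque white or transparent pixels;
 * format 32 additionally stores a full-colour image after the monochrome
 * one, and only the full-colour version is decoded.
 */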
static int g2m_load_cursor(G2MContext *c, GetByteContext *gb)
{
    int i, j, k;
    uint8_t *dst;
    uint32_t bits;

    c->cursor_stride = c->cursor_w * 4;
    c->cursor = av_realloc(c->cursor, c->cursor_stride * c->cursor_h);
    if (!c->cursor)
        return AVERROR(ENOMEM);

    dst = c->cursor;
    switch (c->cursor_fmt) {
    case 1: // old monochrome
        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i += 32) {
                bits = bytestream2_get_be32(gb);
                for (k = 0; k < 32; k++) {
                    dst[0] = !!(bits & 0x80000000);
                    dst   += 4;
                    bits <<= 1;
                }
            }
        }

        dst = c->cursor;
        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i += 32) {
                bits = bytestream2_get_be32(gb);
                for (k = 0; k < 32; k++) {
                    int mask_bit = !!(bits & 0x80000000);
                    switch (dst[0] * 2 + mask_bit) {
                    case 0:
                        dst[0] = 0xFF; dst[1] = 0x00;
                        dst[2] = 0x00; dst[3] = 0x00;
                        break;
                    case 1:
                        dst[0] = 0xFF; dst[1] = 0xFF;
                        dst[2] = 0xFF; dst[3] = 0xFF;
                        break;
                    default:
                        dst[0] = 0x00; dst[1] = 0x00;
                        dst[2] = 0x00; dst[3] = 0x00;
                    }
                    dst   += 4;
                    bits <<= 1;
                }
            }
        }
        break;
    case 32: // full colour
        /* skip monochrome version of the cursor and decode RGBA instead */
        bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
        for (j = 0; j < c->cursor_h; j++) {
            for (i = 0; i < c->cursor_w; i++) {
                int val = bytestream2_get_be32(gb);
                *dst++ = val >>  0;
                *dst++ = val >>  8;
                *dst++ = val >> 16;
                *dst++ = val >> 24;
            }
        }
        break;
    default:
        return AVERROR_PATCHWELCOME;
    }
    return 0;
}

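/* blend the cursor into the RGB24 frame, clipping it against the frame edges */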
#define APPLY_ALPHA(src, new, alpha) \
    src = (src * (256 - alpha) + new * alpha) >> 8

static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
{
    int i, j;
    int x, y, w, h;
    const uint8_t *cursor;

    if (!c->cursor)
        return;

    x      = c->cursor_x - c->cursor_hot_x;
    y      = c->cursor_y - c->cursor_hot_y;
    cursor = c->cursor;
    w      = c->cursor_w;
    h      = c->cursor_h;

    if (x + w > c->width)
        w = c->width - x;
    if (y + h > c->height)
        h = c->height - y;
    if (x < 0) {
        w      +=  x;
        cursor += -x * 4;
    } else {
        dst    +=  x * 3;
    }
    if (y < 0) {
        h      +=  y;
        cursor += -y * c->cursor_stride;
    } else {
        dst    +=  y * stride;
    }
    if (w < 0 || h < 0)
        return;

    for (j = 0; j < h; j++) {
        for (i = 0; i < w; i++) {
            uint8_t alpha = cursor[i * 4];

            APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
            APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
            APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
        }
        dst    += stride;
        cursor += c->cursor_stride;
    }
}

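/*
 * A frame is a sequence of size-prefixed chunks: FRAME_INFO (re)configures
 * dimensions, compression mode and tile grid, TILE_DATA updates one tile of
 * the persistent framebuffer, and the CURSOR_* chunks update the cursor
 * position and shape. The framebuffer is then copied into the output
 * picture and the cursor is painted on top.
 */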
static int g2m_decode_frame(AVCodecContext *avctx, void *data,
                            int *got_picture_ptr, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    G2MContext *c      = avctx->priv_data;
    AVFrame *pic       = data;
    GetByteContext bc, tbc;
    int magic;
    int got_header = 0;
    uint32_t chunk_size, cur_size;
    int chunk_type;
    int i;
    int ret;

    if (buf_size < 12) {
        av_log(avctx, AV_LOG_ERROR,
               "Frame should have at least 12 bytes, got %d instead\n",
               buf_size);
        return AVERROR_INVALIDDATA;
    }

    bytestream2_init(&bc, buf, buf_size);

    magic = bytestream2_get_be32(&bc);
    if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
        (magic & 0xF) < 2 || (magic & 0xF) > 4) {
        av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
        return AVERROR_INVALIDDATA;
    }

    if ((magic & 0xF) != 4) {
        av_log(avctx, AV_LOG_ERROR, "G2M2 and G2M3 are not yet supported\n");
        return AVERROR(ENOSYS);
    }

    while (bytestream2_get_bytes_left(&bc) > 5) {
        chunk_size = bytestream2_get_le32(&bc) - 1;
        chunk_type = bytestream2_get_byte(&bc);
        if (chunk_size > bytestream2_get_bytes_left(&bc)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %d type %02X\n",
                   chunk_size, chunk_type);
            break;
        }
        switch (chunk_type) {
        case FRAME_INFO:
            c->got_header = 0;
            if (chunk_size < 21) {
                av_log(avctx, AV_LOG_ERROR, "Invalid frame info size %d\n",
                       chunk_size);
                break;
            }
            c->width  = bytestream2_get_be32(&bc);
            c->height = bytestream2_get_be32(&bc);
            if (c->width  < 16 || c->width  > avctx->width ||
                c->height < 16 || c->height > avctx->height) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid frame dimensions %dx%d\n",
                       c->width, c->height);
                c->width = c->height = 0;
                bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc));
            }
            if (c->width != avctx->width || c->height != avctx->height)
                avcodec_set_dimensions(avctx, c->width, c->height);
            c->compression = bytestream2_get_be32(&bc);
            if (c->compression != 2 && c->compression != 3) {
                av_log(avctx, AV_LOG_ERROR,
                       "Unknown compression method %d\n",
                       c->compression);
                return AVERROR_PATCHWELCOME;
            }
            c->tile_width  = bytestream2_get_be32(&bc);
            c->tile_height = bytestream2_get_be32(&bc);
            if (!c->tile_width || !c->tile_height) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid tile dimensions %dx%d\n",
                       c->tile_width, c->tile_height);
                return AVERROR_INVALIDDATA;
            }
            c->tiles_x = (c->width  + c->tile_width  - 1) / c->tile_width;
            c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
            c->bpp     = bytestream2_get_byte(&bc);
            chunk_size -= 21;
            bytestream2_skip(&bc, chunk_size);
            if (g2m_init_buffers(c))
                return AVERROR(ENOMEM);
            got_header = 1;
            break;
        case TILE_DATA:
            if (!c->tiles_x || !c->tiles_y) {
                av_log(avctx, AV_LOG_WARNING,
                       "No frame header - skipping tile\n");
                bytestream2_skip(&bc, bytestream2_get_bytes_left(&bc));
                break;
            }
            if (chunk_size < 2) {
                av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %d\n",
                       chunk_size);
                break;
            }
            c->tile_x = bytestream2_get_byte(&bc);
            c->tile_y = bytestream2_get_byte(&bc);
            if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid tile pos %d,%d (in %dx%d grid)\n",
                       c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
                break;
            }
            chunk_size -= 2;
            ret = 0;
            switch (c->compression) {
            case COMPR_EPIC_J_B:
                av_log(avctx, AV_LOG_ERROR,
                       "ePIC j-b compression is not implemented yet\n");
                return AVERROR(ENOSYS);
            case COMPR_KEMPF_J_B:
                ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
                                        buf + bytestream2_tell(&bc),
                                        chunk_size);
                break;
            }
            if (ret && c->framebuf)
                av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
                       c->tile_x, c->tile_y);
            bytestream2_skip(&bc, chunk_size);
            break;
        case CURSOR_POS:
            if (chunk_size < 5) {
                av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %d\n",
                       chunk_size);
                break;
            }
            c->cursor_x = bytestream2_get_be16(&bc);
            c->cursor_y = bytestream2_get_be16(&bc);
            bytestream2_skip(&bc, chunk_size - 4);
            break;
        case CURSOR_SHAPE:
            if (chunk_size < 8) {
                av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n",
                       chunk_size);
                break;
            }
            bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
                             chunk_size - 4);
            cur_size        = bytestream2_get_be32(&tbc);
            c->cursor_w     = bytestream2_get_byte(&tbc);
            c->cursor_h     = bytestream2_get_byte(&tbc);
            c->cursor_hot_x = bytestream2_get_byte(&tbc);
            c->cursor_hot_y = bytestream2_get_byte(&tbc);
            c->cursor_fmt   = bytestream2_get_byte(&tbc);
            if (cur_size >= chunk_size ||
                c->cursor_w * c->cursor_h / 4 > cur_size) {
                av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %d\n",
                       chunk_size);
                break;
            }
            g2m_load_cursor(c, &tbc);
            bytestream2_skip(&bc, chunk_size);
            break;
        case CHUNK_CC:
        case CHUNK_CD:
            bytestream2_skip(&bc, chunk_size);
            break;
        default:
            av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02X\n",
                   chunk_type);
            bytestream2_skip(&bc, chunk_size);
        }
    }

    if (got_header)
        c->got_header = 1;

    if (c->width && c->height) {
        if ((ret = ff_get_buffer(avctx, pic, 0)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return ret;
        }
        pic->key_frame = got_header;
        pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;

        for (i = 0; i < avctx->height; i++)
            memcpy(pic->data[0] + i * pic->linesize[0],
                   c->framebuf + i * c->framebuf_stride,
                   c->width * 3);
        g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);

        *got_picture_ptr = 1;
    }

    return buf_size;
}

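/* codec housekeeping: set up the JPEG helper on init, free everything on close */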
static av_cold int g2m_decode_init(AVCodecContext *avctx)
{
    G2MContext *const c = avctx->priv_data;
    int ret;

    if ((ret = jpg_init(avctx, &c->jc)) != 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
        jpg_free_context(&c->jc);
        return AVERROR(ENOMEM);
    }

    avctx->pix_fmt = PIX_FMT_RGB24;

    return 0;
}

static av_cold int g2m_decode_end(AVCodecContext *avctx)
{
    G2MContext *const c = avctx->priv_data;

    jpg_free_context(&c->jc);

    av_freep(&c->kempf_buf);
    av_freep(&c->kempf_flags);
    av_freep(&c->synth_tile);
    av_freep(&c->jpeg_tile);
    av_freep(&c->cursor);
    av_freep(&c->framebuf);

    return 0;
}

AVCodec ff_g2m_decoder = {
    .name           = "g2m",
    .long_name      = NULL_IF_CONFIG_SMALL("Go2Meeting"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_G2M,
    .priv_data_size = sizeof(G2MContext),
    .init           = g2m_decode_init,
    .close          = g2m_decode_end,
    .decode         = g2m_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
};