  1. /*
  2. * Go2Webinar / Go2Meeting decoder
  3. * Copyright (c) 2012 Konstantin Shishkov
  4. * Copyright (c) 2013 Maxim Poliakovski
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * Go2Webinar / Go2Meeting decoder
  25. */
  26. #include <inttypes.h>
  27. #include <zlib.h>
  28. #include "libavutil/imgutils.h"
  29. #include "libavutil/intreadwrite.h"
  30. #include "avcodec.h"
  31. #include "blockdsp.h"
  32. #include "bytestream.h"
  33. #include "elsdec.h"
  34. #include "get_bits.h"
  35. #include "idctdsp.h"
  36. #include "internal.h"
  37. #include "jpegtables.h"
  38. #include "mjpeg.h"
  39. #define EPIC_PIX_STACK_SIZE 1024
  40. #define EPIC_PIX_STACK_MAX (EPIC_PIX_STACK_SIZE - 1)
  41. enum ChunkType {
  42. DISPLAY_INFO = 0xC8,
  43. TILE_DATA,
  44. CURSOR_POS,
  45. CURSOR_SHAPE,
  46. CHUNK_CC,
  47. CHUNK_CD
  48. };
  49. enum Compression {
  50. COMPR_EPIC_J_B = 2,
  51. COMPR_KEMPF_J_B,
  52. };
  53. static const uint8_t luma_quant[64] = {
  54. 8, 6, 5, 8, 12, 20, 26, 31,
  55. 6, 6, 7, 10, 13, 29, 30, 28,
  56. 7, 7, 8, 12, 20, 29, 35, 28,
  57. 7, 9, 11, 15, 26, 44, 40, 31,
  58. 9, 11, 19, 28, 34, 55, 52, 39,
  59. 12, 18, 28, 32, 41, 52, 57, 46,
  60. 25, 32, 39, 44, 52, 61, 60, 51,
  61. 36, 46, 48, 49, 56, 50, 52, 50
  62. };
  63. static const uint8_t chroma_quant[64] = {
  64. 9, 9, 12, 24, 50, 50, 50, 50,
  65. 9, 11, 13, 33, 50, 50, 50, 50,
  66. 12, 13, 28, 50, 50, 50, 50, 50,
  67. 24, 33, 50, 50, 50, 50, 50, 50,
  68. 50, 50, 50, 50, 50, 50, 50, 50,
  69. 50, 50, 50, 50, 50, 50, 50, 50,
  70. 50, 50, 50, 50, 50, 50, 50, 50,
  71. 50, 50, 50, 50, 50, 50, 50, 50,
  72. };
  73. typedef struct ePICPixListElem {
  74. struct ePICPixListElem *next;
  75. uint32_t pixel;
  76. uint8_t rung;
  77. } ePICPixListElem;
  78. typedef struct ePICPixHashElem {
  79. uint32_t pix_id;
  80. struct ePICPixListElem *list;
  81. } ePICPixHashElem;
  82. #define EPIC_HASH_SIZE 256
  83. typedef struct ePICPixHash {
  84. ePICPixHashElem *bucket[EPIC_HASH_SIZE];
  85. int bucket_size[EPIC_HASH_SIZE];
  86. int bucket_fill[EPIC_HASH_SIZE];
  87. } ePICPixHash;
  88. typedef struct ePICContext {
  89. ElsDecCtx els_ctx;
  90. int next_run_pos;
  91. ElsUnsignedRung unsigned_rung;
  92. uint8_t W_flag_rung;
  93. uint8_t N_flag_rung;
  94. uint8_t W_ctx_rung[256];
  95. uint8_t N_ctx_rung[512];
  96. uint8_t nw_pred_rung[256];
  97. uint8_t ne_pred_rung[256];
  98. uint8_t prev_row_rung[14];
  99. uint8_t runlen_zeroes[14];
  100. uint8_t runlen_one;
  101. int stack_pos;
  102. uint32_t stack[EPIC_PIX_STACK_SIZE];
  103. ePICPixHash hash;
  104. } ePICContext;
  105. typedef struct JPGContext {
  106. BlockDSPContext bdsp;
  107. IDCTDSPContext idsp;
  108. ScanTable scantable;
  109. VLC dc_vlc[2], ac_vlc[2];
  110. int prev_dc[3];
  111. DECLARE_ALIGNED(32, int16_t, block)[6][64];
  112. uint8_t *buf;
  113. } JPGContext;
  114. typedef struct G2MContext {
  115. ePICContext ec;
  116. JPGContext jc;
  117. int version;
  118. int compression;
  119. int width, height, bpp;
  120. int orig_width, orig_height;
  121. int tile_width, tile_height;
  122. int tiles_x, tiles_y, tile_x, tile_y;
  123. int got_header;
  124. uint8_t *framebuf;
  125. int framebuf_stride, old_width, old_height;
  126. uint8_t *synth_tile, *jpeg_tile, *epic_buf, *epic_buf_base;
  127. int tile_stride, epic_buf_stride, old_tile_w, old_tile_h;
  128. int swapuv;
  129. uint8_t *kempf_buf, *kempf_flags;
  130. uint8_t *cursor;
  131. int cursor_stride;
  132. int cursor_fmt;
  133. int cursor_w, cursor_h, cursor_x, cursor_y;
  134. int cursor_hot_x, cursor_hot_y;
  135. } G2MContext;
  136. static av_cold int build_vlc(VLC *vlc, const uint8_t *bits_table,
  137. const uint8_t *val_table, int nb_codes,
  138. int is_ac)
  139. {
  140. uint8_t huff_size[256] = { 0 };
  141. uint16_t huff_code[256];
  142. uint16_t huff_sym[256];
  143. int i;
  144. ff_mjpeg_build_huffman_codes(huff_size, huff_code, bits_table, val_table);
  145. for (i = 0; i < 256; i++)
  146. huff_sym[i] = i + 16 * is_ac;
  147. if (is_ac)
  148. huff_sym[0] = 16 * 256;
  149. return ff_init_vlc_sparse(vlc, 9, nb_codes, huff_size, 1, 1,
  150. huff_code, 2, 2, huff_sym, 2, 2, 0);
  151. }
  152. static av_cold int jpg_init(AVCodecContext *avctx, JPGContext *c)
  153. {
  154. int ret;
  155. ret = build_vlc(&c->dc_vlc[0], avpriv_mjpeg_bits_dc_luminance,
  156. avpriv_mjpeg_val_dc, 12, 0);
  157. if (ret)
  158. return ret;
  159. ret = build_vlc(&c->dc_vlc[1], avpriv_mjpeg_bits_dc_chrominance,
  160. avpriv_mjpeg_val_dc, 12, 0);
  161. if (ret)
  162. return ret;
  163. ret = build_vlc(&c->ac_vlc[0], avpriv_mjpeg_bits_ac_luminance,
  164. avpriv_mjpeg_val_ac_luminance, 251, 1);
  165. if (ret)
  166. return ret;
  167. ret = build_vlc(&c->ac_vlc[1], avpriv_mjpeg_bits_ac_chrominance,
  168. avpriv_mjpeg_val_ac_chrominance, 251, 1);
  169. if (ret)
  170. return ret;
  171. ff_blockdsp_init(&c->bdsp, avctx);
  172. ff_idctdsp_init(&c->idsp, avctx);
  173. ff_init_scantable(c->idsp.idct_permutation, &c->scantable,
  174. ff_zigzag_direct);
  175. return 0;
  176. }
  177. static av_cold void jpg_free_context(JPGContext *ctx)
  178. {
  179. int i;
  180. for (i = 0; i < 2; i++) {
  181. ff_free_vlc(&ctx->dc_vlc[i]);
  182. ff_free_vlc(&ctx->ac_vlc[i]);
  183. }
  184. av_freep(&ctx->buf);
  185. }
  186. static void jpg_unescape(const uint8_t *src, int src_size,
  187. uint8_t *dst, int *dst_size)
  188. {
  189. const uint8_t *src_end = src + src_size;
  190. uint8_t *dst_start = dst;
  191. while (src < src_end) {
  192. uint8_t x = *src++;
  193. *dst++ = x;
  194. if (x == 0xFF && !*src)
  195. src++;
  196. }
  197. *dst_size = dst - dst_start;
  198. }
  199. static int jpg_decode_block(JPGContext *c, GetBitContext *gb,
  200. int plane, int16_t *block)
  201. {
  202. int dc, val, pos;
  203. const int is_chroma = !!plane;
  204. const uint8_t *qmat = is_chroma ? chroma_quant : luma_quant;
  205. if (get_bits_left(gb) < 1)
  206. return AVERROR_INVALIDDATA;
  207. c->bdsp.clear_block(block);
  208. dc = get_vlc2(gb, c->dc_vlc[is_chroma].table, 9, 3);
  209. if (dc < 0)
  210. return AVERROR_INVALIDDATA;
  211. if (dc)
  212. dc = get_xbits(gb, dc);
  213. dc = dc * qmat[0] + c->prev_dc[plane];
  214. block[0] = dc;
  215. c->prev_dc[plane] = dc;
  216. pos = 0;
  217. while (pos < 63) {
  218. val = get_vlc2(gb, c->ac_vlc[is_chroma].table, 9, 3);
  219. if (val < 0)
  220. return AVERROR_INVALIDDATA;
  221. pos += val >> 4;
  222. val &= 0xF;
  223. if (pos > 63)
  224. return val ? AVERROR_INVALIDDATA : 0;
  225. if (val) {
  226. int nbits = val;
  227. val = get_xbits(gb, nbits);
  228. val *= qmat[ff_zigzag_direct[pos]];
  229. block[c->scantable.permutated[pos]] = val;
  230. }
  231. }
  232. return 0;
  233. }
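/* Fixed-point YUV -> RGB: the constants below are the BT.601 conversion
 * factors scaled by 2^16 (e.g. 1.402 * 65536 ~= 91881); adding 32768
 * rounds the fractional part before the final shift. */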
  234. static inline void yuv2rgb(uint8_t *out, int ridx, int Y, int U, int V)
  235. {
  236. out[ridx] = av_clip_uint8(Y + (91881 * V + 32768 >> 16));
  237. out[1] = av_clip_uint8(Y + (-22554 * U - 46802 * V + 32768 >> 16));
  238. out[2 - ridx] = av_clip_uint8(Y + (116130 * U + 32768 >> 16));
  239. }
  240. static int jpg_decode_data(JPGContext *c, int width, int height,
  241. const uint8_t *src, int src_size,
  242. uint8_t *dst, int dst_stride,
  243. const uint8_t *mask, int mask_stride, int num_mbs,
  244. int swapuv)
  245. {
  246. GetBitContext gb;
  247. int mb_w, mb_h, mb_x, mb_y, i, j;
  248. int bx, by;
  249. int unesc_size;
  250. int ret;
  251. const int ridx = swapuv ? 2 : 0;
  252. if ((ret = av_reallocp(&c->buf,
  253. src_size + AV_INPUT_BUFFER_PADDING_SIZE)) < 0)
  254. return ret;
  255. jpg_unescape(src, src_size, c->buf, &unesc_size);
  256. memset(c->buf + unesc_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
257. if ((ret = init_get_bits8(&gb, c->buf, unesc_size)) < 0)
  258. return ret;
  259. width = FFALIGN(width, 16);
  260. mb_w = width >> 4;
  261. mb_h = (height + 15) >> 4;
  262. if (!num_mbs)
  263. num_mbs = mb_w * mb_h * 4;
  264. for (i = 0; i < 3; i++)
  265. c->prev_dc[i] = 1024;
  266. bx =
  267. by = 0;
  268. c->bdsp.clear_blocks(c->block[0]);
  269. for (mb_y = 0; mb_y < mb_h; mb_y++) {
  270. for (mb_x = 0; mb_x < mb_w; mb_x++) {
  271. if (mask && !mask[mb_x * 2] && !mask[mb_x * 2 + 1] &&
  272. !mask[mb_x * 2 + mask_stride] &&
  273. !mask[mb_x * 2 + 1 + mask_stride]) {
  274. bx += 16;
  275. continue;
  276. }
  277. for (j = 0; j < 2; j++) {
  278. for (i = 0; i < 2; i++) {
  279. if (mask && !mask[mb_x * 2 + i + j * mask_stride])
  280. continue;
  281. num_mbs--;
  282. if ((ret = jpg_decode_block(c, &gb, 0,
  283. c->block[i + j * 2])) != 0)
  284. return ret;
  285. c->idsp.idct(c->block[i + j * 2]);
  286. }
  287. }
  288. for (i = 1; i < 3; i++) {
  289. if ((ret = jpg_decode_block(c, &gb, i, c->block[i + 3])) != 0)
  290. return ret;
  291. c->idsp.idct(c->block[i + 3]);
  292. }
  293. for (j = 0; j < 16; j++) {
  294. uint8_t *out = dst + bx * 3 + (by + j) * dst_stride;
  295. for (i = 0; i < 16; i++) {
  296. int Y, U, V;
  297. Y = c->block[(j >> 3) * 2 + (i >> 3)][(i & 7) + (j & 7) * 8];
  298. U = c->block[4][(i >> 1) + (j >> 1) * 8] - 128;
  299. V = c->block[5][(i >> 1) + (j >> 1) * 8] - 128;
  300. yuv2rgb(out + i * 3, ridx, Y, U, V);
  301. }
  302. }
  303. if (!num_mbs)
  304. return 0;
  305. bx += 16;
  306. }
  307. bx = 0;
  308. by += 16;
  309. if (mask)
  310. mask += mask_stride * 2;
  311. }
  312. return 0;
  313. }
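/* Neighbour shorthand used by the ePIC predictor: W = left, N = above,
 * NW/NE = above-left/above-right, WW = two to the left, NN = two above,
 * NWW = above row two to the left, NNW/NNE = two rows above, one to the
 * left/right. */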
  314. #define LOAD_NEIGHBOURS(x) \
  315. W = curr_row[(x) - 1]; \
  316. N = above_row[(x)]; \
  317. WW = curr_row[(x) - 2]; \
  318. NW = above_row[(x) - 1]; \
  319. NE = above_row[(x) + 1]; \
  320. NN = above2_row[(x)]; \
  321. NNW = above2_row[(x) - 1]; \
  322. NWW = above_row[(x) - 2]; \
  323. NNE = above2_row[(x) + 1]
  324. #define UPDATE_NEIGHBOURS(x) \
  325. NNW = NN; \
  326. NN = NNE; \
  327. NWW = NW; \
  328. NW = N; \
  329. N = NE; \
  330. NE = above_row[(x) + 1]; \
  331. NNE = above2_row[(x) + 1]
  332. #define R_shift 16
  333. #define G_shift 8
  334. #define B_shift 0
  335. /* improved djb2 hash from http://www.cse.yorku.ca/~oz/hash.html */
  336. static int djb2_hash(uint32_t key)
  337. {
  338. uint32_t h = 5381;
  339. h = (h * 33) ^ ((key >> 24) & 0xFF); // xxx: probably not needed at all
  340. h = (h * 33) ^ ((key >> 16) & 0xFF);
  341. h = (h * 33) ^ ((key >> 8) & 0xFF);
  342. h = (h * 33) ^ (key & 0xFF);
  343. return h & (EPIC_HASH_SIZE - 1);
  344. }
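/* The pixel cache maps a previously seen left-neighbour (W) value to a
 * most-recently-used list of pixels that have followed it; buckets grow
 * in chunks of 16 entries on demand. */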
  345. static void epic_hash_init(ePICPixHash *hash)
  346. {
  347. memset(hash, 0, sizeof(*hash));
  348. }
  349. static ePICPixHashElem *epic_hash_find(const ePICPixHash *hash, uint32_t key)
  350. {
  351. int i, idx = djb2_hash(key);
  352. ePICPixHashElem *bucket = hash->bucket[idx];
  353. for (i = 0; i < hash->bucket_fill[idx]; i++)
  354. if (bucket[i].pix_id == key)
  355. return &bucket[i];
  356. return NULL;
  357. }
  358. static ePICPixHashElem *epic_hash_add(ePICPixHash *hash, uint32_t key)
  359. {
  360. ePICPixHashElem *bucket, *ret;
  361. int idx = djb2_hash(key);
  362. if (hash->bucket_size[idx] > INT_MAX / sizeof(**hash->bucket))
  363. return NULL;
  364. if (!(hash->bucket_fill[idx] < hash->bucket_size[idx])) {
  365. int new_size = hash->bucket_size[idx] + 16;
  366. bucket = av_realloc(hash->bucket[idx], new_size * sizeof(*bucket));
  367. if (!bucket)
  368. return NULL;
  369. hash->bucket[idx] = bucket;
  370. hash->bucket_size[idx] = new_size;
  371. }
  372. ret = &hash->bucket[idx][hash->bucket_fill[idx]++];
  373. memset(ret, 0, sizeof(*ret));
  374. ret->pix_id = key;
  375. return ret;
  376. }
  377. static int epic_add_pixel_to_cache(ePICPixHash *hash, uint32_t key, uint32_t pix)
  378. {
  379. ePICPixListElem *new_elem;
  380. ePICPixHashElem *hash_elem = epic_hash_find(hash, key);
  381. if (!hash_elem) {
  382. if (!(hash_elem = epic_hash_add(hash, key)))
  383. return AVERROR(ENOMEM);
  384. }
  385. new_elem = av_mallocz(sizeof(*new_elem));
  386. if (!new_elem)
  387. return AVERROR(ENOMEM);
  388. new_elem->pixel = pix;
  389. new_elem->next = hash_elem->list;
  390. hash_elem->list = new_elem;
  391. return 0;
  392. }
  393. static inline int epic_cache_entries_for_pixel(const ePICPixHash *hash,
  394. uint32_t pix)
  395. {
  396. ePICPixHashElem *hash_elem = epic_hash_find(hash, pix);
  397. if (hash_elem != NULL && hash_elem->list != NULL)
  398. return 1;
  399. return 0;
  400. }
  401. static void epic_free_pixel_cache(ePICPixHash *hash)
  402. {
  403. int i, j;
  404. for (i = 0; i < EPIC_HASH_SIZE; i++) {
  405. for (j = 0; j < hash->bucket_fill[i]; j++) {
  406. ePICPixListElem *list_elem = hash->bucket[i][j].list;
  407. while (list_elem) {
  408. ePICPixListElem *tmp = list_elem->next;
  409. av_free(list_elem);
  410. list_elem = tmp;
  411. }
  412. }
  413. av_freep(&hash->bucket[i]);
  414. hash->bucket_size[i] =
  415. hash->bucket_fill[i] = 0;
  416. }
  417. }
  418. static inline int is_pixel_on_stack(const ePICContext *dc, uint32_t pix)
  419. {
  420. int i;
  421. for (i = 0; i < dc->stack_pos; i++)
  422. if (dc->stack[i] == pix)
  423. break;
  424. return i != dc->stack_pos;
  425. }
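/* undo the even/odd mapping of signed residuals: 0, 1, 2, 3, 4, ... is
 * decoded as 0, -1, 1, -2, 2, ... */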
  426. #define TOSIGNED(val) (((val) >> 1) ^ -((val) & 1))
  427. static inline int epic_decode_component_pred(ePICContext *dc,
  428. int N, int W, int NW)
  429. {
  430. unsigned delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
  431. return mid_pred(N, N + W - NW, W) - TOSIGNED(delta);
  432. }
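/* Full-pixel prediction: G uses a median (gradient) predictor from N, W
 * and NW, while R and B are coded as offsets from G to exploit
 * inter-channel correlation; on the first row or column only the single
 * available neighbour is used as the predictor. */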
  433. static uint32_t epic_decode_pixel_pred(ePICContext *dc, int x, int y,
  434. const uint32_t *curr_row,
  435. const uint32_t *above_row)
  436. {
  437. uint32_t N, W, NW, pred;
  438. unsigned delta;
  439. int GN, GW, GNW, R, G, B;
  440. if (x && y) {
  441. W = curr_row[x - 1];
  442. N = above_row[x];
  443. NW = above_row[x - 1];
  444. GN = (N >> G_shift) & 0xFF;
  445. GW = (W >> G_shift) & 0xFF;
  446. GNW = (NW >> G_shift) & 0xFF;
  447. G = epic_decode_component_pred(dc, GN, GW, GNW);
  448. R = G + epic_decode_component_pred(dc,
  449. ((N >> R_shift) & 0xFF) - GN,
  450. ((W >> R_shift) & 0xFF) - GW,
  451. ((NW >> R_shift) & 0xFF) - GNW);
  452. B = G + epic_decode_component_pred(dc,
  453. ((N >> B_shift) & 0xFF) - GN,
  454. ((W >> B_shift) & 0xFF) - GW,
  455. ((NW >> B_shift) & 0xFF) - GNW);
  456. } else {
  457. if (x)
  458. pred = curr_row[x - 1];
  459. else
  460. pred = above_row[x];
  461. delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
  462. R = ((pred >> R_shift) & 0xFF) - TOSIGNED(delta);
  463. delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
  464. G = ((pred >> G_shift) & 0xFF) - TOSIGNED(delta);
  465. delta = ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung);
  466. B = ((pred >> B_shift) & 0xFF) - TOSIGNED(delta);
  467. }
468. if (R < 0 || G < 0 || B < 0 || R > 255 || G > 255 || B > 255) {
  469. avpriv_request_sample(NULL, "RGB %d %d %d is out of range\n", R, G, B);
  470. return 0;
  471. }
  472. return (R << R_shift) | (G << G_shift) | (B << B_shift);
  473. }
  474. static int epic_predict_pixel(ePICContext *dc, uint8_t *rung,
  475. uint32_t *pPix, uint32_t pix)
  476. {
  477. if (!ff_els_decode_bit(&dc->els_ctx, rung)) {
  478. *pPix = pix;
  479. return 1;
  480. }
  481. dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = pix;
  482. return 0;
  483. }
  484. static int epic_handle_edges(ePICContext *dc, int x, int y,
  485. const uint32_t *curr_row,
  486. const uint32_t *above_row, uint32_t *pPix)
  487. {
  488. uint32_t pix;
  489. if (!x && !y) { /* special case: top-left pixel */
  490. /* the top-left pixel is coded independently with 3 unsigned numbers */
  491. *pPix = (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << R_shift) |
  492. (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << G_shift) |
  493. (ff_els_decode_unsigned(&dc->els_ctx, &dc->unsigned_rung) << B_shift);
  494. return 1;
  495. }
  496. if (x) { /* predict from W first */
  497. pix = curr_row[x - 1];
  498. if (epic_predict_pixel(dc, &dc->W_flag_rung, pPix, pix))
  499. return 1;
  500. }
  501. if (y) { /* then try to predict from N */
  502. pix = above_row[x];
  503. if (!dc->stack_pos || dc->stack[0] != pix) {
  504. if (epic_predict_pixel(dc, &dc->N_flag_rung, pPix, pix))
  505. return 1;
  506. }
  507. }
  508. return 0;
  509. }
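/* The 'idx' values built below pack pairwise equality flags of the
 * neighbouring pixels into a context number that selects which adaptive
 * rung the ELS bit decoder uses. */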
  510. static int epic_decode_run_length(ePICContext *dc, int x, int y, int tile_width,
  511. const uint32_t *curr_row,
  512. const uint32_t *above_row,
  513. const uint32_t *above2_row,
  514. uint32_t *pPix, int *pRun)
  515. {
  516. int idx, got_pixel = 0, WWneW, old_WWneW = 0;
  517. uint32_t W, WW, N, NN, NW, NE, NWW, NNW, NNE;
  518. *pRun = 0;
  519. LOAD_NEIGHBOURS(x);
  520. if (dc->next_run_pos == x) {
  521. /* can't reuse W for the new pixel in this case */
  522. WWneW = 1;
  523. } else {
  524. idx = (WW != W) << 7 |
  525. (NW != W) << 6 |
  526. (N != NE) << 5 |
  527. (NW != N) << 4 |
  528. (NWW != NW) << 3 |
  529. (NNE != NE) << 2 |
  530. (NN != N) << 1 |
  531. (NNW != NW);
  532. WWneW = ff_els_decode_bit(&dc->els_ctx, &dc->W_ctx_rung[idx]);
  533. if (WWneW < 0)
  534. return WWneW;
  535. }
  536. if (WWneW)
  537. dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = W;
  538. else {
  539. *pPix = W;
  540. got_pixel = 1;
  541. }
  542. do {
  543. int NWneW = 1;
  544. if (got_pixel) // pixel value already known (derived from either W or N)
  545. NWneW = *pPix != N;
  546. else { // pixel value is unknown and will be decoded later
  547. NWneW = *pRun ? NWneW : NW != W;
  548. /* TODO: RFC this mess! */
  549. switch (((NW != N) << 2) | (NWneW << 1) | WWneW) {
  550. case 0:
  551. break; // do nothing here
  552. case 3:
  553. case 5:
  554. case 6:
  555. case 7:
  556. if (!is_pixel_on_stack(dc, N)) {
  557. idx = WWneW << 8 |
  558. (*pRun ? old_WWneW : WW != W) << 7 |
  559. NWneW << 6 |
  560. (N != NE) << 5 |
  561. (NW != N) << 4 |
  562. (NWW != NW) << 3 |
  563. (NNE != NE) << 2 |
  564. (NN != N) << 1 |
  565. (NNW != NW);
  566. if (!ff_els_decode_bit(&dc->els_ctx, &dc->N_ctx_rung[idx])) {
  567. NWneW = 0;
  568. *pPix = N;
  569. got_pixel = 1;
  570. break;
  571. }
  572. }
  573. /* fall through */
  574. default:
  575. NWneW = 1;
  576. old_WWneW = WWneW;
  577. if (!is_pixel_on_stack(dc, N))
  578. dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = N;
  579. }
  580. }
  581. (*pRun)++;
  582. if (x + *pRun >= tile_width - 1)
  583. break;
  584. UPDATE_NEIGHBOURS(x + *pRun);
  585. if (!NWneW && NW == N && N == NE) {
  586. int pos, run, rle;
  587. int start_pos = x + *pRun;
  588. /* scan for a run of pix in the line above */
  589. uint32_t pix = above_row[start_pos + 1];
  590. for (pos = start_pos + 2; pos < tile_width; pos++)
  591. if (!(above_row[pos] == pix))
  592. break;
  593. run = pos - start_pos - 1;
  594. idx = av_ceil_log2(run);
  595. if (ff_els_decode_bit(&dc->els_ctx, &dc->prev_row_rung[idx]))
  596. *pRun += run;
  597. else {
  598. int flag;
  599. /* run-length is coded as plain binary number of idx - 1 bits */
  600. for (pos = idx - 1, rle = 0, flag = 0; pos >= 0; pos--) {
  601. if ((1 << pos) + rle < run &&
  602. ff_els_decode_bit(&dc->els_ctx,
  603. flag ? &dc->runlen_one
  604. : &dc->runlen_zeroes[pos])) {
  605. flag = 1;
  606. rle |= 1 << pos;
  607. }
  608. }
  609. *pRun += rle;
  610. break; // return immediately
  611. }
  612. if (x + *pRun >= tile_width - 1)
  613. break;
  614. LOAD_NEIGHBOURS(x + *pRun);
  615. WWneW = 0;
  616. NWneW = 0;
  617. }
  618. idx = WWneW << 7 |
  619. NWneW << 6 |
  620. (N != NE) << 5 |
  621. (NW != N) << 4 |
  622. (NWW != NW) << 3 |
  623. (NNE != NE) << 2 |
  624. (NN != N) << 1 |
  625. (NNW != NW);
  626. WWneW = ff_els_decode_bit(&dc->els_ctx, &dc->W_ctx_rung[idx]);
  627. } while (!WWneW);
  628. dc->next_run_pos = x + *pRun;
  629. return got_pixel;
  630. }
  631. static int epic_predict_pixel2(ePICContext *dc, uint8_t *rung,
  632. uint32_t *pPix, uint32_t pix)
  633. {
  634. if (ff_els_decode_bit(&dc->els_ctx, rung)) {
  635. *pPix = pix;
  636. return 1;
  637. }
  638. dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = pix;
  639. return 0;
  640. }
  641. static int epic_predict_from_NW_NE(ePICContext *dc, int x, int y, int run,
  642. int tile_width, const uint32_t *curr_row,
  643. const uint32_t *above_row, uint32_t *pPix)
  644. {
  645. int pos;
  646. /* try to reuse the NW pixel first */
  647. if (x && y) {
  648. uint32_t NW = above_row[x - 1];
  649. if (NW != curr_row[x - 1] && NW != above_row[x] && !is_pixel_on_stack(dc, NW)) {
  650. if (epic_predict_pixel2(dc, &dc->nw_pred_rung[NW & 0xFF], pPix, NW))
  651. return 1;
  652. }
  653. }
  654. /* try to reuse the NE[x + run, y] pixel */
  655. pos = x + run - 1;
  656. if (pos < tile_width - 1 && y) {
  657. uint32_t NE = above_row[pos + 1];
  658. if (NE != above_row[pos] && !is_pixel_on_stack(dc, NE)) {
  659. if (epic_predict_pixel2(dc, &dc->ne_pred_rung[NE & 0xFF], pPix, NE))
  660. return 1;
  661. }
  662. }
  663. return 0;
  664. }
  665. static int epic_decode_from_cache(ePICContext *dc, uint32_t W, uint32_t *pPix)
  666. {
  667. ePICPixListElem *list, *prev = NULL;
  668. ePICPixHashElem *hash_elem = epic_hash_find(&dc->hash, W);
  669. if (!hash_elem || !hash_elem->list)
  670. return 0;
  671. list = hash_elem->list;
  672. while (list) {
  673. if (!is_pixel_on_stack(dc, list->pixel)) {
  674. if (ff_els_decode_bit(&dc->els_ctx, &list->rung)) {
  675. *pPix = list->pixel;
  676. if (list != hash_elem->list) {
  677. prev->next = list->next;
  678. list->next = hash_elem->list;
  679. hash_elem->list = list;
  680. }
  681. return 1;
  682. }
  683. dc->stack[dc->stack_pos++ & EPIC_PIX_STACK_MAX] = list->pixel;
  684. }
  685. prev = list;
  686. list = list->next;
  687. }
  688. return 0;
  689. }
  690. static int epic_decode_tile(ePICContext *dc, uint8_t *out, int tile_height,
  691. int tile_width, int stride)
  692. {
  693. int x, y;
  694. uint32_t pix;
  695. uint32_t *curr_row = NULL, *above_row = NULL, *above2_row;
  696. for (y = 0; y < tile_height; y++, out += stride) {
  697. above2_row = above_row;
  698. above_row = curr_row;
  699. curr_row = (uint32_t *) out;
  700. for (x = 0, dc->next_run_pos = 0; x < tile_width;) {
  701. if (dc->els_ctx.err)
  702. return AVERROR_INVALIDDATA; // bail out in the case of ELS overflow
  703. pix = curr_row[x - 1]; // get W pixel
  704. if (y >= 1 && x >= 2 &&
  705. pix != curr_row[x - 2] && pix != above_row[x - 1] &&
  706. pix != above_row[x - 2] && pix != above_row[x] &&
  707. !epic_cache_entries_for_pixel(&dc->hash, pix)) {
  708. curr_row[x] = epic_decode_pixel_pred(dc, x, y, curr_row, above_row);
  709. x++;
  710. } else {
  711. int got_pixel, run;
  712. dc->stack_pos = 0; // empty stack
  713. if (y < 2 || x < 2 || x == tile_width - 1) {
  714. run = 1;
  715. got_pixel = epic_handle_edges(dc, x, y, curr_row, above_row, &pix);
  716. } else {
  717. got_pixel = epic_decode_run_length(dc, x, y, tile_width,
  718. curr_row, above_row,
  719. above2_row, &pix, &run);
  720. if (got_pixel < 0)
  721. return got_pixel;
  722. }
  723. if (!got_pixel && !epic_predict_from_NW_NE(dc, x, y, run,
  724. tile_width, curr_row,
  725. above_row, &pix)) {
  726. uint32_t ref_pix = curr_row[x - 1];
  727. if (!x || !epic_decode_from_cache(dc, ref_pix, &pix)) {
  728. pix = epic_decode_pixel_pred(dc, x, y, curr_row, above_row);
  729. if (is_pixel_on_stack(dc, pix))
  730. return AVERROR_INVALIDDATA;
  731. if (x) {
  732. int ret = epic_add_pixel_to_cache(&dc->hash,
  733. ref_pix,
  734. pix);
  735. if (ret)
  736. return ret;
  737. }
  738. }
  739. }
  740. for (; run > 0; x++, run--)
  741. curr_row[x] = pix;
  742. }
  743. }
  744. }
  745. return 0;
  746. }
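/* An ePIC tile starts with a variable-length integer giving the size of
 * the ELS-coded part; any bytes after it are JPEG data used for 8x8 blocks
 * that contain the transparent colour. A zero ELS size means the whole
 * tile is plain JPEG. */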
  747. static int epic_jb_decode_tile(G2MContext *c, int tile_x, int tile_y,
  748. const uint8_t *src, size_t src_size,
  749. AVCodecContext *avctx)
  750. {
  751. uint8_t prefix, mask = 0x80;
  752. int extrabytes, tile_width, tile_height, awidth, aheight;
  753. size_t els_dsize;
  754. uint8_t *dst;
  755. if (!src_size)
  756. return 0;
  757. /* get data size of the ELS partition as unsigned variable-length integer */
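/* the number of leading one bits in the first byte gives the count of
 * extra size bytes, e.g. 0x85 0x12 -> one extra byte,
 * size = (0x85 & 0x3F) << 8 | 0x12 = 0x0512 */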
  758. prefix = *src++;
  759. src_size--;
  760. for (extrabytes = 0; (prefix & mask) && (extrabytes < 7); extrabytes++)
  761. mask >>= 1;
  762. if (extrabytes > 3 || src_size < extrabytes) {
  763. av_log(avctx, AV_LOG_ERROR, "ePIC: invalid data size VLI\n");
  764. return AVERROR_INVALIDDATA;
  765. }
  766. els_dsize = prefix & ((0x80 >> extrabytes) - 1); // mask out the length prefix
  767. while (extrabytes-- > 0) {
  768. els_dsize = (els_dsize << 8) | *src++;
  769. src_size--;
  770. }
  771. if (src_size < els_dsize) {
  772. av_log(avctx, AV_LOG_ERROR, "ePIC: data too short, needed %"SIZE_SPECIFIER", got %"SIZE_SPECIFIER"\n",
  773. els_dsize, src_size);
  774. return AVERROR_INVALIDDATA;
  775. }
  776. tile_width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
  777. tile_height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
  778. awidth = FFALIGN(tile_width, 16);
  779. aheight = FFALIGN(tile_height, 16);
  780. if (els_dsize) {
  781. int ret, i, j, k;
  782. uint8_t tr_r, tr_g, tr_b, *buf;
  783. uint32_t *in;
  784. /* ELS decoder initializations */
  785. memset(&c->ec, 0, sizeof(c->ec));
  786. ff_els_decoder_init(&c->ec.els_ctx, src, els_dsize);
  787. epic_hash_init(&c->ec.hash);
  788. /* decode transparent pixel value */
  789. tr_r = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
  790. tr_g = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
  791. tr_b = ff_els_decode_unsigned(&c->ec.els_ctx, &c->ec.unsigned_rung);
  792. if (c->ec.els_ctx.err != 0) {
  793. av_log(avctx, AV_LOG_ERROR,
  794. "ePIC: couldn't decode transparency pixel!\n");
  795. ff_els_decoder_uninit(&c->ec.unsigned_rung);
  796. return AVERROR_INVALIDDATA;
  797. }
  798. ret = epic_decode_tile(&c->ec, c->epic_buf, tile_height, tile_width,
  799. c->epic_buf_stride);
  800. epic_free_pixel_cache(&c->ec.hash);
  801. ff_els_decoder_uninit(&c->ec.unsigned_rung);
  802. if (ret) {
  803. av_log(avctx, AV_LOG_ERROR,
  804. "ePIC: tile decoding failed, frame=%d, tile_x=%d, tile_y=%d\n",
  805. avctx->frame_number, tile_x, tile_y);
  806. return AVERROR_INVALIDDATA;
  807. }
  808. buf = c->epic_buf;
  809. dst = c->framebuf + tile_x * c->tile_width * 3 +
  810. tile_y * c->tile_height * c->framebuf_stride;
  811. for (j = 0; j < tile_height; j++) {
  812. uint8_t *out = dst;
  813. in = (uint32_t *) buf;
  814. for (i = 0; i < tile_width; i++) {
  815. out[0] = (in[i] >> R_shift) & 0xFF;
  816. out[1] = (in[i] >> G_shift) & 0xFF;
  817. out[2] = (in[i] >> B_shift) & 0xFF;
  818. out += 3;
  819. }
  820. buf += c->epic_buf_stride;
  821. dst += c->framebuf_stride;
  822. }
  823. if (src_size > els_dsize) {
  824. uint8_t *jpg;
  825. uint32_t tr;
  826. int bstride = FFALIGN(tile_width, 16) >> 3;
  827. int nblocks = 0;
  828. int estride = c->epic_buf_stride >> 2;
  829. src += els_dsize;
  830. src_size -= els_dsize;
  831. in = (uint32_t *) c->epic_buf;
  832. tr = (tr_r << R_shift) | (tr_g << G_shift) | (tr_b << B_shift);
  833. memset(c->kempf_flags, 0,
  834. (aheight >> 3) * bstride * sizeof(*c->kempf_flags));
  835. for (j = 0; j < tile_height; j += 8) {
  836. for (i = 0; i < tile_width; i += 8) {
  837. c->kempf_flags[(i >> 3) + (j >> 3) * bstride] = 0;
  838. for (k = 0; k < 8 * 8; k++) {
  839. if (in[i + (k & 7) + (k >> 3) * estride] == tr) {
  840. c->kempf_flags[(i >> 3) + (j >> 3) * bstride] = 1;
  841. nblocks++;
  842. break;
  843. }
  844. }
  845. }
  846. in += 8 * estride;
  847. }
  848. memset(c->jpeg_tile, 0, c->tile_stride * aheight);
  849. jpg_decode_data(&c->jc, awidth, aheight, src, src_size,
  850. c->jpeg_tile, c->tile_stride,
  851. c->kempf_flags, bstride, nblocks, c->swapuv);
  852. in = (uint32_t *) c->epic_buf;
  853. dst = c->framebuf + tile_x * c->tile_width * 3 +
  854. tile_y * c->tile_height * c->framebuf_stride;
  855. jpg = c->jpeg_tile;
  856. for (j = 0; j < tile_height; j++) {
  857. for (i = 0; i < tile_width; i++)
  858. if (in[i] == tr)
  859. memcpy(dst + i * 3, jpg + i * 3, 3);
  860. in += c->epic_buf_stride >> 2;
  861. dst += c->framebuf_stride;
  862. jpg += c->tile_stride;
  863. }
  864. }
  865. } else {
  866. dst = c->framebuf + tile_x * c->tile_width * 3 +
  867. tile_y * c->tile_height * c->framebuf_stride;
  868. return jpg_decode_data(&c->jc, tile_width, tile_height, src, src_size,
  869. dst, c->framebuf_stride, NULL, 0, 0, c->swapuv);
  870. }
  871. return 0;
  872. }
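/* Rebuild a tile from zlib-decompressed palette indices: each row starts
 * with a byte that is non-zero when the row is left untouched, followed by
 * one index per pixel (1, 2, 4 or 8 bits depending on the palette size);
 * an index equal to 'tidx' takes the pixel from the JPEG tile instead. */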
  873. static int kempf_restore_buf(const uint8_t *src, int len,
  874. uint8_t *dst, int stride,
  875. const uint8_t *jpeg_tile, int tile_stride,
  876. int width, int height,
  877. const uint8_t *pal, int npal, int tidx)
  878. {
  879. GetBitContext gb;
  880. int i, j, nb, col;
  881. int ret;
  882. int align_width = FFALIGN(width, 16);
  883. if ((ret = init_get_bits8(&gb, src, len)) < 0)
  884. return ret;
  885. if (npal <= 2) nb = 1;
  886. else if (npal <= 4) nb = 2;
  887. else if (npal <= 16) nb = 4;
  888. else nb = 8;
  889. for (j = 0; j < height; j++, dst += stride, jpeg_tile += tile_stride) {
  890. if (get_bits(&gb, 8))
  891. continue;
  892. for (i = 0; i < width; i++) {
  893. col = get_bits(&gb, nb);
  894. if (col != tidx)
  895. memcpy(dst + i * 3, pal + col * 3, 3);
  896. else
  897. memcpy(dst + i * 3, jpeg_tile + i * 3, 3);
  898. }
  899. skip_bits_long(&gb, nb * (align_width - width));
  900. }
  901. return 0;
  902. }
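/* Kempf tile layout: the top three header bits select the sub-type
 * (0 = solid fill with the transparency colour, 1 = plain JPEG,
 * 2 = palette-only); other sub-types carry a palette plus zlib-compressed
 * indices, with JPEG data filling the blocks flagged as transparent. */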
  903. static int kempf_decode_tile(G2MContext *c, int tile_x, int tile_y,
  904. const uint8_t *src, int src_size)
  905. {
  906. int width, height;
  907. int hdr, zsize, npal, tidx = -1, ret;
  908. int i, j;
  909. const uint8_t *src_end = src + src_size;
  910. uint8_t pal[768], transp[3];
  911. uLongf dlen = (c->tile_width + 1) * c->tile_height;
  912. int sub_type;
  913. int nblocks, cblocks, bstride;
  914. int bits, bitbuf, coded;
  915. uint8_t *dst = c->framebuf + tile_x * c->tile_width * 3 +
  916. tile_y * c->tile_height * c->framebuf_stride;
  917. if (src_size < 2)
  918. return AVERROR_INVALIDDATA;
  919. width = FFMIN(c->width - tile_x * c->tile_width, c->tile_width);
  920. height = FFMIN(c->height - tile_y * c->tile_height, c->tile_height);
  921. hdr = *src++;
  922. sub_type = hdr >> 5;
  923. if (sub_type == 0) {
  924. int j;
  925. memcpy(transp, src, 3);
  926. src += 3;
  927. for (j = 0; j < height; j++, dst += c->framebuf_stride)
  928. for (i = 0; i < width; i++)
  929. memcpy(dst + i * 3, transp, 3);
  930. return 0;
  931. } else if (sub_type == 1) {
  932. return jpg_decode_data(&c->jc, width, height, src, src_end - src,
  933. dst, c->framebuf_stride, NULL, 0, 0, 0);
  934. }
  935. if (sub_type != 2) {
  936. memcpy(transp, src, 3);
  937. src += 3;
  938. }
  939. npal = *src++ + 1;
  940. if (src_end - src < npal * 3)
  941. return AVERROR_INVALIDDATA;
  942. memcpy(pal, src, npal * 3);
  943. src += npal * 3;
  944. if (sub_type != 2) {
  945. for (i = 0; i < npal; i++) {
  946. if (!memcmp(pal + i * 3, transp, 3)) {
  947. tidx = i;
  948. break;
  949. }
  950. }
  951. }
  952. if (src_end - src < 2)
  953. return 0;
  954. zsize = (src[0] << 8) | src[1];
  955. src += 2;
  956. if (src_end - src < zsize + (sub_type != 2))
  957. return AVERROR_INVALIDDATA;
  958. ret = uncompress(c->kempf_buf, &dlen, src, zsize);
  959. if (ret)
  960. return AVERROR_INVALIDDATA;
  961. src += zsize;
  962. if (sub_type == 2) {
  963. kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
  964. NULL, 0, width, height, pal, npal, tidx);
  965. return 0;
  966. }
  967. nblocks = *src++ + 1;
  968. cblocks = 0;
  969. bstride = FFALIGN(width, 16) >> 3;
970. // block coverage flags are stored LSB-first, so they are read by hand
971. // here; the JPEG data that follows needs the normal MSB-first bitreader
  971. bits = 0;
  972. for (i = 0; i < (FFALIGN(height, 16) >> 4); i++) {
  973. for (j = 0; j < (FFALIGN(width, 16) >> 4); j++) {
  974. if (!bits) {
  975. if (src >= src_end)
  976. return AVERROR_INVALIDDATA;
  977. bitbuf = *src++;
  978. bits = 8;
  979. }
  980. coded = bitbuf & 1;
  981. bits--;
  982. bitbuf >>= 1;
  983. cblocks += coded;
  984. if (cblocks > nblocks)
  985. return AVERROR_INVALIDDATA;
  986. c->kempf_flags[j * 2 + i * 2 * bstride] =
  987. c->kempf_flags[j * 2 + 1 + i * 2 * bstride] =
  988. c->kempf_flags[j * 2 + (i * 2 + 1) * bstride] =
  989. c->kempf_flags[j * 2 + 1 + (i * 2 + 1) * bstride] = coded;
  990. }
  991. }
  992. memset(c->jpeg_tile, 0, c->tile_stride * height);
  993. jpg_decode_data(&c->jc, width, height, src, src_end - src,
  994. c->jpeg_tile, c->tile_stride,
  995. c->kempf_flags, bstride, nblocks * 4, 0);
  996. kempf_restore_buf(c->kempf_buf, dlen, dst, c->framebuf_stride,
  997. c->jpeg_tile, c->tile_stride,
  998. width, height, pal, npal, tidx);
  999. return 0;
  1000. }
  1001. static int g2m_init_buffers(G2MContext *c)
  1002. {
  1003. int aligned_height;
  1004. if (!c->framebuf || c->old_width < c->width || c->old_height < c->height) {
  1005. c->framebuf_stride = FFALIGN(c->width + 15, 16) * 3;
  1006. aligned_height = c->height + 15;
  1007. av_free(c->framebuf);
  1008. c->framebuf = av_mallocz_array(c->framebuf_stride, aligned_height);
  1009. if (!c->framebuf)
  1010. return AVERROR(ENOMEM);
  1011. }
  1012. if (!c->synth_tile || !c->jpeg_tile ||
  1013. (c->compression == 2 && !c->epic_buf_base) ||
  1014. c->old_tile_w < c->tile_width ||
  1015. c->old_tile_h < c->tile_height) {
  1016. c->tile_stride = FFALIGN(c->tile_width, 16) * 3;
  1017. c->epic_buf_stride = FFALIGN(c->tile_width * 4, 16);
  1018. aligned_height = FFALIGN(c->tile_height, 16);
  1019. av_freep(&c->synth_tile);
  1020. av_freep(&c->jpeg_tile);
  1021. av_freep(&c->kempf_buf);
  1022. av_freep(&c->kempf_flags);
  1023. av_freep(&c->epic_buf_base);
  1024. c->epic_buf = NULL;
  1025. c->synth_tile = av_mallocz(c->tile_stride * aligned_height);
  1026. c->jpeg_tile = av_mallocz(c->tile_stride * aligned_height);
  1027. c->kempf_buf = av_mallocz((c->tile_width + 1) * aligned_height +
  1028. AV_INPUT_BUFFER_PADDING_SIZE);
  1029. c->kempf_flags = av_mallocz(c->tile_width * aligned_height);
  1030. if (!c->synth_tile || !c->jpeg_tile ||
  1031. !c->kempf_buf || !c->kempf_flags)
  1032. return AVERROR(ENOMEM);
  1033. if (c->compression == 2) {
  1034. c->epic_buf_base = av_mallocz(c->epic_buf_stride * aligned_height + 4);
  1035. if (!c->epic_buf_base)
  1036. return AVERROR(ENOMEM);
  1037. c->epic_buf = c->epic_buf_base + 4;
  1038. }
  1039. }
  1040. return 0;
  1041. }
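/* Cursor format 1 stores two 1bpp planes that are combined into RGBA
 * (opaque black, opaque white or fully transparent); format 32 stores a
 * 32-bit per pixel image preceded by a monochrome version that is
 * skipped. */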
  1042. static int g2m_load_cursor(AVCodecContext *avctx, G2MContext *c,
  1043. GetByteContext *gb)
  1044. {
  1045. int i, j, k;
  1046. uint8_t *dst;
  1047. uint32_t bits;
  1048. uint32_t cur_size, cursor_w, cursor_h, cursor_stride;
  1049. uint32_t cursor_hot_x, cursor_hot_y;
  1050. int cursor_fmt, err;
  1051. cur_size = bytestream2_get_be32(gb);
  1052. cursor_w = bytestream2_get_byte(gb);
  1053. cursor_h = bytestream2_get_byte(gb);
  1054. cursor_hot_x = bytestream2_get_byte(gb);
  1055. cursor_hot_y = bytestream2_get_byte(gb);
  1056. cursor_fmt = bytestream2_get_byte(gb);
1057. cursor_stride = FFALIGN(cursor_w, cursor_fmt == 1 ? 32 : 1) * 4;
  1058. if (cursor_w < 1 || cursor_w > 256 ||
  1059. cursor_h < 1 || cursor_h > 256) {
  1060. av_log(avctx, AV_LOG_ERROR, "Invalid cursor dimensions %"PRIu32"x%"PRIu32"\n",
  1061. cursor_w, cursor_h);
  1062. return AVERROR_INVALIDDATA;
  1063. }
  1064. if (cursor_hot_x > cursor_w || cursor_hot_y > cursor_h) {
  1065. av_log(avctx, AV_LOG_WARNING, "Invalid hotspot position %"PRIu32",%"PRIu32"\n",
  1066. cursor_hot_x, cursor_hot_y);
  1067. cursor_hot_x = FFMIN(cursor_hot_x, cursor_w - 1);
  1068. cursor_hot_y = FFMIN(cursor_hot_y, cursor_h - 1);
  1069. }
  1070. if (cur_size - 9 > bytestream2_get_bytes_left(gb) ||
  1071. c->cursor_w * c->cursor_h / 4 > cur_size) {
  1072. av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"/%u\n",
  1073. cur_size, bytestream2_get_bytes_left(gb));
  1074. return AVERROR_INVALIDDATA;
  1075. }
  1076. if (cursor_fmt != 1 && cursor_fmt != 32) {
  1077. avpriv_report_missing_feature(avctx, "Cursor format %d",
  1078. cursor_fmt);
  1079. return AVERROR_PATCHWELCOME;
  1080. }
  1081. if ((err = av_reallocp(&c->cursor, cursor_stride * cursor_h)) < 0) {
  1082. av_log(avctx, AV_LOG_ERROR, "Cannot allocate cursor buffer\n");
  1083. return err;
  1084. }
  1085. c->cursor_w = cursor_w;
  1086. c->cursor_h = cursor_h;
  1087. c->cursor_hot_x = cursor_hot_x;
  1088. c->cursor_hot_y = cursor_hot_y;
  1089. c->cursor_fmt = cursor_fmt;
  1090. c->cursor_stride = cursor_stride;
  1091. dst = c->cursor;
  1092. switch (c->cursor_fmt) {
  1093. case 1: // old monochrome
  1094. for (j = 0; j < c->cursor_h; j++) {
  1095. for (i = 0; i < c->cursor_w; i += 32) {
  1096. bits = bytestream2_get_be32(gb);
  1097. for (k = 0; k < 32; k++) {
  1098. dst[0] = !!(bits & 0x80000000);
  1099. dst += 4;
  1100. bits <<= 1;
  1101. }
  1102. }
  1103. }
  1104. dst = c->cursor;
  1105. for (j = 0; j < c->cursor_h; j++) {
  1106. for (i = 0; i < c->cursor_w; i += 32) {
  1107. bits = bytestream2_get_be32(gb);
  1108. for (k = 0; k < 32; k++) {
  1109. int mask_bit = !!(bits & 0x80000000);
  1110. switch (dst[0] * 2 + mask_bit) {
  1111. case 0:
  1112. dst[0] = 0xFF;
  1113. dst[1] = 0x00;
  1114. dst[2] = 0x00;
  1115. dst[3] = 0x00;
  1116. break;
  1117. case 1:
  1118. dst[0] = 0xFF;
  1119. dst[1] = 0xFF;
  1120. dst[2] = 0xFF;
  1121. dst[3] = 0xFF;
  1122. break;
  1123. default:
  1124. dst[0] = 0x00;
  1125. dst[1] = 0x00;
  1126. dst[2] = 0x00;
  1127. dst[3] = 0x00;
  1128. }
  1129. dst += 4;
  1130. bits <<= 1;
  1131. }
  1132. }
  1133. }
  1134. break;
  1135. case 32: // full colour
  1136. /* skip monochrome version of the cursor and decode RGBA instead */
  1137. bytestream2_skip(gb, c->cursor_h * (FFALIGN(c->cursor_w, 32) >> 3));
  1138. for (j = 0; j < c->cursor_h; j++) {
  1139. for (i = 0; i < c->cursor_w; i++) {
  1140. int val = bytestream2_get_be32(gb);
  1141. *dst++ = val >> 0;
  1142. *dst++ = val >> 8;
  1143. *dst++ = val >> 16;
  1144. *dst++ = val >> 24;
  1145. }
  1146. }
  1147. break;
  1148. default:
  1149. return AVERROR_PATCHWELCOME;
  1150. }
  1151. return 0;
  1152. }
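/* blend the RGBA cursor over the RGB framebuffer: the first byte of each
 * cursor pixel is the alpha weight, applied as new * alpha / 256 */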
  1153. #define APPLY_ALPHA(src, new, alpha) \
  1154. src = (src * (256 - alpha) + new * alpha) >> 8
  1155. static void g2m_paint_cursor(G2MContext *c, uint8_t *dst, int stride)
  1156. {
  1157. int i, j;
  1158. int x, y, w, h;
  1159. const uint8_t *cursor;
  1160. if (!c->cursor)
  1161. return;
  1162. x = c->cursor_x - c->cursor_hot_x;
  1163. y = c->cursor_y - c->cursor_hot_y;
  1164. cursor = c->cursor;
  1165. w = c->cursor_w;
  1166. h = c->cursor_h;
  1167. if (x + w > c->width)
  1168. w = c->width - x;
  1169. if (y + h > c->height)
  1170. h = c->height - y;
  1171. if (x < 0) {
  1172. w += x;
  1173. cursor += -x * 4;
  1174. } else {
  1175. dst += x * 3;
  1176. }
  1177. if (y < 0)
  1178. h += y;
  1179. if (w < 0 || h < 0)
  1180. return;
  1181. if (y < 0) {
  1182. cursor += -y * c->cursor_stride;
  1183. } else {
  1184. dst += y * stride;
  1185. }
  1186. for (j = 0; j < h; j++) {
  1187. for (i = 0; i < w; i++) {
  1188. uint8_t alpha = cursor[i * 4];
  1189. APPLY_ALPHA(dst[i * 3 + 0], cursor[i * 4 + 1], alpha);
  1190. APPLY_ALPHA(dst[i * 3 + 1], cursor[i * 4 + 2], alpha);
  1191. APPLY_ALPHA(dst[i * 3 + 2], cursor[i * 4 + 3], alpha);
  1192. }
  1193. dst += stride;
  1194. cursor += c->cursor_stride;
  1195. }
  1196. }
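/* A frame is a 'G2M2'..'G2M5' magic followed by chunks, each being a
 * little-endian 32-bit size (counting the type byte), a type byte and the
 * payload. DISPLAY_INFO re-initialises the frame and tile geometry,
 * TILE_DATA carries one compressed tile, CURSOR_POS and CURSOR_SHAPE
 * update the pointer overlay. */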
  1197. static int g2m_decode_frame(AVCodecContext *avctx, void *data,
  1198. int *got_picture_ptr, AVPacket *avpkt)
  1199. {
  1200. const uint8_t *buf = avpkt->data;
  1201. int buf_size = avpkt->size;
  1202. G2MContext *c = avctx->priv_data;
  1203. AVFrame *pic = data;
  1204. GetByteContext bc, tbc;
  1205. int magic;
  1206. int got_header = 0;
  1207. uint32_t chunk_size, r_mask, g_mask, b_mask;
  1208. int chunk_type, chunk_start;
  1209. int i;
  1210. int ret;
  1211. if (buf_size < 12) {
  1212. av_log(avctx, AV_LOG_ERROR,
  1213. "Frame should have at least 12 bytes, got %d instead\n",
  1214. buf_size);
  1215. return AVERROR_INVALIDDATA;
  1216. }
  1217. bytestream2_init(&bc, buf, buf_size);
  1218. magic = bytestream2_get_be32(&bc);
  1219. if ((magic & ~0xF) != MKBETAG('G', '2', 'M', '0') ||
  1220. (magic & 0xF) < 2 || (magic & 0xF) > 5) {
  1221. av_log(avctx, AV_LOG_ERROR, "Wrong magic %08X\n", magic);
  1222. return AVERROR_INVALIDDATA;
  1223. }
  1224. c->swapuv = magic == MKBETAG('G', '2', 'M', '2');
  1225. while (bytestream2_get_bytes_left(&bc) > 5) {
  1226. chunk_size = bytestream2_get_le32(&bc) - 1;
  1227. chunk_type = bytestream2_get_byte(&bc);
  1228. chunk_start = bytestream2_tell(&bc);
  1229. if (chunk_size > bytestream2_get_bytes_left(&bc)) {
  1230. av_log(avctx, AV_LOG_ERROR, "Invalid chunk size %"PRIu32" type %02X\n",
  1231. chunk_size, chunk_type);
  1232. break;
  1233. }
  1234. switch (chunk_type) {
  1235. case DISPLAY_INFO:
  1236. got_header =
  1237. c->got_header = 0;
  1238. if (chunk_size < 21) {
  1239. av_log(avctx, AV_LOG_ERROR, "Invalid display info size %"PRIu32"\n",
  1240. chunk_size);
  1241. break;
  1242. }
  1243. c->width = bytestream2_get_be32(&bc);
  1244. c->height = bytestream2_get_be32(&bc);
  1245. if (c->width < 16 || c->height < 16) {
  1246. av_log(avctx, AV_LOG_ERROR,
  1247. "Invalid frame dimensions %dx%d\n",
  1248. c->width, c->height);
  1249. ret = AVERROR_INVALIDDATA;
  1250. goto header_fail;
  1251. }
  1252. if (c->width != avctx->width || c->height != avctx->height) {
  1253. ret = ff_set_dimensions(avctx, c->width, c->height);
  1254. if (ret < 0)
  1255. goto header_fail;
  1256. }
  1257. c->compression = bytestream2_get_be32(&bc);
  1258. if (c->compression != 2 && c->compression != 3) {
  1259. avpriv_report_missing_feature(avctx, "Compression method %d",
  1260. c->compression);
  1261. ret = AVERROR_PATCHWELCOME;
  1262. goto header_fail;
  1263. }
  1264. c->tile_width = bytestream2_get_be32(&bc);
  1265. c->tile_height = bytestream2_get_be32(&bc);
  1266. if (c->tile_width <= 0 || c->tile_height <= 0 ||
  1267. ((c->tile_width | c->tile_height) & 0xF) ||
  1268. c->tile_width * (uint64_t)c->tile_height >= INT_MAX / 4 ||
  1269. av_image_check_size2(c->tile_width, c->tile_height, avctx->max_pixels, avctx->pix_fmt, 0, avctx) < 0
  1270. ) {
  1271. av_log(avctx, AV_LOG_ERROR,
  1272. "Invalid tile dimensions %dx%d\n",
  1273. c->tile_width, c->tile_height);
  1274. ret = AVERROR_INVALIDDATA;
  1275. goto header_fail;
  1276. }
  1277. c->tiles_x = (c->width + c->tile_width - 1) / c->tile_width;
  1278. c->tiles_y = (c->height + c->tile_height - 1) / c->tile_height;
  1279. c->bpp = bytestream2_get_byte(&bc);
  1280. if (c->bpp == 32) {
  1281. if (bytestream2_get_bytes_left(&bc) < 16 ||
  1282. (chunk_size - 21) < 16) {
  1283. av_log(avctx, AV_LOG_ERROR,
  1284. "Display info: missing bitmasks!\n");
  1285. ret = AVERROR_INVALIDDATA;
  1286. goto header_fail;
  1287. }
  1288. r_mask = bytestream2_get_be32(&bc);
  1289. g_mask = bytestream2_get_be32(&bc);
  1290. b_mask = bytestream2_get_be32(&bc);
  1291. if (r_mask != 0xFF0000 || g_mask != 0xFF00 || b_mask != 0xFF) {
  1292. avpriv_report_missing_feature(avctx,
  1293. "Bitmasks: R=%"PRIX32", G=%"PRIX32", B=%"PRIX32,
  1294. r_mask, g_mask, b_mask);
  1295. ret = AVERROR_PATCHWELCOME;
  1296. goto header_fail;
  1297. }
  1298. } else {
  1299. avpriv_request_sample(avctx, "bpp=%d", c->bpp);
  1300. ret = AVERROR_PATCHWELCOME;
  1301. goto header_fail;
  1302. }
  1303. if (g2m_init_buffers(c)) {
  1304. ret = AVERROR(ENOMEM);
  1305. goto header_fail;
  1306. }
  1307. got_header = 1;
  1308. break;
  1309. case TILE_DATA:
  1310. if (!c->tiles_x || !c->tiles_y) {
  1311. av_log(avctx, AV_LOG_WARNING,
  1312. "No display info - skipping tile\n");
  1313. break;
  1314. }
  1315. if (chunk_size < 2) {
  1316. av_log(avctx, AV_LOG_ERROR, "Invalid tile data size %"PRIu32"\n",
  1317. chunk_size);
  1318. break;
  1319. }
  1320. c->tile_x = bytestream2_get_byte(&bc);
  1321. c->tile_y = bytestream2_get_byte(&bc);
  1322. if (c->tile_x >= c->tiles_x || c->tile_y >= c->tiles_y) {
  1323. av_log(avctx, AV_LOG_ERROR,
  1324. "Invalid tile pos %d,%d (in %dx%d grid)\n",
  1325. c->tile_x, c->tile_y, c->tiles_x, c->tiles_y);
  1326. break;
  1327. }
  1328. ret = 0;
  1329. switch (c->compression) {
  1330. case COMPR_EPIC_J_B:
  1331. ret = epic_jb_decode_tile(c, c->tile_x, c->tile_y,
  1332. buf + bytestream2_tell(&bc),
  1333. chunk_size - 2, avctx);
  1334. break;
  1335. case COMPR_KEMPF_J_B:
  1336. ret = kempf_decode_tile(c, c->tile_x, c->tile_y,
  1337. buf + bytestream2_tell(&bc),
  1338. chunk_size - 2);
  1339. break;
  1340. }
  1341. if (ret && c->framebuf)
  1342. av_log(avctx, AV_LOG_ERROR, "Error decoding tile %d,%d\n",
  1343. c->tile_x, c->tile_y);
  1344. break;
  1345. case CURSOR_POS:
  1346. if (chunk_size < 5) {
  1347. av_log(avctx, AV_LOG_ERROR, "Invalid cursor pos size %"PRIu32"\n",
  1348. chunk_size);
  1349. break;
  1350. }
  1351. c->cursor_x = bytestream2_get_be16(&bc);
  1352. c->cursor_y = bytestream2_get_be16(&bc);
  1353. break;
  1354. case CURSOR_SHAPE:
  1355. if (chunk_size < 8) {
  1356. av_log(avctx, AV_LOG_ERROR, "Invalid cursor data size %"PRIu32"\n",
  1357. chunk_size);
  1358. break;
  1359. }
  1360. bytestream2_init(&tbc, buf + bytestream2_tell(&bc),
  1361. chunk_size - 4);
  1362. g2m_load_cursor(avctx, c, &tbc);
  1363. break;
  1364. case CHUNK_CC:
  1365. case CHUNK_CD:
  1366. break;
  1367. default:
  1368. av_log(avctx, AV_LOG_WARNING, "Skipping chunk type %02d\n",
  1369. chunk_type);
  1370. }
  1371. /* navigate to next chunk */
  1372. bytestream2_skip(&bc, chunk_start + chunk_size - bytestream2_tell(&bc));
  1373. }
  1374. if (got_header)
  1375. c->got_header = 1;
  1376. if (c->width && c->height && c->framebuf) {
  1377. if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
  1378. return ret;
  1379. pic->key_frame = got_header;
  1380. pic->pict_type = got_header ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
  1381. for (i = 0; i < avctx->height; i++)
  1382. memcpy(pic->data[0] + i * pic->linesize[0],
  1383. c->framebuf + i * c->framebuf_stride,
  1384. c->width * 3);
  1385. g2m_paint_cursor(c, pic->data[0], pic->linesize[0]);
  1386. *got_picture_ptr = 1;
  1387. }
  1388. return buf_size;
  1389. header_fail:
  1390. c->width =
  1391. c->height = 0;
  1392. c->tiles_x =
  1393. c->tiles_y = 0;
  1394. c->tile_width =
  1395. c->tile_height = 0;
  1396. return ret;
  1397. }
  1398. static av_cold int g2m_decode_init(AVCodecContext *avctx)
  1399. {
  1400. G2MContext *const c = avctx->priv_data;
  1401. int ret;
  1402. if ((ret = jpg_init(avctx, &c->jc)) != 0) {
  1403. av_log(avctx, AV_LOG_ERROR, "Cannot initialise VLCs\n");
  1404. jpg_free_context(&c->jc);
  1405. return AVERROR(ENOMEM);
  1406. }
  1407. avctx->pix_fmt = AV_PIX_FMT_RGB24;
  1408. // store original sizes and check against those if resize happens
  1409. c->orig_width = avctx->width;
  1410. c->orig_height = avctx->height;
  1411. return 0;
  1412. }
  1413. static av_cold int g2m_decode_end(AVCodecContext *avctx)
  1414. {
  1415. G2MContext *const c = avctx->priv_data;
  1416. jpg_free_context(&c->jc);
  1417. av_freep(&c->epic_buf_base);
  1418. c->epic_buf = NULL;
  1419. av_freep(&c->kempf_buf);
  1420. av_freep(&c->kempf_flags);
  1421. av_freep(&c->synth_tile);
  1422. av_freep(&c->jpeg_tile);
  1423. av_freep(&c->cursor);
  1424. av_freep(&c->framebuf);
  1425. return 0;
  1426. }
  1427. AVCodec ff_g2m_decoder = {
  1428. .name = "g2m",
  1429. .long_name = NULL_IF_CONFIG_SMALL("Go2Meeting"),
  1430. .type = AVMEDIA_TYPE_VIDEO,
  1431. .id = AV_CODEC_ID_G2M,
  1432. .priv_data_size = sizeof(G2MContext),
  1433. .init = g2m_decode_init,
  1434. .close = g2m_decode_end,
  1435. .decode = g2m_decode_frame,
  1436. .capabilities = AV_CODEC_CAP_DR1,
  1437. .caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
  1438. };