/*
 * Wing Commander/Xan Video Decoder
 * Copyright (C) 2003 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 *
 */

/**
 * @file xan.c
 * Xan video decoder for Wing Commander III & IV computer games
 * by Mario Brito (mbrito@student.dei.uc.pt)
 * and Mike Melanson (melanson@pcisys.net)
 *
 * The xan_wc3 decoder outputs the following colorspaces natively:
 *   PAL8 (default), RGB555, RGB565, RGB24, BGR24, RGBA32, YUV444P
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"

#define PALETTE_COUNT 256
#define PALETTE_CONTROL_SIZE ((256 * 3) + 1)

typedef struct XanContext {

    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame last_frame;
    AVFrame current_frame;

    unsigned char *buf;
    int size;

    unsigned char palette[PALETTE_COUNT * 4];

    /* scratch space */
    unsigned char *buffer1;
    unsigned char *buffer2;

} XanContext;

#define BE_16(x)  ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1])
#define LE_16(x)  ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0])
#define LE_32(x)  ((((uint8_t*)(x))[3] << 24) | \
                   (((uint8_t*)(x))[2] << 16) | \
                   (((uint8_t*)(x))[1] << 8) | \
                    ((uint8_t*)(x))[0])

/* RGB -> YUV conversion stuff */
#define SCALEFACTOR 65536
#define CENTERSAMPLE 128

#define COMPUTE_Y(r, g, b) \
    (unsigned char) \
    ((y_r_table[r] + y_g_table[g] + y_b_table[b]) / SCALEFACTOR)
#define COMPUTE_U(r, g, b) \
    (unsigned char) \
    ((u_r_table[r] + u_g_table[g] + u_b_table[b]) / SCALEFACTOR + CENTERSAMPLE)
#define COMPUTE_V(r, g, b) \
    (unsigned char) \
    ((v_r_table[r] + v_g_table[g] + v_b_table[b]) / SCALEFACTOR + CENTERSAMPLE)

#define Y_R (SCALEFACTOR *  0.29900)
#define Y_G (SCALEFACTOR *  0.58700)
#define Y_B (SCALEFACTOR *  0.11400)

#define U_R (SCALEFACTOR * -0.16874)
#define U_G (SCALEFACTOR * -0.33126)
#define U_B (SCALEFACTOR *  0.50000)

#define V_R (SCALEFACTOR *  0.50000)
#define V_G (SCALEFACTOR * -0.41869)
#define V_B (SCALEFACTOR * -0.08131)
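
/* Note: Y_R..V_B above are the usual 0.299/0.587/0.114 luma weights and
 * their matching full-range chroma terms, pre-multiplied by SCALEFACTOR so
 * the per-pixel conversion stays in integer arithmetic.  As a worked
 * example, COMPUTE_Y(255, 255, 255) evaluates to
 *     (0.299 + 0.587 + 0.114) * 65536 * 255 / 65536 ~= 255
 * (integer truncation in the precomputed tables may shave off one count),
 * while COMPUTE_U/COMPUTE_V add CENTERSAMPLE (128) to re-center the signed
 * chroma results into the 0..255 range. */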

/*
 * Precalculate all of the YUV tables since it requires fewer than
 * 10 kilobytes to store them.
 */
static int y_r_table[256];
static int y_g_table[256];
static int y_b_table[256];

static int u_r_table[256];
static int u_g_table[256];
static int u_b_table[256];

static int v_r_table[256];
static int v_g_table[256];
static int v_b_table[256];

static int xan_decode_init(AVCodecContext *avctx)
{
    XanContext *s = avctx->priv_data;
    int i;

    s->avctx = avctx;

    if ((avctx->codec->id == CODEC_ID_XAN_WC3) &&
        (s->avctx->palctrl == NULL)) {
        av_log(avctx, AV_LOG_ERROR, " WC3 Xan video: palette expected.\n");
        return -1;
    }

    avctx->pix_fmt = PIX_FMT_PAL8;
    avctx->has_b_frames = 0;
    dsputil_init(&s->dsp, avctx);

    /* initialize the RGB -> YUV tables */
    for (i = 0; i < 256; i++) {
        y_r_table[i] = Y_R * i;
        y_g_table[i] = Y_G * i;
        y_b_table[i] = Y_B * i;

        u_r_table[i] = U_R * i;
        u_g_table[i] = U_G * i;
        u_b_table[i] = U_B * i;

        v_r_table[i] = V_R * i;
        v_g_table[i] = V_G * i;
        v_b_table[i] = V_B * i;
    }

    s->buffer1 = av_malloc(avctx->width * avctx->height);
    s->buffer2 = av_malloc(avctx->width * avctx->height);
    if (!s->buffer1 || !s->buffer2)
        return -1;

    return 0;
}

/* This function is used in lieu of memcpy(). This decoder can not use
 * memcpy because the memory locations often overlap and
 * memcpy doesn't like that; it's not uncommon, for example, for
 * dest = src+1, to turn byte A into pattern AAAAAAAA.
 * This was originally repz movsb in Intel x86 ASM. */
static inline void bytecopy(unsigned char *dest, unsigned char *src, int count)
{
    int i;

    for (i = 0; i < count; i++)
        dest[i] = src[i];
}
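
/*
 * Decompress the Huffman-coded opcode stream.  Judging from the code below
 * (the bitstream format is not formally documented), the layout appears to
 * be: the first byte gives the number of tree nodes, the next 2 * count
 * bytes form the node table (a "0-bit" half followed by a "1-bit" half),
 * and the packed bit stream, read LSB first, follows.  Traversal starts at
 * the root node (count + 0x16); table entries below 0x16 are leaves and
 * are emitted as literal opcode bytes, and reaching the value 0x16 itself
 * terminates the stream.
 */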
static int xan_huffman_decode(unsigned char *dest, unsigned char *src)
{
    unsigned char byte = *src++;
    unsigned char ival = byte + 0x16;
    unsigned char * ptr = src + byte*2;
    unsigned char val = ival;
    int counter = 0;

    unsigned char bits = *ptr++;

    while ( val != 0x16 ) {
        if ( (1 << counter) & bits )
            val = src[byte + val - 0x17];
        else
            val = src[val - 0x17];

        if ( val < 0x16 ) {
            *dest++ = val;
            val = ival;
        }

        if (counter++ == 7) {
            counter = 0;
            bits = *ptr++;
        }
    }

    return 0;
}
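
/*
 * Unpack the image data segment.  This is an LZ77-style scheme driven by
 * the top bits of each opcode byte: the three shorter forms copy 0-3
 * literal bytes from the source and then a back-reference run from data
 * already written to dest (which is why the overlap-safe bytecopy() above
 * is used instead of memcpy()), while opcodes of the form 111xxxxx emit a
 * pure literal run.  A literal-run size above 0x70 marks the end of the
 * stream; the low two bits of that final opcode give the number of
 * trailing literal bytes.
 */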
static void xan_unpack(unsigned char *dest, unsigned char *src)
{
    unsigned char opcode;
    int size;
    int offset;
    int byte1, byte2, byte3;

    for (;;) {
        opcode = *src++;

        if ( (opcode & 0x80) == 0 ) {

            offset = *src++;

            size = opcode & 3;
            bytecopy(dest, src, size);  dest += size;  src += size;

            size = ((opcode & 0x1c) >> 2) + 3;
            bytecopy (dest, dest - (((opcode & 0x60) << 3) + offset + 1), size);
            dest += size;

        } else if ( (opcode & 0x40) == 0 ) {

            byte1 = *src++;
            byte2 = *src++;

            size = byte1 >> 6;
            bytecopy (dest, src, size);  dest += size;  src += size;

            size = (opcode & 0x3f) + 4;
            bytecopy (dest, dest - (((byte1 & 0x3f) << 8) + byte2 + 1), size);
            dest += size;

        } else if ( (opcode & 0x20) == 0 ) {

            byte1 = *src++;
            byte2 = *src++;
            byte3 = *src++;

            size = opcode & 3;
            bytecopy (dest, src, size);  dest += size;  src += size;

            size = byte3 + 5 + ((opcode & 0xc) << 6);
            bytecopy (dest,
                dest - ((((opcode & 0x10) >> 4) << 0x10) + 1 + (byte1 << 8) + byte2),
                size);
            dest += size;
        } else {
            size = ((opcode & 0x1f) << 2) + 4;

            if (size > 0x70)
                break;

            bytecopy (dest, src, size);  dest += size;  src += size;
        }
    }

    size = opcode & 3;
    bytecopy(dest, src, size);  dest += size;  src += size;
}
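
/*
 * Convert the palette handed over through the palette control structure
 * (256 entries, each apparently packed as 0x00RRGGBB in a 32-bit word,
 * judging by how the components are extracted below) into whatever form
 * the selected output colorspace needs: packed 15/16-bit words,
 * interleaved RGB/BGR triplets, a straight 32-bit copy, or per-entry YUV
 * triplets computed with the tables above.
 */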
static void inline xan_wc3_build_palette(XanContext *s,
    unsigned int *palette_data)
{
    int i;
    unsigned char r, g, b;
    unsigned short *palette16;
    unsigned int *palette32;
    unsigned int pal_elem;

    /* transform the palette passed through the palette control structure
     * into the necessary internal format depending on colorspace */

    switch (s->avctx->pix_fmt) {

    case PIX_FMT_RGB555:
        palette16 = (unsigned short *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            pal_elem = palette_data[i];
            r = (pal_elem >> 16) & 0xff;
            g = (pal_elem >> 8) & 0xff;
            b = pal_elem & 0xff;
            palette16[i] =
                ((r >> 3) << 10) |
                ((g >> 3) <<  5) |
                ((b >> 3) <<  0);
        }
        break;

    case PIX_FMT_RGB565:
        palette16 = (unsigned short *)s->palette;
        for (i = 0; i < PALETTE_COUNT; i++) {
            pal_elem = palette_data[i];
            r = (pal_elem >> 16) & 0xff;
            g = (pal_elem >> 8) & 0xff;
            b = pal_elem & 0xff;
            palette16[i] =
                ((r >> 3) << 11) |
                ((g >> 2) <<  5) |
                ((b >> 3) <<  0);
        }
        break;

    case PIX_FMT_RGB24:
        for (i = 0; i < PALETTE_COUNT; i++) {
            pal_elem = palette_data[i];
            r = (pal_elem >> 16) & 0xff;
            g = (pal_elem >> 8) & 0xff;
            b = pal_elem & 0xff;
            s->palette[i * 4 + 0] = r;
            s->palette[i * 4 + 1] = g;
            s->palette[i * 4 + 2] = b;
        }
        break;

    case PIX_FMT_BGR24:
        for (i = 0; i < PALETTE_COUNT; i++) {
            pal_elem = palette_data[i];
            r = (pal_elem >> 16) & 0xff;
            g = (pal_elem >> 8) & 0xff;
            b = pal_elem & 0xff;
            s->palette[i * 4 + 0] = b;
            s->palette[i * 4 + 1] = g;
            s->palette[i * 4 + 2] = r;
        }
        break;

    case PIX_FMT_PAL8:
    case PIX_FMT_RGBA32:
        palette32 = (unsigned int *)s->palette;
        memcpy (palette32, palette_data, PALETTE_COUNT * sizeof(unsigned int));
        break;

    case PIX_FMT_YUV444P:
        for (i = 0; i < PALETTE_COUNT; i++) {
            pal_elem = palette_data[i];
            r = (pal_elem >> 16) & 0xff;
            g = (pal_elem >> 8) & 0xff;
            b = pal_elem & 0xff;
            s->palette[i * 4 + 0] = COMPUTE_Y(r, g, b);
            s->palette[i * 4 + 1] = COMPUTE_U(r, g, b);
            s->palette[i * 4 + 2] = COMPUTE_V(r, g, b);
        }
        break;

    default:
        av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n");
        break;
    }
}

/* advance current_x variable; reset accounting variables if current_x
 * moves beyond width */
#define ADVANCE_CURRENT_X() \
    current_x++; \
    if (current_x >= width) { \
        index += line_inc; \
        current_x = 0; \
    }
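
/*
 * Write a horizontal run of pixel_count pixels starting at (x, y), taking
 * 8-bit palette indices from pixel_buffer and expanding them according to
 * the output colorspace.  ADVANCE_CURRENT_X() wraps the run onto the next
 * row (skipping the stride padding) whenever it passes the right edge of
 * the frame.
 */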
static void inline xan_wc3_output_pixel_run(XanContext *s,
    unsigned char *pixel_buffer, int x, int y, int pixel_count)
{
    int stride;
    int line_inc;
    int index;
    int current_x;
    int width = s->avctx->width;
    unsigned char pix;
    unsigned char *palette_plane;
    unsigned char *y_plane;
    unsigned char *u_plane;
    unsigned char *v_plane;
    unsigned char *rgb_plane;
    unsigned short *rgb16_plane;
    unsigned short *palette16;
    unsigned int *rgb32_plane;
    unsigned int *palette32;

    switch (s->avctx->pix_fmt) {

    case PIX_FMT_PAL8:
        palette_plane = s->current_frame.data[0];
        stride = s->current_frame.linesize[0];
        line_inc = stride - width;
        index = y * stride + x;
        current_x = x;
        while(pixel_count--) {

            /* don't do a memcpy() here; keyframes generally copy an entire
             * frame of data and the stride needs to be accounted for */
            palette_plane[index++] = *pixel_buffer++;

            ADVANCE_CURRENT_X();
        }
        break;

    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
        rgb16_plane = (unsigned short *)s->current_frame.data[0];
        palette16 = (unsigned short *)s->palette;
        stride = s->current_frame.linesize[0] / 2;
        line_inc = stride - width;
        index = y * stride + x;
        current_x = x;
        while(pixel_count--) {

            rgb16_plane[index++] = palette16[*pixel_buffer++];

            ADVANCE_CURRENT_X();
        }
        break;

    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        rgb_plane = s->current_frame.data[0];
        stride = s->current_frame.linesize[0];
        line_inc = stride - width * 3;
        index = y * stride + x * 3;
        current_x = x;
        while(pixel_count--) {

            pix = *pixel_buffer++;
            rgb_plane[index++] = s->palette[pix * 4 + 0];
            rgb_plane[index++] = s->palette[pix * 4 + 1];
            rgb_plane[index++] = s->palette[pix * 4 + 2];

            ADVANCE_CURRENT_X();
        }
        break;

    case PIX_FMT_RGBA32:
        rgb32_plane = (unsigned int *)s->current_frame.data[0];
        palette32 = (unsigned int *)s->palette;
        stride = s->current_frame.linesize[0] / 4;
        line_inc = stride - width;
        index = y * stride + x;
        current_x = x;
        while(pixel_count--) {

            rgb32_plane[index++] = palette32[*pixel_buffer++];

            ADVANCE_CURRENT_X();
        }
        break;

    case PIX_FMT_YUV444P:
        y_plane = s->current_frame.data[0];
        u_plane = s->current_frame.data[1];
        v_plane = s->current_frame.data[2];
        stride = s->current_frame.linesize[0];
        line_inc = stride - width;
        index = y * stride + x;
        current_x = x;
        while(pixel_count--) {

            pix = *pixel_buffer++;
            y_plane[index] = s->palette[pix * 4 + 0];
            u_plane[index] = s->palette[pix * 4 + 1];
            v_plane[index] = s->palette[pix * 4 + 2];
            index++;

            ADVANCE_CURRENT_X();
        }
        break;

    default:
        av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n");
        break;
    }
}

#define ADVANCE_CURFRAME_X() \
    curframe_x++; \
    if (curframe_x >= width) { \
        curframe_index += line_inc; \
        curframe_x = 0; \
    }

#define ADVANCE_PREVFRAME_X() \
    prevframe_x++; \
    if (prevframe_x >= width) { \
        prevframe_index += line_inc; \
        prevframe_x = 0; \
    }
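
/*
 * Copy a run of pixel_count pixels from the previous frame, starting at
 * (x + motion_x, y + motion_y) in the source and (x, y) in the destination.
 * Source and destination positions wrap independently at the frame width,
 * mirroring the run/wrap behaviour of the output routine above.
 */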
static void inline xan_wc3_copy_pixel_run(XanContext *s,
    int x, int y, int pixel_count, int motion_x, int motion_y)
{
    int stride;
    int line_inc;
    int curframe_index, prevframe_index;
    int curframe_x, prevframe_x;
    int width = s->avctx->width;
    unsigned char *palette_plane, *prev_palette_plane;
    unsigned char *y_plane, *u_plane, *v_plane;
    unsigned char *prev_y_plane, *prev_u_plane, *prev_v_plane;
    unsigned char *rgb_plane, *prev_rgb_plane;
    unsigned short *rgb16_plane, *prev_rgb16_plane;
    unsigned int *rgb32_plane, *prev_rgb32_plane;

    switch (s->avctx->pix_fmt) {

    case PIX_FMT_PAL8:
        palette_plane = s->current_frame.data[0];
        prev_palette_plane = s->last_frame.data[0];
        stride = s->current_frame.linesize[0];
        line_inc = stride - width;
        curframe_index = y * stride + x;
        curframe_x = x;
        prevframe_index = (y + motion_y) * stride + x + motion_x;
        prevframe_x = x + motion_x;
        while(pixel_count--) {

            palette_plane[curframe_index++] =
                prev_palette_plane[prevframe_index++];

            ADVANCE_CURFRAME_X();
            ADVANCE_PREVFRAME_X();
        }
        break;

    case PIX_FMT_RGB555:
    case PIX_FMT_RGB565:
        rgb16_plane = (unsigned short *)s->current_frame.data[0];
        prev_rgb16_plane = (unsigned short *)s->last_frame.data[0];
        stride = s->current_frame.linesize[0] / 2;
        line_inc = stride - width;
        curframe_index = y * stride + x;
        curframe_x = x;
        prevframe_index = (y + motion_y) * stride + x + motion_x;
        prevframe_x = x + motion_x;
        while(pixel_count--) {

            rgb16_plane[curframe_index++] =
                prev_rgb16_plane[prevframe_index++];

            ADVANCE_CURFRAME_X();
            ADVANCE_PREVFRAME_X();
        }
        break;

    case PIX_FMT_RGB24:
    case PIX_FMT_BGR24:
        rgb_plane = s->current_frame.data[0];
        prev_rgb_plane = s->last_frame.data[0];
        stride = s->current_frame.linesize[0];
        line_inc = stride - width * 3;
        curframe_index = y * stride + x * 3;
        curframe_x = x;
        prevframe_index = (y + motion_y) * stride +
            (3 * (x + motion_x));
        prevframe_x = x + motion_x;
        while(pixel_count--) {

            rgb_plane[curframe_index++] = prev_rgb_plane[prevframe_index++];
            rgb_plane[curframe_index++] = prev_rgb_plane[prevframe_index++];
            rgb_plane[curframe_index++] = prev_rgb_plane[prevframe_index++];

            ADVANCE_CURFRAME_X();
            ADVANCE_PREVFRAME_X();
        }
        break;

    case PIX_FMT_RGBA32:
        rgb32_plane = (unsigned int *)s->current_frame.data[0];
        prev_rgb32_plane = (unsigned int *)s->last_frame.data[0];
        stride = s->current_frame.linesize[0] / 4;
        line_inc = stride - width;
        curframe_index = y * stride + x;
        curframe_x = x;
        prevframe_index = (y + motion_y) * stride + x + motion_x;
        prevframe_x = x + motion_x;
        while(pixel_count--) {

            rgb32_plane[curframe_index++] =
                prev_rgb32_plane[prevframe_index++];

            ADVANCE_CURFRAME_X();
            ADVANCE_PREVFRAME_X();
        }
        break;

    case PIX_FMT_YUV444P:
        y_plane = s->current_frame.data[0];
        u_plane = s->current_frame.data[1];
        v_plane = s->current_frame.data[2];
        prev_y_plane = s->last_frame.data[0];
        prev_u_plane = s->last_frame.data[1];
        prev_v_plane = s->last_frame.data[2];
        stride = s->current_frame.linesize[0];
        line_inc = stride - width;
        curframe_index = y * stride + x;
        curframe_x = x;
        prevframe_index = (y + motion_y) * stride + x + motion_x;
        prevframe_x = x + motion_x;
        while(pixel_count--) {

            y_plane[curframe_index] = prev_y_plane[prevframe_index];
            u_plane[curframe_index] = prev_u_plane[prevframe_index];
            v_plane[curframe_index] = prev_v_plane[prevframe_index];
            curframe_index++;
            ADVANCE_CURFRAME_X();
            prevframe_index++;
            ADVANCE_PREVFRAME_X();
        }
        break;

    default:
        av_log(s->avctx, AV_LOG_ERROR, " Xan WC3: Unhandled colorspace\n");
        break;
    }
}
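
/*
 * Decode one WC3 Xan frame.  As read by the code below, the compressed
 * chunk starts with four little-endian 16-bit offsets that locate the
 * Huffman-coded opcode stream, the run-size segment, the motion-vector
 * segment and the image data segment inside the chunk.  Each decoded
 * opcode selects between leaving a run of pixels unchanged, outputting a
 * run of new pixels, and copying a run from the previous frame at a small
 * motion offset; short run lengths are encoded directly in the opcode,
 * longer ones are pulled from the size segment as 8-, 16- or 24-bit
 * big-endian counts.
 */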
static void xan_wc3_decode_frame(XanContext *s) {

    int width = s->avctx->width;
    int height = s->avctx->height;
    int total_pixels = width * height;
    unsigned char opcode;
    unsigned char flag = 0;
    int size = 0;
    int motion_x, motion_y;
    int x, y;

    unsigned char *opcode_buffer = s->buffer1;
    unsigned char *imagedata_buffer = s->buffer2;

    /* pointers to segments inside the compressed chunk */
    unsigned char *huffman_segment;
    unsigned char *size_segment;
    unsigned char *vector_segment;
    unsigned char *imagedata_segment;

    huffman_segment =   s->buf + LE_16(&s->buf[0]);
    size_segment =      s->buf + LE_16(&s->buf[2]);
    vector_segment =    s->buf + LE_16(&s->buf[4]);
    imagedata_segment = s->buf + LE_16(&s->buf[6]);

    xan_huffman_decode(opcode_buffer, huffman_segment);

    if (imagedata_segment[0] == 2)
        xan_unpack(imagedata_buffer, &imagedata_segment[1]);
    else
        imagedata_buffer = &imagedata_segment[1];

    /* use the decoded data segments to build the frame */
    x = y = 0;
    while (total_pixels) {

        opcode = *opcode_buffer++;
        size = 0;

        switch (opcode) {

        case 0:
            flag ^= 1;
            continue;

        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 6:
        case 7:
        case 8:
            size = opcode;
            break;

        case 12:
        case 13:
        case 14:
        case 15:
        case 16:
        case 17:
        case 18:
            size += (opcode - 10);
            break;

        case 9:
        case 19:
            size = *size_segment++;
            break;

        case 10:
        case 20:
            size = BE_16(&size_segment[0]);
            size_segment += 2;
            break;

        case 11:
        case 21:
            size = (size_segment[0] << 16) | (size_segment[1] << 8) |
                size_segment[2];
            size_segment += 3;
            break;
        }

        if (opcode < 12) {
            flag ^= 1;
            if (flag) {
                /* run of (size) pixels is unchanged from last frame */
                xan_wc3_copy_pixel_run(s, x, y, size, 0, 0);
            } else {
                /* output a run of pixels from imagedata_buffer */
                xan_wc3_output_pixel_run(s, imagedata_buffer, x, y, size);
                imagedata_buffer += size;
            }
        } else {
            /* run-based motion compensation from last frame */
            motion_x = (*vector_segment >> 4) & 0xF;
            motion_y = *vector_segment & 0xF;
            vector_segment++;

            /* sign extension */
            if (motion_x & 0x8)
                motion_x |= 0xFFFFFFF0;
            if (motion_y & 0x8)
                motion_y |= 0xFFFFFFF0;

            /* copy a run of pixels from the previous frame */
            xan_wc3_copy_pixel_run(s, x, y, size, motion_x, motion_y);

            flag = 0;
        }

        /* coordinate accounting */
        total_pixels -= size;
        while (size) {
            if (x + size >= width) {
                y++;
                size -= (width - x);
                x = 0;
            } else {
                x += size;
                size = 0;
            }
        }
    }

    /* for PAL8, make the palette available on the way out */
    if (s->avctx->pix_fmt == PIX_FMT_PAL8) {
        memcpy(s->current_frame.data[1], s->palette, PALETTE_COUNT * 4);
        s->current_frame.palette_has_changed = 1;
        s->avctx->palctrl->palette_changed = 0;
    }
}

static void xan_wc4_decode_frame(XanContext *s) {
}
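
/*
 * Top-level per-frame entry point: rebuild the internal palette whenever
 * the demuxer signals a palette change, grab a new output buffer, run the
 * WC3 (or, eventually, WC4) frame decoder, then release the previous
 * reference frame and keep the newly decoded one around for the next
 * frame's motion compensation.
 */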
static int xan_decode_frame(AVCodecContext *avctx,
                            void *data, int *data_size,
                            uint8_t *buf, int buf_size)
{
    XanContext *s = avctx->priv_data;
    AVPaletteControl *palette_control = avctx->palctrl;
    int keyframe = 0;

    if (palette_control->palette_changed) {
        /* load the new palette and reset the palette control */
        xan_wc3_build_palette(s, palette_control->palette);
        /* If pal8 we clear flag when we copy palette */
        if (s->avctx->pix_fmt != PIX_FMT_PAL8)
            palette_control->palette_changed = 0;
        keyframe = 1;
    }

    if (avctx->get_buffer(avctx, &s->current_frame)) {
        av_log(s->avctx, AV_LOG_ERROR, " Xan Video: get_buffer() failed\n");
        return -1;
    }
    s->current_frame.reference = 3;

    s->buf = buf;
    s->size = buf_size;

    if (avctx->codec->id == CODEC_ID_XAN_WC3)
        xan_wc3_decode_frame(s);
    else if (avctx->codec->id == CODEC_ID_XAN_WC4)
        xan_wc4_decode_frame(s);

    /* release the last frame if it is allocated */
    if (s->last_frame.data[0])
        avctx->release_buffer(avctx, &s->last_frame);

    /* shuffle frames */
    s->last_frame = s->current_frame;

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = s->current_frame;

    /* always report that the buffer was completely consumed */
    return buf_size;
}

static int xan_decode_end(AVCodecContext *avctx)
{
    XanContext *s = avctx->priv_data;

    /* release the last frame */
    avctx->release_buffer(avctx, &s->last_frame);

    av_free(s->buffer1);
    av_free(s->buffer2);

    return 0;
}

AVCodec xan_wc3_decoder = {
    "xan_wc3",
    CODEC_TYPE_VIDEO,
    CODEC_ID_XAN_WC3,
    sizeof(XanContext),
    xan_decode_init,
    NULL,
    xan_decode_end,
    xan_decode_frame,
    CODEC_CAP_DR1,
};

/*
AVCodec xan_wc4_decoder = {
    "xan_wc4",
    CODEC_TYPE_VIDEO,
    CODEC_ID_XAN_WC4,
    sizeof(XanContext),
    xan_decode_init,
    NULL,
    xan_decode_end,
    xan_decode_frame,
    CODEC_CAP_DR1,
};
*/