/*
 * Feeble Files/ScummVM DXA decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DXA Video decoder
 */
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "bytestream.h"
#include "avcodec.h"
#include "internal.h"

#include <zlib.h>
/*
 * Decoder context
 */
typedef struct DxaDecContext {
    AVFrame pic, prev;

    int dsize;
    uint8_t *decomp_buf;
    uint32_t pal[256];
} DxaDecContext;
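
/* Shift tables used by decode_13() to expand the packed 4+4 bit masks of
 * opcodes 10-15 (only half of the block's pixels change) into a full
 * 16-bit per-pixel change mask. */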
static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };
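
/*
 * Block decoder for the ScummVM coding methods 12 and 13.  Every frame is
 * split into 4x4 blocks with one opcode byte per block.  The chunk starts
 * with a 12-byte header whose first two big-endian 32-bit words give the
 * sizes of the pixel-data and motion-vector streams; the opcode stream
 * (one byte per block) follows the header and is in turn followed by the
 * pixel-data, motion-vector and mask streams.
 */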
static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst, uint8_t *src, uint8_t *ref)
{
    uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
    int i, j, k;
    int type, x, y, d, d2;
    int stride = c->pic.linesize[0];
    uint32_t mask;

    code = src  + 12;
    data = code + ((avctx->width * avctx->height) >> 4);
    mv   = data + AV_RB32(src + 0);
    msk  = mv   + AV_RB32(src + 4);

    for(j = 0; j < avctx->height; j += 4){
        for(i = 0; i < avctx->width; i += 4){
            tmp  = dst + i;
            tmp2 = ref + i;
            type = *code++;
            switch(type){
            case 4: // motion compensation
                x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                tmp2 += x + y*stride;
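                /* fall through: after motion compensation the block is
                 * copied from the shifted reference, just like a skip */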
            case 0: // skip
            case 5: // skip in method 12
                for(y = 0; y < 4; y++){
                    memcpy(tmp, tmp2, 4);
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 1:  // masked change
            case 10: // masked change with only half of pixels changed
            case 11: // cases 10-15 are for method 12 only
            case 12:
            case 13:
            case 14:
            case 15:
                if(type == 1){
                    mask = AV_RB16(msk);
                    msk += 2;
                }else{
                    type -= 10;
                    mask = ((msk[0] & 0xF0) << shift1[type]) | ((msk[0] & 0xF) << shift2[type]);
                    msk++;
                }
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = (mask & 0x8000) ? *data++ : tmp2[x];
                        mask <<= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 2: // fill block
                for(y = 0; y < 4; y++){
                    memset(tmp, data[0], 4);
                    tmp += stride;
                }
                data++;
                break;
            case 3: // raw block
                for(y = 0; y < 4; y++){
                    memcpy(tmp, data, 4);
                    data += 4;
                    tmp  += stride;
                }
                break;
            case 8: // subblocks - method 13 only
                mask = *msk++;
                for(k = 0; k < 4; k++){
                    d  = ((k & 1) << 1) + ((k & 2) * stride);
                    d2 = ((k & 1) << 1) + ((k & 2) * stride);
                    tmp2 = ref + i + d2;
                    switch(mask & 0xC0){
                    case 0x80: // motion compensation
                        x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                        y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                        tmp2 += x + y*stride;
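                        /* fall through: the 2x2 subblock is then copied
                         * from the shifted reference, just like a skip */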
                    case 0x00: // skip
                        tmp[d + 0         ] = tmp2[0];
                        tmp[d + 1         ] = tmp2[1];
                        tmp[d + 0 + stride] = tmp2[0 + stride];
                        tmp[d + 1 + stride] = tmp2[1 + stride];
                        break;
                    case 0x40: // fill
                        tmp[d + 0         ] = data[0];
                        tmp[d + 1         ] = data[0];
                        tmp[d + 0 + stride] = data[0];
                        tmp[d + 1 + stride] = data[0];
                        data++;
                        break;
                    case 0xC0: // raw
                        tmp[d + 0         ] = *data++;
                        tmp[d + 1         ] = *data++;
                        tmp[d + 0 + stride] = *data++;
                        tmp[d + 1 + stride] = *data++;
                        break;
                    }
                    mask <<= 2;
                }
                break;
            case 32: // vector quantization - 2 colors
                mask = AV_RB16(msk);
                msk += 2;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 1];
                        mask >>= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += 2;
                break;
            case 33: // vector quantization - 3 or 4 colors
            case 34:
                mask = AV_RB32(msk);
                msk += 4;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 3];
                        mask >>= 2;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += type - 30;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown opcode %d\n", type);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride * 4;
        ref += stride * 4;
    }

    return 0;
}
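
/*
 * Each packet may start with an optional 'CMAP' chunk carrying a new
 * 256-entry palette, followed by either a 'NULL' chunk (repeat the
 * previous frame) or a frame chunk whose first payload byte selects the
 * compression method; for most methods the payload is zlib-compressed.
 */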
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    DxaDecContext * const c = avctx->priv_data;
    uint8_t *outptr, *srcptr, *tmpptr;
    unsigned long dsize;
    int i, j, compr, ret;
    int stride;
    int pc = 0;
    GetByteContext gb;

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    /* make the palette available on the way out */
    if (bytestream2_peek_le32(&gb) == MKTAG('C','M','A','P')) {
        bytestream2_skip(&gb, 4);
        for(i = 0; i < 256; i++){
            c->pal[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
        pc = 1;
    }

    if ((ret = ff_get_buffer(avctx, &c->pic)) < 0){
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
    c->pic.palette_has_changed = pc;

    outptr = c->pic.data[0];
    srcptr = c->decomp_buf;
    tmpptr = c->prev.data[0];
    stride = c->pic.linesize[0];

    if (bytestream2_get_le32(&gb) == MKTAG('N','U','L','L'))
        compr = -1;
    else
        compr = bytestream2_get_byte(&gb);

    dsize = c->dsize;
    if (compr != 4 && compr != -1) {
        bytestream2_skip(&gb, 4);
        if (uncompress(c->decomp_buf, &dsize, avpkt->data + bytestream2_tell(&gb),
                       bytestream2_get_bytes_left(&gb)) != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
            return AVERROR_INVALIDDATA;
        }
    }
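
    /* Compression methods: -1 repeats the previous frame ('NULL' chunk),
     * 2 and 4 store the frame as raw pixels, 3 and 5 store it as an XOR
     * delta against the previous frame, 12 and 13 use the block-based
     * ScummVM coding handled by decode_13(). */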
    switch(compr){
    case -1:
        c->pic.key_frame = 0;
        c->pic.pict_type = AV_PICTURE_TYPE_P;
        if(c->prev.data[0])
            memcpy(c->pic.data[0], c->prev.data[0], c->pic.linesize[0] * avctx->height);
        else{ // Should happen only when first frame is 'NULL'
            memset(c->pic.data[0], 0, c->pic.linesize[0] * avctx->height);
            c->pic.key_frame = 1;
            c->pic.pict_type = AV_PICTURE_TYPE_I;
        }
        break;
    case 2:
    case 3:
    case 4:
    case 5:
        c->pic.key_frame = !(compr & 1);
        c->pic.pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
        for(j = 0; j < avctx->height; j++){
            if((compr & 1) && tmpptr){
                for(i = 0; i < avctx->width; i++)
                    outptr[i] = srcptr[i] ^ tmpptr[i];
                tmpptr += stride;
            }else
                memcpy(outptr, srcptr, avctx->width);
            outptr += stride;
            srcptr += avctx->width;
        }
        break;
    case 12: // ScummVM coding
    case 13:
        c->pic.key_frame = 0;
        c->pic.pict_type = AV_PICTURE_TYPE_P;
        if (!c->prev.data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame\n");
            return AVERROR_INVALIDDATA;
        }
        if ((ret = decode_13(avctx, c, c->pic.data[0], srcptr, c->prev.data[0])) < 0)
            return ret;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", compr);
        return AVERROR_INVALIDDATA;
    }
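
    /* Keep the newly decoded picture as the reference for the next frame,
     * release the old reference, and return the new picture (now stored in
     * c->prev) to the caller. */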
    FFSWAP(AVFrame, c->pic, c->prev);
    if(c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    *got_frame = 1;
    *(AVFrame*)data = c->prev;

    /* always report that the buffer was completely consumed */
    return avpkt->size;
}
static av_cold int decode_init(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    avcodec_get_frame_defaults(&c->pic);
    avcodec_get_frame_defaults(&c->prev);
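
    /* intermediate buffer that receives the zlib-decompressed frame payload */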
    c->dsize = avctx->width * avctx->height * 2;
    c->decomp_buf = av_malloc(c->dsize);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return AVERROR(ENOMEM);
    }

    return 0;
}
static av_cold int decode_end(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    av_freep(&c->decomp_buf);
    if(c->prev.data[0])
        avctx->release_buffer(avctx, &c->prev);
    if(c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    return 0;
}
AVCodec ff_dxa_decoder = {
    .name           = "dxa",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DXA,
    .priv_data_size = sizeof(DxaDecContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"),
};