You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

375 lines
12KB

  1. /*
  2. * Feeble Files/ScummVM DXA decoder
  3. * Copyright (c) 2007 Konstantin Shishkov
  4. *
  5. * This file is part of FFmpeg.
  6. *
  7. * FFmpeg is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * FFmpeg is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with FFmpeg; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * DXA Video decoder
  24. */
  25. #include <stdio.h>
  26. #include <stdlib.h>
  27. #include "libavutil/common.h"
  28. #include "libavutil/intreadwrite.h"
  29. #include "bytestream.h"
  30. #include "avcodec.h"
  31. #include "internal.h"
  32. #include <zlib.h>
/*
 * Decoder context
 */
typedef struct DxaDecContext {
    AVFrame *prev;        // previous decoded frame, prediction reference for inter coding
    int dsize;            // allocated payload size of decomp_buf in bytes (width*height*2)
#define DECOMP_BUF_PADDING 16
    uint8_t *decomp_buf;  // scratch buffer receiving the zlib-decompressed frame payload
    uint32_t pal[256];    // current palette, stored as 0xFF<<24 | RGB (opaque ARGB)
} DxaDecContext;
/* Per-opcode shift amounts used in decode_13() to scatter the two nibbles of a
 * packed 8-bit half-mask (opcodes 10-15) into the bit positions of the full
 * 16-bit change mask, indexed by (opcode - 10). */
static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };
/**
 * Decode one inter frame coded with DXA method 12 or 13 (ScummVM additions).
 *
 * Layout of the (already zlib-decompressed) payload in src:
 *   bytes 0-3   big-endian size of the block-data segment
 *   bytes 4-7   big-endian size of the motion-vector segment
 *   bytes 8-11  skipped here
 *   then one opcode byte per 4x4 block (width*height/16 of them),
 *   followed by the block-data, motion-vector and change-mask segments.
 *
 * @param avctx   codec context, used for dimensions and logging
 * @param c       decoder private context (not used by this function)
 * @param dst     destination luma/palette plane
 * @param stride  line stride in bytes for both dst and ref
 * @param src     decompressed frame payload
 * @param srcsize number of valid bytes in src
 * @param ref     previous frame plane used as prediction reference
 * @return 0 on success, AVERROR_INVALIDDATA on truncated or invalid data
 */
static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
                     int stride, uint8_t *src, int srcsize, uint8_t *ref)
{
    uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
    uint8_t *src_end = src + srcsize;
    int i, j, k;
    int type, x, y, d, d2;
    uint32_t mask;

    /* 12ULL forces the sum into 64 bits so the segment sizes cannot
     * overflow before being compared against srcsize */
    if (12ULL + ((avctx->width * avctx->height) >> 4) + AV_RB32(src + 0) + AV_RB32(src + 4) > srcsize)
        return AVERROR_INVALIDDATA;

    /* set up one cursor per segment; each advances independently below */
    code = src + 12;
    data = code + ((avctx->width * avctx->height) >> 4);
    mv = data + AV_RB32(src + 0);
    msk = mv + AV_RB32(src + 4);

    for(j = 0; j < avctx->height; j += 4){
        for(i = 0; i < avctx->width; i += 4){
            /* coarse per-block truncation check; the header check above bounds
             * the segment starts, this catches cursors running off the end */
            if (data > src_end || mv > src_end || msk > src_end)
                return AVERROR_INVALIDDATA;
            tmp = dst + i;
            tmp2 = ref + i;
            type = *code++;
            switch(type){
            case 4: // motion compensation
                /* 4-bit components: values 8..15 encode negative offsets 0..-7 */
                x = (*mv) >> 4; if(x & 8) x = 8 - x;
                y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                if (i < -x || avctx->width - i - 4 < x ||
                    j < -y || avctx->height - j - 4 < y) {
                    av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
                    return AVERROR_INVALIDDATA;
                }
                tmp2 += x + y*stride;
                /* fall through: copy the (displaced) reference block */
            case 0: // skip
            case 5: // skip in method 12
                for(y = 0; y < 4; y++){
                    memcpy(tmp, tmp2, 4);
                    tmp += stride;
                    tmp2 += stride;
                }
                break;
            case 1: // masked change
            case 10: // masked change with only half of pixels changed
            case 11: // cases 10-15 are for method 12 only
            case 12:
            case 13:
            case 14:
            case 15:
                if(type == 1){
                    /* full 16-bit mask, one bit per pixel */
                    mask = AV_RB16(msk);
                    msk += 2;
                }else{
                    /* half mask: expand 8 packed bits into the 16-bit mask
                     * positions selected by the shift tables */
                    type -= 10;
                    mask = ((msk[0] & 0xF0) << shift1[type]) | ((msk[0] & 0xF) << shift2[type]);
                    msk++;
                }
                /* MSB-first: set bit takes a new pixel, clear bit copies ref */
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = (mask & 0x8000) ? *data++ : tmp2[x];
                        mask <<= 1;
                    }
                    tmp += stride;
                    tmp2 += stride;
                }
                break;
            case 2: // fill block
                for(y = 0; y < 4; y++){
                    memset(tmp, data[0], 4);
                    tmp += stride;
                }
                data++;
                break;
            case 3: // raw block
                for(y = 0; y < 4; y++){
                    memcpy(tmp, data, 4);
                    data += 4;
                    tmp += stride;
                }
                break;
            case 8: // subblocks - method 13 only
                /* one mask byte selects a mode (2 bits each) for the four
                 * 2x2 subblocks, scanned MSB-first */
                mask = *msk++;
                for(k = 0; k < 4; k++){
                    /* offset of subblock k within the 4x4 block */
                    d = ((k & 1) << 1) + ((k & 2) * stride);
                    d2 = ((k & 1) << 1) + ((k & 2) * stride);
                    tmp2 = ref + i + d2;
                    switch(mask & 0xC0){
                    case 0x80: // motion compensation
                        x = (*mv) >> 4; if(x & 8) x = 8 - x;
                        y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                        if (i + 2*(k & 1) < -x || avctx->width - i - 2*(k & 1) - 2 < x ||
                            j + (k & 2) < -y || avctx->height - j - (k & 2) - 2 < y) {
                            av_log(avctx, AV_LOG_ERROR, "MV %d %d out of bounds\n", x,y);
                            return AVERROR_INVALIDDATA;
                        }
                        tmp2 += x + y*stride;
                        /* fall through: copy the (displaced) reference subblock */
                    case 0x00: // skip
                        tmp[d + 0 ] = tmp2[0];
                        tmp[d + 1 ] = tmp2[1];
                        tmp[d + 0 + stride] = tmp2[0 + stride];
                        tmp[d + 1 + stride] = tmp2[1 + stride];
                        break;
                    case 0x40: // fill
                        tmp[d + 0 ] = data[0];
                        tmp[d + 1 ] = data[0];
                        tmp[d + 0 + stride] = data[0];
                        tmp[d + 1 + stride] = data[0];
                        data++;
                        break;
                    case 0xC0: // raw
                        tmp[d + 0 ] = *data++;
                        tmp[d + 1 ] = *data++;
                        tmp[d + 0 + stride] = *data++;
                        tmp[d + 1 + stride] = *data++;
                        break;
                    }
                    mask <<= 2;
                }
                break;
            case 32: // vector quantization - 2 colors
                /* 16-bit selector consumed LSB-first, indexing a 2-entry palette */
                mask = AV_RB16(msk);
                msk += 2;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 1];
                        mask >>= 1;
                    }
                    tmp += stride;
                    tmp2 += stride;
                }
                data += 2;
                break;
            case 33: // vector quantization - 3 or 4 colors
            case 34:
                /* 32-bit selector, 2 bits per pixel, LSB-first */
                mask = AV_RB32(msk);
                msk += 4;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 3];
                        mask >>= 2;
                    }
                    tmp += stride;
                    tmp2 += stride;
                }
                data += type - 30; /* 3 colors for opcode 33, 4 for 34 */
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown opcode %d\n", type);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride * 4;
        ref += stride * 4;
    }
    return 0;
}
  198. static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
  199. {
  200. AVFrame *frame = data;
  201. DxaDecContext * const c = avctx->priv_data;
  202. uint8_t *outptr, *srcptr, *tmpptr;
  203. unsigned long dsize;
  204. int i, j, compr, ret;
  205. int stride;
  206. int pc = 0;
  207. GetByteContext gb;
  208. bytestream2_init(&gb, avpkt->data, avpkt->size);
  209. /* make the palette available on the way out */
  210. if (bytestream2_peek_le32(&gb) == MKTAG('C','M','A','P')) {
  211. bytestream2_skip(&gb, 4);
  212. for(i = 0; i < 256; i++){
  213. c->pal[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
  214. }
  215. pc = 1;
  216. }
  217. if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
  218. return ret;
  219. memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
  220. frame->palette_has_changed = pc;
  221. outptr = frame->data[0];
  222. srcptr = c->decomp_buf;
  223. tmpptr = c->prev->data[0];
  224. stride = frame->linesize[0];
  225. if (bytestream2_get_le32(&gb) == MKTAG('N','U','L','L'))
  226. compr = -1;
  227. else
  228. compr = bytestream2_get_byte(&gb);
  229. dsize = c->dsize;
  230. if (compr != 4 && compr != -1) {
  231. bytestream2_skip(&gb, 4);
  232. if (uncompress(c->decomp_buf, &dsize, avpkt->data + bytestream2_tell(&gb),
  233. bytestream2_get_bytes_left(&gb)) != Z_OK) {
  234. av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
  235. return AVERROR_UNKNOWN;
  236. }
  237. memset(c->decomp_buf + dsize, 0, DECOMP_BUF_PADDING);
  238. }
  239. if (avctx->debug & FF_DEBUG_PICT_INFO)
  240. av_log(avctx, AV_LOG_DEBUG, "compr:%2d, dsize:%d\n", compr, (int)dsize);
  241. switch(compr){
  242. case -1:
  243. frame->key_frame = 0;
  244. frame->pict_type = AV_PICTURE_TYPE_P;
  245. if (c->prev->data[0])
  246. memcpy(frame->data[0], c->prev->data[0], frame->linesize[0] * avctx->height);
  247. else{ // Should happen only when first frame is 'NULL'
  248. memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
  249. frame->key_frame = 1;
  250. frame->pict_type = AV_PICTURE_TYPE_I;
  251. }
  252. break;
  253. case 2:
  254. case 4:
  255. frame->key_frame = 1;
  256. frame->pict_type = AV_PICTURE_TYPE_I;
  257. for (j = 0; j < avctx->height; j++) {
  258. memcpy(outptr, srcptr, avctx->width);
  259. outptr += stride;
  260. srcptr += avctx->width;
  261. }
  262. break;
  263. case 3:
  264. case 5:
  265. if (!tmpptr) {
  266. av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
  267. if (!(avctx->flags2 & AV_CODEC_FLAG2_SHOW_ALL))
  268. return AVERROR_INVALIDDATA;
  269. }
  270. frame->key_frame = 0;
  271. frame->pict_type = AV_PICTURE_TYPE_P;
  272. for (j = 0; j < avctx->height; j++) {
  273. if(tmpptr){
  274. for(i = 0; i < avctx->width; i++)
  275. outptr[i] = srcptr[i] ^ tmpptr[i];
  276. tmpptr += stride;
  277. }else
  278. memcpy(outptr, srcptr, avctx->width);
  279. outptr += stride;
  280. srcptr += avctx->width;
  281. }
  282. break;
  283. case 12: // ScummVM coding
  284. case 13:
  285. frame->key_frame = 0;
  286. frame->pict_type = AV_PICTURE_TYPE_P;
  287. if (!c->prev->data[0]) {
  288. av_log(avctx, AV_LOG_ERROR, "Missing reference frame\n");
  289. return AVERROR_INVALIDDATA;
  290. }
  291. decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, dsize, c->prev->data[0]);
  292. break;
  293. default:
  294. av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", compr);
  295. return AVERROR_INVALIDDATA;
  296. }
  297. av_frame_unref(c->prev);
  298. if ((ret = av_frame_ref(c->prev, frame)) < 0)
  299. return ret;
  300. *got_frame = 1;
  301. /* always report that the buffer was completely consumed */
  302. return avpkt->size;
  303. }
  304. static av_cold int decode_init(AVCodecContext *avctx)
  305. {
  306. DxaDecContext * const c = avctx->priv_data;
  307. if (avctx->width%4 || avctx->height%4) {
  308. avpriv_request_sample(avctx, "dimensions are not a multiple of 4");
  309. return AVERROR_INVALIDDATA;
  310. }
  311. c->prev = av_frame_alloc();
  312. if (!c->prev)
  313. return AVERROR(ENOMEM);
  314. avctx->pix_fmt = AV_PIX_FMT_PAL8;
  315. c->dsize = avctx->width * avctx->height * 2;
  316. c->decomp_buf = av_malloc(c->dsize + DECOMP_BUF_PADDING);
  317. if (!c->decomp_buf) {
  318. av_frame_free(&c->prev);
  319. av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
  320. return AVERROR(ENOMEM);
  321. }
  322. return 0;
  323. }
  324. static av_cold int decode_end(AVCodecContext *avctx)
  325. {
  326. DxaDecContext * const c = avctx->priv_data;
  327. av_freep(&c->decomp_buf);
  328. av_frame_free(&c->prev);
  329. return 0;
  330. }
/* Public codec descriptor registered with libavcodec. */
AVCodec ff_dxa_decoder = {
    .name           = "dxa",
    .long_name      = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DXA,
    .priv_data_size = sizeof(DxaDecContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = AV_CODEC_CAP_DR1, // decoder writes into caller-supplied buffers
};