/*
 * Discworld II BMV video and audio decoder
 * Copyright (c) 2011 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "bytestream.h"
#include "libavutil/avassert.h"

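/* Frame type flags stored in the first byte of every BMV packet: the two
 * low bits select NOP/end/delta/intra, the higher bits mark the optional
 * scroll, palette, command, audio and extension payloads that follow. */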
enum BMVFlags {
    BMV_NOP     = 0,
    BMV_END,
    BMV_DELTA,
    BMV_INTRA,

    BMV_SCROLL  = 0x04,
    BMV_PALETTE = 0x08,
    BMV_COMMAND = 0x10,
    BMV_AUDIO   = 0x20,
    BMV_EXT     = 0x40,
    BMV_PRINT   = 0x80
};

#define SCREEN_WIDE 640
#define SCREEN_HIGH 429

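/* The work buffer keeps one spare line above the visible 640x429 frame
 * (frame_base is SCREEN_WIDE * (SCREEN_HIGH + 1) bytes and c->frame points
 * one line in), presumably so that copies using the -640 offset of intra
 * and scrolled frames always stay inside the allocation. */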
typedef struct BMVDecContext {
    AVCodecContext *avctx;
    AVFrame pic;

    uint8_t *frame, frame_base[SCREEN_WIDE * (SCREEN_HIGH + 1)];
    uint32_t pal[256];
    const uint8_t *stream;
} BMVDecContext;

#define NEXT_BYTE(v) v = forward ? v + 1 : v - 1;

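/* Decode one video frame into the work buffer.
 *
 * Depending on frame_off (the scroll offset relative to the previous frame
 * contents) the bitstream is consumed either forwards or backwards. Each run
 * is coded as a nibble-based variable-length value whose low bit advances
 * the copy mode: mode 1 copies from the existing frame data at frame_off,
 * mode 2 copies literal bytes from the bitstream and mode 3 repeats the
 * previously written pixel (RLE). */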
static int decode_bmv_frame(const uint8_t *source, int src_len, uint8_t *frame, int frame_off)
{
    unsigned val, saved_val = 0;
    int tmplen = src_len;
    const uint8_t *src, *source_end = source + src_len;
    uint8_t *frame_end = frame + SCREEN_WIDE * SCREEN_HIGH;
    uint8_t *dst, *dst_end;
    int len, mask;
    int forward = (frame_off <= -SCREEN_WIDE) || (frame_off >= 0);
    int read_two_nibbles, flag;
    int advance_mode;
    int mode = 0;
    int i;

    if (src_len <= 0)
        return -1;

    if (forward) {
        src = source;
        dst = frame;
        dst_end = frame_end;
    } else {
        src = source + src_len - 1;
        dst = frame_end - 1;
        dst_end = frame - 1;
    }
    for (;;) {
        int shift = 0;
        flag = 0;

        /* The mode/len decoding is a bit strange:
         * values are coded as variable-length codes with nibble units,
         * code end is signalled by two top bits in the nibble being nonzero.
         * And since data is bytepacked and we read two nibbles at a time,
         * we may get a nibble belonging to the next code.
         * Hence this convoluted loop.
         */
        if (!mode || (tmplen == 4)) {
            if (src < source || src >= source_end)
                return -1;
            val = *src;
            read_two_nibbles = 1;
        } else {
            val = saved_val;
            read_two_nibbles = 0;
        }
        if (!(val & 0xC)) {
            for (;;) {
                if (shift > 22)
                    return -1;
                if (!read_two_nibbles) {
                    if (src < source || src >= source_end)
                        return -1;
                    shift += 2;
                    val |= *src << shift;
                    if (*src & 0xC)
                        break;
                }
                // two upper bits of the nibble are zero,
                // so shift the top nibble value down into their place
                read_two_nibbles = 0;
                shift += 2;
                mask = (1 << shift) - 1;
                val = ((val >> 2) & ~mask) | (val & mask);
                NEXT_BYTE(src);
                if ((val & (0xC << shift))) {
                    flag = 1;
                    break;
                }
            }
        } else if (mode) {
            flag = tmplen != 4;
        }
        if (flag) {
            tmplen = 4;
        } else {
            saved_val = val >> (4 + shift);
            tmplen = 0;
            val &= (1 << (shift + 4)) - 1;
            NEXT_BYTE(src);
        }
        advance_mode = val & 1;
        len = (val >> 1) - 1;
        av_assert0(len > 0);
        mode += 1 + advance_mode;
        if (mode >= 4)
            mode -= 3;
        if (FFABS(dst_end - dst) < len)
            return -1;
        switch (mode) {
        case 1:
            if (forward) {
                if (dst - frame + SCREEN_WIDE < frame_off ||
                    dst - frame + SCREEN_WIDE + frame_off < 0 ||
                    frame_end - dst < frame_off + len ||
                    frame_end - dst < len)
                    return -1;
                for (i = 0; i < len; i++)
                    dst[i] = dst[frame_off + i];
                dst += len;
            } else {
                dst -= len;
                if (dst - frame + SCREEN_WIDE < frame_off ||
                    dst - frame + SCREEN_WIDE + frame_off < 0 ||
                    frame_end - dst < frame_off + len ||
                    frame_end - dst < len)
                    return -1;
                for (i = len - 1; i >= 0; i--)
                    dst[i] = dst[frame_off + i];
            }
            break;
        case 2:
            if (forward) {
                if (source + src_len - src < len)
                    return -1;
                memcpy(dst, src, len);
                dst += len;
                src += len;
            } else {
                if (src - source < len)
                    return -1;
                dst -= len;
                src -= len;
                memcpy(dst, src, len);
            }
            break;
        case 3:
            val = forward ? dst[-1] : dst[1];
            if (forward) {
                memset(dst, val, len);
                dst += len;
            } else {
                dst -= len;
                memset(dst, val, len);
            }
            break;
        default:
            break;
        }
        if (dst == dst_end)
            return 0;
    }
    return 0;
}

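/* Parse one demuxed BMV packet: a type byte, optionally followed by audio
 * data (a block count plus 65 bytes per block), a command block, a 768-byte
 * palette and a 16-bit scroll offset; the remainder of the packet is the
 * video bitstream handed to decode_bmv_frame(). */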
static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPacket *pkt)
{
    BMVDecContext * const c = avctx->priv_data;
    int type, scr_off;
    int i;
    uint8_t *srcptr, *outptr;

    c->stream = pkt->data;
    type = bytestream_get_byte(&c->stream);
    if (type & BMV_AUDIO) {
        int blobs = bytestream_get_byte(&c->stream);
        if (pkt->size < blobs * 65 + 2) {
            av_log(avctx, AV_LOG_ERROR, "Audio data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
        c->stream += blobs * 65;
    }
    if (type & BMV_COMMAND) {
        int command_size = (type & BMV_PRINT) ? 8 : 10;
        if (c->stream - pkt->data + command_size > pkt->size) {
            av_log(avctx, AV_LOG_ERROR, "Command data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
        c->stream += command_size;
    }
    if (type & BMV_PALETTE) {
        if (c->stream - pkt->data > pkt->size - 768) {
            av_log(avctx, AV_LOG_ERROR, "Palette data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < 256; i++)
            c->pal[i] = 0xFF << 24 | bytestream_get_be24(&c->stream);
    }
    if (type & BMV_SCROLL) {
        if (c->stream - pkt->data > pkt->size - 2) {
            av_log(avctx, AV_LOG_ERROR, "Screen offset data doesn't fit in frame\n");
            return AVERROR_INVALIDDATA;
        }
        scr_off = (int16_t)bytestream_get_le16(&c->stream);
    } else if ((type & BMV_INTRA) == BMV_INTRA) {
        scr_off = -640;
    } else {
        scr_off = 0;
    }

    if (decode_bmv_frame(c->stream, pkt->size - (c->stream - pkt->data), c->frame, scr_off)) {
        av_log(avctx, AV_LOG_ERROR, "Error decoding frame data\n");
        return AVERROR_INVALIDDATA;
    }

    memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
    c->pic.palette_has_changed = type & BMV_PALETTE;

    outptr = c->pic.data[0];
    srcptr = c->frame;

    for (i = 0; i < avctx->height; i++) {
        memcpy(outptr, srcptr, avctx->width);
        srcptr += avctx->width;
        outptr += c->pic.linesize[0];
    }

    *data_size = sizeof(AVFrame);
    *(AVFrame*)data = c->pic;

    /* always report that the buffer was completely consumed */
    return pkt->size;
}

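/* Verify that the container reports the fixed 640x429 screen size and grab
 * a reusable reference frame up front. */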
static av_cold int decode_init(AVCodecContext *avctx)
{
    BMVDecContext * const c = avctx->priv_data;

    c->avctx = avctx;
    avctx->pix_fmt = PIX_FMT_PAL8;

    if (avctx->width != SCREEN_WIDE || avctx->height != SCREEN_HIGH) {
        av_log(avctx, AV_LOG_ERROR, "Invalid dimensions %dx%d\n", avctx->width, avctx->height);
        return AVERROR_INVALIDDATA;
    }

    c->pic.reference = 1;
    if (avctx->get_buffer(avctx, &c->pic) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return -1;
    }

    c->frame = c->frame_base + 640;

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    BMVDecContext *c = avctx->priv_data;

    if (c->pic.data[0])
        avctx->release_buffer(avctx, &c->pic);

    return 0;
}

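/* BMV audio: the stream is made of 65-byte blocks, each holding one scale
 * byte plus 32 stereo pairs of signed 8-bit samples. The rotated scale byte
 * selects a per-channel multiplier from the table below. */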
typedef struct BMVAudioDecContext {
    AVFrame frame;
} BMVAudioDecContext;

static const int bmv_aud_mults[16] = {
    16512, 8256, 4128, 2064, 1032, 516, 258, 192, 129, 88, 64, 56, 48, 40, 36, 32
};

static av_cold int bmv_aud_decode_init(AVCodecContext *avctx)
{
    BMVAudioDecContext *c = avctx->priv_data;

    if (avctx->channels != 2) {
        av_log(avctx, AV_LOG_ERROR, "invalid number of channels\n");
        return AVERROR(EINVAL);
    }

    avctx->sample_fmt = AV_SAMPLE_FMT_S16;

    avcodec_get_frame_defaults(&c->frame);
    avctx->coded_frame = &c->frame;

    return 0;
}

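/* Expand each 65-byte block into 32 stereo 16-bit samples: every signed
 * 8-bit payload byte is multiplied by its channel's scale, shifted down by
 * 5 and clipped to the int16 range. */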
static int bmv_aud_decode_frame(AVCodecContext *avctx, void *data,
                                int *got_frame_ptr, AVPacket *avpkt)
{
    BMVAudioDecContext *c = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    int blocks = 0, total_blocks, i;
    int ret;
    int16_t *output_samples;
    int scale[2];

    total_blocks = *buf++;
    if (buf_size < total_blocks * 65 + 1) {
        av_log(avctx, AV_LOG_ERROR, "expected %d bytes, got %d\n",
               total_blocks * 65 + 1, buf_size);
        return AVERROR_INVALIDDATA;
    }

    /* get output buffer */
    c->frame.nb_samples = total_blocks * 32;
    if ((ret = avctx->get_buffer(avctx, &c->frame)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    output_samples = (int16_t *)c->frame.data[0];

    for (blocks = 0; blocks < total_blocks; blocks++) {
        uint8_t code = *buf++;
        code = (code >> 1) | (code << 7);
        scale[0] = bmv_aud_mults[code & 0xF];
        scale[1] = bmv_aud_mults[code >> 4];
        for (i = 0; i < 32; i++) {
            *output_samples++ = av_clip_int16((scale[0] * (int8_t)*buf++) >> 5);
            *output_samples++ = av_clip_int16((scale[1] * (int8_t)*buf++) >> 5);
        }
    }

    *got_frame_ptr   = 1;
    *(AVFrame *)data = c->frame;

    return buf_size;
}

AVCodec ff_bmv_video_decoder = {
    .name           = "bmv_video",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_BMV_VIDEO,
    .priv_data_size = sizeof(BMVDecContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Discworld II BMV video"),
};

AVCodec ff_bmv_audio_decoder = {
    .name           = "bmv_audio",
    .type           = AVMEDIA_TYPE_AUDIO,
    .id             = AV_CODEC_ID_BMV_AUDIO,
    .priv_data_size = sizeof(BMVAudioDecContext),
    .init           = bmv_aud_decode_init,
    .decode         = bmv_aud_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Discworld II BMV audio"),
};