/*
 * Fraps FPS1 decoder
 * Copyright (c) 2005 Roine Gustafsson
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lossless Fraps 'FPS1' decoder
 * @author Roine Gustafsson (roine at users sf net)
 * @author Konstantin Shishkov
 *
 * Codec algorithm for version 0 is taken from Transcode <www.transcoding.org>
 *
 * Version 2 files support by Konstantin Shishkov
 */

#include "avcodec.h"
#include "get_bits.h"
#include "huffman.h"
#include "bytestream.h"
#include "bswapdsp.h"
#include "internal.h"

#define FPS_TAG MKTAG('F', 'P', 'S', 'x')

/**
 * local variable storage
 */
typedef struct FrapsContext {
    AVCodecContext *avctx;
    BswapDSPContext bdsp;
    AVFrame *frame;
    uint8_t *tmpbuf;
    int tmpbuf_size;
} FrapsContext;

/**
 * initializes decoder
 * @param avctx codec context
 * @return 0 on success or negative if fails
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    FrapsContext * const s = avctx->priv_data;

    avctx->pix_fmt = AV_PIX_FMT_NONE; /* set in decode_frame */

    s->avctx  = avctx;
    s->tmpbuf = NULL;

    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    ff_bswapdsp_init(&s->bdsp);

    return 0;
}

/**
 * Comparator - our nodes should ascend by count
 * but with preserved symbol order
 */
static int huff_cmp(const void *va, const void *vb)
{
    const Node *a = va, *b = vb;
    return (a->count - b->count) * 256 + a->sym - b->sym;
}

/**
 * decode Fraps v2 packed plane
 */
static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
                               int h, const uint8_t *src, int size, int Uoff,
                               const int step)
{
    int i, j, ret;
    GetBitContext gb;
    VLC vlc;
    Node nodes[512];

    /* the plane starts with 256 little-endian 32-bit symbol counts
     * (1024 bytes) from which the Huffman tree is rebuilt */
    for (i = 0; i < 256; i++)
        nodes[i].count = bytestream_get_le32(&src);
    size -= 1024;
    if ((ret = ff_huff_build_tree(s->avctx, &vlc, 256, nodes, huff_cmp,
                                  FF_HUFFMAN_FLAG_ZERO_COUNT)) < 0)
        return ret;
    /* we have built Huffman table and are ready to decode plane */

    /* convert bits so they may be used by standard bitreader */
    s->bdsp.bswap_buf((uint32_t *) s->tmpbuf,
                      (const uint32_t *) src, size >> 2);

    init_get_bits(&gb, s->tmpbuf, size * 8);
    for (j = 0; j < h; j++) {
        for (i = 0; i < w * step; i += step) {
            dst[i] = get_vlc2(&gb, vlc.table, 9, 3);
            /* lines are stored as deltas between previous lines
             * and we need to add 0x80 to the first lines of chroma planes
             */
            if (j)
                dst[i] += dst[i - stride];
            else if (Uoff)
                dst[i] += 0x80;
            if (get_bits_left(&gb) < 0) {
                ff_free_vlc(&vlc);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride;
    }
    ff_free_vlc(&vlc);
    return 0;
}

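/**
 * decodes a Fraps frame
 * @param avctx codec context
 * @return number of bytes consumed or negative if decoding fails
 */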
static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    FrapsContext * const s = avctx->priv_data;
    const uint8_t *buf     = avpkt->data;
    int buf_size           = avpkt->size;
    AVFrame *frame         = data;
    AVFrame * const f      = s->frame;
    uint32_t header;
    unsigned int version, header_size;
    unsigned int x, y;
    const uint32_t *buf32;
    uint32_t *luma1, *luma2, *cb, *cr;
    uint32_t offs[4];
    int i, j, ret, is_chroma, planes;
    enum AVPixelFormat pix_fmt;
    int prev_pic_bit, expected_size;

    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "Packet is too short\n");
        return AVERROR_INVALIDDATA;
    }

    header       = AV_RL32(buf);
    version      = header & 0xff;
    header_size  = (header & (1 << 30)) ? 8 : 4; /* bit 30 means pad to 8 bytes */
    prev_pic_bit = header & (1U << 31);          /* bit 31 means same as previous pic */

    if (version > 5) {
        av_log(avctx, AV_LOG_ERROR,
               "This file is encoded with Fraps version %d. "
               "This codec can only decode versions <= 5.\n", version);
        return AVERROR_PATCHWELCOME;
    }

    buf += 4;
    if (header_size == 8)
        buf += 4;

    pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P;
    if (avctx->pix_fmt != pix_fmt && f->data[0]) {
        av_frame_unref(f);
    }
    avctx->pix_fmt     = pix_fmt;
    avctx->color_range = version & 1 ? AVCOL_RANGE_UNSPECIFIED
                                     : AVCOL_RANGE_JPEG;

    expected_size = header_size;

    switch (version) {
    case 0:
    default:
        /* Fraps v0 is a reordered YUV420 */
        if (!prev_pic_bit)
            expected_size += avctx->width * avctx->height * 3 / 2;
        if (buf_size != expected_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame length %d (should be %d)\n",
                   buf_size, expected_size);
            return AVERROR_INVALIDDATA;
        }

        if (((avctx->width % 8) != 0) || ((avctx->height % 2) != 0)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid frame size %dx%d\n",
                   avctx->width, avctx->height);
            return AVERROR_INVALIDDATA;
        }

        if ((ret = ff_reget_buffer(avctx, f)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }
        f->pict_type = prev_pic_bit ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
        f->key_frame = f->pict_type == AV_PICTURE_TYPE_I;

        if (f->pict_type == AV_PICTURE_TYPE_I) {
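            /* each inner iteration copies an 8x2 block of luma: 8 samples of
             * the even row, 8 samples of the odd row, then 4 samples for each
             * of the two chroma planes */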
            buf32 = (const uint32_t*)buf;
            for (y = 0; y < avctx->height / 2; y++) {
                luma1 = (uint32_t*)&f->data[0][ y * 2      * f->linesize[0]];
                luma2 = (uint32_t*)&f->data[0][(y * 2 + 1) * f->linesize[0]];
                cr    = (uint32_t*)&f->data[1][ y          * f->linesize[1]];
                cb    = (uint32_t*)&f->data[2][ y          * f->linesize[2]];
                for (x = 0; x < avctx->width; x += 8) {
                    *(luma1++) = *(buf32++);
                    *(luma1++) = *(buf32++);
                    *(luma2++) = *(buf32++);
                    *(luma2++) = *(buf32++);
                    *(cr++)    = *(buf32++);
                    *(cb++)    = *(buf32++);
                }
            }
        }
        break;
    case 1:
        /* Fraps v1 is an upside-down BGR24 */
        if (!prev_pic_bit)
            expected_size += avctx->width * avctx->height * 3;
        if (buf_size != expected_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame length %d (should be %d)\n",
                   buf_size, expected_size);
            return AVERROR_INVALIDDATA;
        }

        if ((ret = ff_reget_buffer(avctx, f)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }
        f->pict_type = prev_pic_bit ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
        f->key_frame = f->pict_type == AV_PICTURE_TYPE_I;

        if (f->pict_type == AV_PICTURE_TYPE_I) {
            for (y = 0; y < avctx->height; y++)
                memcpy(&f->data[0][(avctx->height - y - 1) * f->linesize[0]],
                       &buf[y * avctx->width * 3],
                       3 * avctx->width);
        }
        break;
    case 2:
    case 4:
        /**
         * Fraps v2 is Huffman-coded YUV420 planes
         * Fraps v4 is virtually the same
         */
        planes = 3;
        if ((ret = ff_reget_buffer(avctx, f)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }
        /* skip frame */
        if (buf_size == 8) {
            f->pict_type = AV_PICTURE_TYPE_P;
            f->key_frame = 0;
            break;
        }
        f->pict_type = AV_PICTURE_TYPE_I;
        f->key_frame = 1;
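        /* payload layout: 'FPSx' tag, one little-endian 32-bit offset per
         * plane, then each plane consisting of 1024 bytes of Huffman symbol
         * counts followed by the coded plane data */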
        if ((AV_RL32(buf) != FPS_TAG) || (buf_size < (planes * 1024 + 24))) {
            av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < planes; i++) {
            offs[i] = AV_RL32(buf + 4 + i * 4);
            if (offs[i] >= buf_size || (i && offs[i] <= offs[i - 1] + 1024)) {
                av_log(avctx, AV_LOG_ERROR,
                       "Fraps: plane %i offset is out of bounds\n", i);
                return AVERROR_INVALIDDATA;
            }
        }
        offs[planes] = buf_size;
        for (i = 0; i < planes; i++) {
            is_chroma = !!i;
            av_fast_padded_malloc(&s->tmpbuf, &s->tmpbuf_size,
                                  offs[i + 1] - offs[i] - 1024);
            if (!s->tmpbuf)
                return AVERROR(ENOMEM);
            if ((ret = fraps2_decode_plane(s, f->data[i], f->linesize[i],
                                           avctx->width >> is_chroma,
                                           avctx->height >> is_chroma,
                                           buf + offs[i], offs[i + 1] - offs[i],
                                           is_chroma, 1)) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
                return ret;
            }
        }
        break;
    case 3:
    case 5:
        /* Virtually the same as version 4, but is for RGB24 */
        planes = 3;
        if ((ret = ff_reget_buffer(avctx, f)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return ret;
        }
        /* skip frame */
        if (buf_size == 8) {
            f->pict_type = AV_PICTURE_TYPE_P;
            f->key_frame = 0;
            break;
        }
        f->pict_type = AV_PICTURE_TYPE_I;
        f->key_frame = 1;
        if ((AV_RL32(buf) != FPS_TAG) || (buf_size < (planes * 1024 + 24))) {
            av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < planes; i++) {
            offs[i] = AV_RL32(buf + 4 + i * 4);
            if (offs[i] >= buf_size || (i && offs[i] <= offs[i - 1] + 1024)) {
                av_log(avctx, AV_LOG_ERROR,
                       "Fraps: plane %i offset is out of bounds\n", i);
                return AVERROR_INVALIDDATA;
            }
        }
        offs[planes] = buf_size;
        for (i = 0; i < planes; i++) {
            av_fast_padded_malloc(&s->tmpbuf, &s->tmpbuf_size,
                                  offs[i + 1] - offs[i] - 1024);
            if (!s->tmpbuf)
                return AVERROR(ENOMEM);
            if ((ret = fraps2_decode_plane(s, f->data[0] + i + (f->linesize[0] * (avctx->height - 1)),
                                           -f->linesize[0], avctx->width, avctx->height,
                                           buf + offs[i], offs[i + 1] - offs[i], 0, 3)) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
                return ret;
            }
        }
        /* convert pseudo-YUV into real RGB: the middle component is stored
         * as-is, the other two are stored as deltas against it */
        for (j = 0; j < avctx->height; j++) {
            for (i = 0; i < avctx->width; i++) {
                f->data[0][0 + i*3 + j*f->linesize[0]] += f->data[0][1 + i*3 + j*f->linesize[0]];
                f->data[0][2 + i*3 + j*f->linesize[0]] += f->data[0][1 + i*3 + j*f->linesize[0]];
            }
        }
        break;
    }

    if ((ret = av_frame_ref(frame, f)) < 0)
        return ret;

    *got_frame = 1;

    return buf_size;
}

/**
 * closes decoder
 * @param avctx codec context
 * @return 0 on success or negative if fails
 */
static av_cold int decode_end(AVCodecContext *avctx)
{
    FrapsContext *s = (FrapsContext*)avctx->priv_data;

    av_frame_free(&s->frame);
    av_freep(&s->tmpbuf);
    return 0;
}

AVCodec ff_fraps_decoder = {
    .name           = "fraps",
    .long_name      = NULL_IF_CONFIG_SMALL("Fraps"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FRAPS,
    .priv_data_size = sizeof(FrapsContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
};