/*
 * Fraps FPS1 decoder
 * Copyright (c) 2005 Roine Gustafsson
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lossless Fraps 'FPS1' decoder
 * @author Roine Gustafsson (roine at users sf net)
 * @author Konstantin Shishkov
 *
 * Codec algorithm for version 0 is taken from Transcode <www.transcoding.org>
 *
 * Version 2 files support by Konstantin Shishkov
 */
#include "avcodec.h"
#include "get_bits.h"
#include "huffman.h"
#include "bytestream.h"
#include "dsputil.h"

#define FPS_TAG MKTAG('F', 'P', 'S', 'x')

/**
 * local variable storage
 */
typedef struct FrapsContext {
    AVCodecContext *avctx;
    AVFrame frame;
    uint8_t *tmpbuf;
    int tmpbuf_size;
    DSPContext dsp;
} FrapsContext;

/**
 * initializes decoder
 * @param avctx codec context
 * @return 0 on success or negative if fails
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    FrapsContext * const s = avctx->priv_data;

    avcodec_get_frame_defaults(&s->frame);
    avctx->coded_frame = (AVFrame*)&s->frame;

    s->avctx  = avctx;
    s->tmpbuf = NULL;

    dsputil_init(&s->dsp, avctx);

    return 0;
}

/**
 * Comparator - our nodes should ascend by count
 * but with preserved symbol order
 */
static int huff_cmp(const void *va, const void *vb){
    const Node *a = va, *b = vb;
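    /* the count difference is scaled by 256 so it always dominates; the symbol
     * difference (magnitude below 256) only breaks ties between equal counts */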
    return (a->count - b->count)*256 + a->sym - b->sym;
}

/**
 * decode Fraps v2 packed plane
 */
static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
                               int h, const uint8_t *src, int size, int Uoff,
                               const int step)
{
    int i, j;
    GetBitContext gb;
    VLC vlc;
    Node nodes[512];
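
    /* each plane begins with a table of 256 little-endian 32-bit symbol counts
     * (1024 bytes), from which the Huffman tree is rebuilt */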
    for(i = 0; i < 256; i++)
        nodes[i].count = bytestream_get_le32(&src);
    size -= 1024;
    if (ff_huff_build_tree(s->avctx, &vlc, 256, nodes, huff_cmp,
                           FF_HUFFMAN_FLAG_ZERO_COUNT) < 0)
        return -1;
    /* we have built Huffman table and are ready to decode plane */

    /* convert bits so they may be used by standard bitreader */
    s->dsp.bswap_buf((uint32_t *)s->tmpbuf, (const uint32_t *)src, size >> 2);

    init_get_bits(&gb, s->tmpbuf, size * 8);
    for(j = 0; j < h; j++){
        for(i = 0; i < w*step; i += step){
            dst[i] = get_vlc2(&gb, vlc.table, 9, 3);
            /* pixels are stored as deltas from the line above,
             * and 0x80 must be added to the first line of chroma planes
             */
            if(j) dst[i] += dst[i - stride];
            else if(Uoff) dst[i] += 0x80;
            if (get_bits_left(&gb) < 0) {
                free_vlc(&vlc);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride;
    }
    free_vlc(&vlc);
    return 0;
}

static int decode_frame(AVCodecContext *avctx,
                        void *data, int *data_size,
                        AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    FrapsContext * const s = avctx->priv_data;
    AVFrame *frame = data;
    AVFrame * const f = (AVFrame*)&s->frame;
    uint32_t header;
    unsigned int version,header_size;
    unsigned int x, y;
    const uint32_t *buf32;
    uint32_t *luma1,*luma2,*cb,*cr;
    uint32_t offs[4];
    int i, j, is_chroma;
    const int planes = 3;

    if (buf_size < 4) {
        av_log(avctx, AV_LOG_ERROR, "Packet is too short\n");
        return AVERROR_INVALIDDATA;
    }

    header = AV_RL32(buf);
    version = header & 0xff;
    header_size = (header & (1<<30))? 8 : 4; /* bit 30 means pad to 8 bytes */

    if (version > 5) {
        av_log(avctx, AV_LOG_ERROR,
               "This file is encoded with Fraps version %d. " \
               "This codec can only decode versions <= 5.\n", version);
        return -1;
    }

    buf += header_size;

    f->pict_type = AV_PICTURE_TYPE_I;
    f->key_frame = 1;
    f->reference = 3;
    f->buffer_hints = FF_BUFFER_HINTS_VALID |
                      FF_BUFFER_HINTS_PRESERVE |
                      FF_BUFFER_HINTS_REUSABLE;

    switch(version) {
    case 0:
    default:
        /* Fraps v0 is a reordered YUV420 */
        avctx->pix_fmt = PIX_FMT_YUVJ420P;

        if ( buf_size != avctx->width*avctx->height*3/2+header_size &&
             buf_size != header_size ) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame length %d (should be %d)\n",
                   buf_size, avctx->width*avctx->height*3/2+header_size);
            return -1;
        }

        if ( (avctx->width % 8) != 0 || (avctx->height % 2) != 0 ) {
            av_log(avctx, AV_LOG_ERROR, "Invalid frame size %dx%d\n",
                   avctx->width, avctx->height);
            return -1;
        }

        if (avctx->reget_buffer(avctx, f)) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return -1;
        }
        /* bit 31 means same as previous pic */
        if (header & (1U<<31)) {
            f->pict_type = AV_PICTURE_TYPE_P;
            f->key_frame = 0;
        } else {
            buf32=(const uint32_t*)buf;
            for(y=0; y<avctx->height/2; y++){
                luma1=(uint32_t*)&f->data[0][ y*2*f->linesize[0] ];
                luma2=(uint32_t*)&f->data[0][ (y*2+1)*f->linesize[0] ];
                cr=(uint32_t*)&f->data[1][ y*f->linesize[1] ];
                cb=(uint32_t*)&f->data[2][ y*f->linesize[2] ];
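                /* every 24 source bytes hold 8 luma samples for each of two
                 * adjacent rows, then 4 samples for each chroma plane */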
                for(x=0; x<avctx->width; x+=8){
                    *luma1++ = *buf32++;
                    *luma1++ = *buf32++;
                    *luma2++ = *buf32++;
                    *luma2++ = *buf32++;
                    *cr++    = *buf32++;
                    *cb++    = *buf32++;
                }
            }
        }
        break;

    case 1:
        /* Fraps v1 is an upside-down BGR24 */
        avctx->pix_fmt = PIX_FMT_BGR24;

        if ( buf_size != avctx->width*avctx->height*3+header_size &&
             buf_size != header_size ) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame length %d (should be %d)\n",
                   buf_size, avctx->width*avctx->height*3+header_size);
            return -1;
        }

        if (avctx->reget_buffer(avctx, f)) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return -1;
        }
        /* bit 31 means same as previous pic */
        if (header & (1U<<31)) {
            f->pict_type = AV_PICTURE_TYPE_P;
            f->key_frame = 0;
        } else {
            for(y=0; y<avctx->height; y++)
                memcpy(&f->data[0][ (avctx->height - y - 1)*f->linesize[0] ],
                       &buf[y*avctx->width*3],
                       3*avctx->width);
        }
        break;

    case 2:
    case 4:
        /**
         * Fraps v2 is Huffman-coded YUV420 planes
         * Fraps v4 is virtually the same
         */
        avctx->pix_fmt = PIX_FMT_YUVJ420P;

        if (avctx->reget_buffer(avctx, f)) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return -1;
        }

        /* skip frame */
        if(buf_size == 8) {
            f->pict_type = AV_PICTURE_TYPE_P;
            f->key_frame = 0;
            break;
        }

        if (AV_RL32(buf) != FPS_TAG || buf_size < planes*1024 + 24) {
            av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
            return -1;
        }
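        /* the FPS_TAG is followed by one 32-bit offset per plane, giving each
         * plane's position within the packet */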
        for(i = 0; i < planes; i++) {
            offs[i] = AV_RL32(buf + 4 + i * 4);
            if(offs[i] >= buf_size || (i && offs[i] <= offs[i - 1] + 1024)) {
                av_log(avctx, AV_LOG_ERROR, "Fraps: plane %i offset is out of bounds\n", i);
                return -1;
            }
        }
        offs[planes] = buf_size;
        for(i = 0; i < planes; i++){
            is_chroma = !!i;
            av_fast_padded_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024);
            if (!s->tmpbuf)
                return AVERROR(ENOMEM);
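            /* chroma planes are half width and half height; passing is_chroma
             * as Uoff makes the plane decoder add 0x80 to their first line */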
            if(fraps2_decode_plane(s, f->data[i], f->linesize[i], avctx->width >> is_chroma,
                                   avctx->height >> is_chroma, buf + offs[i], offs[i + 1] - offs[i], is_chroma, 1) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
                return -1;
            }
        }
        break;

    case 3:
    case 5:
        /* Virtually the same as version 4, but is for RGB24 */
        avctx->pix_fmt = PIX_FMT_BGR24;

        if (avctx->reget_buffer(avctx, f)) {
            av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
            return -1;
        }

        /* skip frame */
        if(buf_size == 8) {
            f->pict_type = AV_PICTURE_TYPE_P;
            f->key_frame = 0;
            break;
        }

        if (AV_RL32(buf) != FPS_TAG || buf_size < planes*1024 + 24) {
            av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
            return -1;
        }

        for(i = 0; i < planes; i++) {
            offs[i] = AV_RL32(buf + 4 + i * 4);
            if(offs[i] >= buf_size || (i && offs[i] <= offs[i - 1] + 1024)) {
                av_log(avctx, AV_LOG_ERROR, "Fraps: plane %i offset is out of bounds\n", i);
                return -1;
            }
        }
        offs[planes] = buf_size;
        for(i = 0; i < planes; i++){
            av_fast_padded_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024);
            if (!s->tmpbuf)
                return AVERROR(ENOMEM);
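            /* write the plane bottom-up via a negative stride, stepping 3 bytes
             * per pixel so each plane fills one component of packed BGR24 */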
            if(fraps2_decode_plane(s, f->data[0] + i + (f->linesize[0] * (avctx->height - 1)), -f->linesize[0],
                                   avctx->width, avctx->height, buf + offs[i], offs[i + 1] - offs[i], 0, 3) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
                return -1;
            }
        }
        // convert pseudo-YUV into real RGB
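        // B and R are stored as differences from G (the middle byte of each
        // BGR triplet), so add G back to both components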
        for(j = 0; j < avctx->height; j++){
            for(i = 0; i < avctx->width; i++){
                f->data[0][0 + i*3 + j*f->linesize[0]] += f->data[0][1 + i*3 + j*f->linesize[0]];
                f->data[0][2 + i*3 + j*f->linesize[0]] += f->data[0][1 + i*3 + j*f->linesize[0]];
            }
        }
        break;
    }

    *frame = *f;
    *data_size = sizeof(AVFrame);

    return buf_size;
}

/**
 * closes decoder
 * @param avctx codec context
 * @return 0 on success or negative if fails
 */
static av_cold int decode_end(AVCodecContext *avctx)
{
    FrapsContext *s = (FrapsContext*)avctx->priv_data;

    if (s->frame.data[0])
        avctx->release_buffer(avctx, &s->frame);

    av_freep(&s->tmpbuf);
    return 0;
}

AVCodec ff_fraps_decoder = {
    .name           = "fraps",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = CODEC_ID_FRAPS,
    .priv_data_size = sizeof(FrapsContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Fraps"),
};