/*
 * SVQ1 decoder
 * ported to MPlayer by Arpi <arpi@thot.banki.hu>
 * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
 *
 * Copyright (C) 2002 the xine project
 * Copyright (C) 2002 the ffmpeg project
 *
 * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "get_bits.h"
#include "h263.h"
#include "hpeldsp.h"
#include "internal.h"
#include "mathops.h"
#include "svq1.h"

#undef NDEBUG
#include <assert.h>

static VLC svq1_block_type;
static VLC svq1_motion_component;
static VLC svq1_intra_multistage[6];
static VLC svq1_inter_multistage[6];
static VLC svq1_intra_mean;
static VLC svq1_inter_mean;

/* motion vector (prediction) */
typedef struct svq1_pmv_s {
    int x;
    int y;
} svq1_pmv;

typedef struct SVQ1Context {
    HpelDSPContext hdsp;
    GetBitContext gb;
    AVFrame *prev;

    uint8_t *pkt_swapped;
    int pkt_swapped_allocated;

    int width;
    int height;
    int frame_code;
    int nonref;         // 1 if the current frame won't be referenced
} SVQ1Context;

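/*
 * Descrambling table used by svq1_parse_string(): the first byte of an
 * embedded string seeds the XOR keystream, and each decoded byte updates
 * the seed through this table.
 */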
static const uint8_t string_table[256] = {
    0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
    0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
    0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
    0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
    0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
    0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
    0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
    0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
    0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
    0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
    0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
    0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
    0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
    0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
    0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
    0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
    0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
    0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
    0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
    0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
    0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
    0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
    0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
    0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
    0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
    0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
    0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
    0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
    0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
    0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
    0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
    0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
};

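/*
 * The block decoders below walk a breadth-first list of sub-vectors:
 * SVQ1_PROCESS_VECTOR() optionally splits each vector in half (alternating
 * vertically and horizontally as the level drops from 5 towards 0),
 * SVQ1_CALC_CODEBOOK_ENTRIES() reads one 4-bit codebook index per stage,
 * and SVQ1_ADD_CODEBOOK() accumulates the selected codebook entries into
 * two accumulators (n1/n2), each holding two pixels in 16-bit lanes, and
 * saturates the result to the 0..255 range.
 */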
#define SVQ1_PROCESS_VECTOR()                                           \
    for (; level > 0; i++) {                                            \
        /* process next depth */                                        \
        if (i == m) {                                                   \
            m = n;                                                      \
            if (--level == 0)                                           \
                break;                                                  \
        }                                                               \
        /* divide block if next bit set */                              \
        if (get_bits1(bitbuf) == 0)                                     \
            break;                                                      \
        /* add child nodes */                                           \
        list[n++] = list[i];                                            \
        list[n++] = list[i] +                                           \
                    (((level & 1) ? pitch : 1) << (level / 2 + 1));     \
    }

#define SVQ1_ADD_CODEBOOK()                                             \
    /* add codebook entries to vector */                                \
    for (j = 0; j < stages; j++) {                                      \
        n3  = codebook[entries[j]] ^ 0x80808080;                        \
        n1 += (n3 & 0xFF00FF00) >> 8;                                   \
        n2 +=  n3 & 0x00FF00FF;                                         \
    }                                                                   \
                                                                        \
    /* clip to [0..255] */                                              \
    if (n1 & 0xFF00FF00) {                                              \
        n3  = (n1 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;       \
        n1 += 0x7F007F00;                                               \
        n1 |= (~n1 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;       \
        n1 &= n3 & 0x00FF00FF;                                          \
    }                                                                   \
                                                                        \
    if (n2 & 0xFF00FF00) {                                              \
        n3  = (n2 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;       \
        n2 += 0x7F007F00;                                               \
        n2 |= (~n2 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;       \
        n2 &= n3 & 0x00FF00FF;                                          \
    }

#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)                               \
    codebook = (const uint32_t *)cbook[level];                          \
    if (stages > 0)                                                     \
        bit_cache = get_bits(bitbuf, 4 * stages);                       \
    /* calculate codebook entries for this vector */                    \
    for (j = 0; j < stages; j++) {                                      \
        entries[j] = (((bit_cache >> (4 * (stages - j - 1))) & 0xF) +   \
                      16 * j) << (level + 1);                           \
    }                                                                   \
    mean -= stages * 128;                                               \
    n4    = (mean + (mean >> 31)) << 16 | (mean & 0xFFFF);

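/*
 * Decode one 16x16 intra block: each leaf vector is either skipped
 * (zero-filled), filled with a VLC-coded mean, or reconstructed as the
 * mean plus up to six codebook stages.
 */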
static int svq1_decode_block_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                   int pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    unsigned x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;

        if (stages == -1) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], 0, width);
            continue;   /* skip vector */
        }

        if ((stages > 0 && level >= 4) || stages < 0) {
            ff_dlog(NULL,
                    "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA;  /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);

        if (stages == 0) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], mean, width);
        } else {
            SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_intra_codebooks);

            for (y = 0; y < height; y++) {
                for (x = 0; x < width / 4; x++, codebook++) {
                    n1 = n4;
                    n2 = n4;
                    SVQ1_ADD_CODEBOOK()
                    /* store result */
                    dst[x] = n1 << 8 | n2;
                }
                dst += pitch / 4;
            }
        }
    }

    return 0;
}

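/*
 * Decode one 16x16 residual block of a P-frame: like the intra variant,
 * but the mean is signed and the codebook stages are added on top of the
 * motion-compensated prediction already present in 'pixels'.
 */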
static int svq1_decode_block_non_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                       int pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    int x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;

        if (stages == -1)
            continue;   /* skip vector */

        if ((stages > 0 && level >= 4) || stages < 0) {
            ff_dlog(NULL,
                    "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA;  /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;

        SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_inter_codebooks);

        for (y = 0; y < height; y++) {
            for (x = 0; x < width / 4; x++, codebook++) {
                n3 = dst[x];
                /* add mean value to vector */
                n1 = n4 + ((n3 & 0xFF00FF00) >> 8);
                n2 = n4 + (n3 & 0x00FF00FF);
                SVQ1_ADD_CODEBOOK()
                /* store result */
                dst[x] = n1 << 8 | n2;
            }
            dst += pitch / 4;
        }
    }

    return 0;
}

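/*
 * Decode one motion vector: each component is a VLC-coded magnitude with
 * an optional sign bit, added to the median of the three neighbouring
 * predictors and wrapped into a signed 6-bit range (half-pel units).
 */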
static int svq1_decode_motion_vector(GetBitContext *bitbuf, svq1_pmv *mv,
                                     svq1_pmv **pmv)
{
    int diff;
    int i;

    for (i = 0; i < 2; i++) {
        /* get motion code */
        diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
        if (diff < 0)
            return AVERROR_INVALIDDATA;
        else if (diff) {
            if (get_bits1(bitbuf))
                diff = -diff;
        }

        /* add median of motion vector predictors and clip result */
        if (i == 1)
            mv->y = sign_extend(diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y), 6);
        else
            mv->x = sign_extend(diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x), 6);
    }

    return 0;
}

static void svq1_skip_block(uint8_t *current, uint8_t *previous,
                            int pitch, int x, int y)
{
    uint8_t *src;
    uint8_t *dst;
    int i;

    src = &previous[x + y * pitch];
    dst = current;

    for (i = 0; i < 16; i++) {
        memcpy(dst, src, 16);
        src += pitch;
        dst += pitch;
    }
}

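/*
 * Motion-compensate one 16x16 block with a single motion vector: the vector
 * is predicted from neighbouring blocks, stored back as a predictor for the
 * following blocks, clipped to the frame and applied with the half-pel
 * hpeldsp copy routines.
 */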
static int svq1_motion_inter_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   int pitch, svq1_pmv *motion, int x, int y,
                                   int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[3];
    int result;

    /* predict and decode motion vector */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[x / 8 + 2];
        pmv[2] = &motion[x / 8 + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result != 0)
        return result;

    motion[0].x         =
    motion[x / 8 + 2].x =
    motion[x / 8 + 3].x = mv.x;
    motion[0].y         =
    motion[x / 8 + 2].y =
    motion[x / 8 + 3].y = mv.y;

    mv.x = av_clip(mv.x, -2 * x, 2 * (width  - x - 16));
    mv.y = av_clip(mv.y, -2 * y, 2 * (height - y - 16));

    src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1)) * pitch];
    dst = current;

    hdsp->put_pixels_tab[0][(mv.y & 1) << 1 | (mv.x & 1)](dst, src, pitch, 16);

    return 0;
}

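/*
 * Motion-compensate one 16x16 block as four 8x8 sub-blocks, each with its
 * own differentially coded motion vector.
 */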
static int svq1_motion_inter_4v_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                      uint8_t *current, uint8_t *previous,
                                      int pitch, svq1_pmv *motion, int x, int y,
                                      int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[4];
    int i, result;

    /* predict and decode motion vector (0) */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result != 0)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 3];
    }
    result = svq1_decode_motion_vector(bitbuf, &motion[0], pmv);
    if (result != 0)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];

    result = svq1_decode_motion_vector(bitbuf, &motion[(x / 8) + 2], pmv);
    if (result != 0)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];

    result = svq1_decode_motion_vector(bitbuf, pmv[3], pmv);
    if (result != 0)
        return result;

    /* form predictions */
    for (i = 0; i < 4; i++) {
        int mvx = pmv[i]->x + (i  & 1) * 16;
        int mvy = pmv[i]->y + (i >> 1) * 16;

        // FIXME: clipping or padding?
        mvx = av_clip(mvx, -2 * x, 2 * (width  - x - 8));
        mvy = av_clip(mvy, -2 * y, 2 * (height - y - 8));

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1)) * pitch];
        dst = current;

        hdsp->put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst, src, pitch, 8);

        /* select next block */
        if (i & 1)
            current += 8 * (pitch - 1);
        else
            current += 8;
    }

    return 0;
}

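/*
 * Decode one block of a P-frame: a block-type VLC selects between skip,
 * single-vector inter, four-vector inter and intra coding; skip and intra
 * blocks reset the motion-vector predictors.
 */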
static int svq1_decode_delta_block(AVCodecContext *avctx, HpelDSPContext *hdsp,
                                   GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   int pitch, svq1_pmv *motion, int x, int y,
                                   int width, int height)
{
    uint32_t block_type;
    int result = 0;

    /* get block type */
    block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);

    /* reset motion vectors */
    if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
        motion[0].x         =
        motion[0].y         =
        motion[x / 8 + 2].x =
        motion[x / 8 + 2].y =
        motion[x / 8 + 3].x =
        motion[x / 8 + 3].y = 0;
    }

    switch (block_type) {
    case SVQ1_BLOCK_SKIP:
        svq1_skip_block(current, previous, pitch, x, y);
        break;

    case SVQ1_BLOCK_INTER:
        result = svq1_motion_inter_block(hdsp, bitbuf, current, previous,
                                         pitch, motion, x, y, width, height);

        if (result != 0) {
            ff_dlog(avctx, "Error in svq1_motion_inter_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTER_4V:
        result = svq1_motion_inter_4v_block(hdsp, bitbuf, current, previous,
                                            pitch, motion, x, y, width, height);

        if (result != 0) {
            ff_dlog(avctx, "Error in svq1_motion_inter_4v_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTRA:
        result = svq1_decode_block_intra(bitbuf, current, pitch);
        break;
    }

    return result;
}

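/* Decode an embedded (scrambled) string; its first byte is the length. */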
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
{
    uint8_t seed;
    int i;

    out[0] = get_bits(bitbuf, 8);
    seed   = string_table[out[0]];

    for (i = 1; i <= out[0]; i++) {
        out[i] = get_bits(bitbuf, 8) ^ seed;
        seed   = string_table[out[i] ^ seed];
    }
}

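/*
 * Parse the frame header: temporal reference, frame type (type 2 marks a
 * non-referenced P-frame), an optional checksum and embedded message for
 * certain frame codes, and the frame size, either from the standard size
 * table or as explicit 12-bit width/height values.
 */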
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
{
    SVQ1Context *s = avctx->priv_data;
    GetBitContext *bitbuf = &s->gb;
    int frame_size_code;

    skip_bits(bitbuf, 8); /* temporal_reference */

    /* frame type */
    s->nonref = 0;
    switch (get_bits(bitbuf, 2)) {
    case 0:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case 2:
        s->nonref = 1;
        /* fall through */
    case 1:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type.\n");
        return AVERROR_INVALIDDATA;
    }

    if (frame->pict_type == AV_PICTURE_TYPE_I) {
        /* unknown fields */
        if (s->frame_code == 0x50 || s->frame_code == 0x60) {
            int csum = get_bits(bitbuf, 16);

            csum = ff_svq1_packet_checksum(bitbuf->buffer,
                                           bitbuf->size_in_bits >> 3,
                                           csum);

            ff_dlog(avctx, "%s checksum (%02x) for packet data\n",
                    (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->frame_code ^ 0x10) >= 0x50) {
            uint8_t msg[256];

            svq1_parse_string(bitbuf, msg);

            av_log(avctx, AV_LOG_INFO,
                   "embedded message: \"%s\"\n", (char *)msg);
        }

        skip_bits(bitbuf, 2);
        skip_bits(bitbuf, 2);
        skip_bits1(bitbuf);

        /* load frame size */
        frame_size_code = get_bits(bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            s->width  = get_bits(bitbuf, 12);
            s->height = get_bits(bitbuf, 12);

            if (!s->width || !s->height)
                return AVERROR_INVALIDDATA;
        } else {
            /* get width, height from table */
            s->width  = ff_svq1_frame_size_table[frame_size_code][0];
            s->height = ff_svq1_frame_size_table[frame_size_code][1];
        }
    }

    /* unknown fields */
    if (get_bits1(bitbuf) == 1) {
        skip_bits1(bitbuf);    /* use packet checksum if (1) */
        skip_bits1(bitbuf);    /* component checksums after image data if (1) */

        if (get_bits(bitbuf, 2) != 0)
            return AVERROR_INVALIDDATA;
    }

    if (get_bits1(bitbuf) == 1) {
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 4);
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 2);

        while (get_bits1(bitbuf) == 1)
            skip_bits(bitbuf, 8);
    }

    return 0;
}

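/*
 * Decode one packet: read the 22-bit frame code, un-swap part of the header
 * for frame codes other than 0x20, parse the header, then decode the three
 * planes (intra blocks for keyframes, delta blocks against the previous
 * frame otherwise). Chroma planes are quarter-resolution (YUV410P).
 */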
static int svq1_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    SVQ1Context *s     = avctx->priv_data;
    AVFrame *cur       = data;
    uint8_t *current;
    int result, i, x, y, width, height;
    svq1_pmv *pmv;

    /* initialize bit buffer */
    init_get_bits(&s->gb, buf, buf_size * 8);

    /* decode frame header */
    s->frame_code = get_bits(&s->gb, 22);

    if ((s->frame_code & ~0x70) || !(s->frame_code & 0x60))
        return AVERROR_INVALIDDATA;

    /* swap some header bytes (why?) */
    if (s->frame_code != 0x20) {
        uint32_t *src;

        if (buf_size < 9 * 4) {
            av_log(avctx, AV_LOG_ERROR, "Input packet too small\n");
            return AVERROR_INVALIDDATA;
        }

        av_fast_padded_malloc(&s->pkt_swapped,
                              &s->pkt_swapped_allocated,
                              buf_size);
        if (!s->pkt_swapped)
            return AVERROR(ENOMEM);

        memcpy(s->pkt_swapped, buf, buf_size);
        buf = s->pkt_swapped;

        init_get_bits(&s->gb, buf, buf_size * 8);
        skip_bits(&s->gb, 22);

        src = (uint32_t *)(s->pkt_swapped + 4);

        for (i = 0; i < 4; i++)
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
    }

    result = svq1_decode_frame_header(avctx, cur);
    if (result != 0) {
        ff_dlog(avctx, "Error in svq1_decode_frame_header %i\n", result);
        return result;
    }

    result = ff_set_dimensions(avctx, s->width, s->height);
    if (result < 0)
        return result;

    if ((avctx->skip_frame >= AVDISCARD_NONREF && s->nonref) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY &&
         cur->pict_type != AV_PICTURE_TYPE_I) ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    result = ff_get_buffer(avctx, cur, s->nonref ? 0 : AV_GET_BUFFER_FLAG_REF);
    if (result < 0)
        return result;

    pmv = av_malloc((FFALIGN(s->width, 16) / 8 + 3) * sizeof(*pmv));
    if (!pmv)
        return AVERROR(ENOMEM);

    /* decode y, u and v components */
    for (i = 0; i < 3; i++) {
        int linesize = cur->linesize[i];
        if (i == 0) {
            width  = FFALIGN(s->width,  16);
            height = FFALIGN(s->height, 16);
        } else {
            if (avctx->flags & CODEC_FLAG_GRAY)
                break;
            width  = FFALIGN(s->width  / 4, 16);
            height = FFALIGN(s->height / 4, 16);
        }

        current = cur->data[i];

        if (cur->pict_type == AV_PICTURE_TYPE_I) {
            /* keyframe */
            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_block_intra(&s->gb, &current[x],
                                                     linesize);
                    if (result != 0) {
                        av_log(avctx, AV_LOG_INFO,
                               "Error in svq1_decode_block %i (keyframe)\n",
                               result);
                        goto err;
                    }
                }
                current += 16 * linesize;
            }
        } else {
            /* delta frame */
            uint8_t *previous = s->prev->data[i];
            if (!previous ||
                s->prev->width != s->width || s->prev->height != s->height) {
                av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
                result = AVERROR_INVALIDDATA;
                goto err;
            }
            memset(pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv));

            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_delta_block(avctx, &s->hdsp,
                                                     &s->gb, &current[x],
                                                     previous, linesize,
                                                     pmv, x, y, width, height);
                    if (result != 0) {
                        ff_dlog(avctx,
                                "Error in svq1_decode_delta_block %i\n",
                                result);
                        goto err;
                    }
                }

                pmv[0].x =
                pmv[0].y = 0;

                current += 16 * linesize;
            }
        }
    }

    if (!s->nonref) {
        av_frame_unref(s->prev);
        result = av_frame_ref(s->prev, cur);
        if (result < 0)
            goto err;
    }

    *got_frame = 1;
    result     = buf_size;

err:
    av_free(pmv);
    return result;
}

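/*
 * One-time decoder setup: allocate the reference frame, round the frame
 * size up to a multiple of 4 and build the static VLC tables; the intra
 * and inter multistage VLCs for the six levels share a single 168-entry
 * static table, carved up via 'offset'.
 */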
static av_cold int svq1_decode_init(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;
    int i;
    int offset = 0;

    s->prev = av_frame_alloc();
    if (!s->prev)
        return AVERROR(ENOMEM);

    s->width  = (avctx->width  + 3) & ~3;
    s->height = (avctx->height + 3) & ~3;
    avctx->pix_fmt = AV_PIX_FMT_YUV410P;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);

    INIT_VLC_STATIC(&svq1_block_type, 2, 4,
                    &ff_svq1_block_type_vlc[0][1], 2, 1,
                    &ff_svq1_block_type_vlc[0][0], 2, 1, 6);

    INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
                    &ff_mvtab[0][1], 2, 1,
                    &ff_mvtab[0][0], 2, 1, 176);

    for (i = 0; i < 6; i++) {
        static const uint8_t sizes[2][6] = { { 14, 10, 14, 18, 16, 18 },
                                             { 10, 10, 14, 14, 14, 16 } };
        static VLC_TYPE table[168][2];
        svq1_intra_multistage[i].table           = &table[offset];
        svq1_intra_multistage[i].table_allocated = sizes[0][i];
        offset                                  += sizes[0][i];
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &ff_svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_intra_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
        svq1_inter_multistage[i].table           = &table[offset];
        svq1_inter_multistage[i].table_allocated = sizes[1][i];
        offset                                  += sizes[1][i];
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &ff_svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_inter_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
    }

    INIT_VLC_STATIC(&svq1_intra_mean, 8, 256,
                    &ff_svq1_intra_mean_vlc[0][1], 4, 2,
                    &ff_svq1_intra_mean_vlc[0][0], 4, 2, 632);

    INIT_VLC_STATIC(&svq1_inter_mean, 9, 512,
                    &ff_svq1_inter_mean_vlc[0][1], 4, 2,
                    &ff_svq1_inter_mean_vlc[0][0], 4, 2, 1434);

    return 0;
}

static av_cold int svq1_decode_end(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;

    av_frame_free(&s->prev);
    av_freep(&s->pkt_swapped);

    return 0;
}

static void svq1_flush(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;

    av_frame_unref(s->prev);
}

AVCodec ff_svq1_decoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_decode_init,
    .close          = svq1_decode_end,
    .decode         = svq1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = svq1_flush,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};