/*
 * SVQ1 decoder
 * ported to MPlayer by Arpi <arpi@thot.banki.hu>
 * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
 *
 * Copyright (C) 2002 the xine project
 * Copyright (C) 2002 the ffmpeg project
 *
 * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "mathops.h"

#include "svq1.h"

#undef NDEBUG
#include <assert.h>

extern const uint8_t ff_mvtab[33][2];

static VLC svq1_block_type;
static VLC svq1_motion_component;
static VLC svq1_intra_multistage[6];
static VLC svq1_inter_multistage[6];
static VLC svq1_intra_mean;
static VLC svq1_inter_mean;

/* motion vector (prediction) */
typedef struct svq1_pmv_s {
    int x;
    int y;
} svq1_pmv;

static const uint8_t string_table[256] = {
    0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
    0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
    0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
    0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
    0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
    0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
    0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
    0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
    0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
    0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
    0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
    0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
    0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
    0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
    0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
    0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
    0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
    0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
    0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
    0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
    0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
    0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
    0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
    0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
    0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
    0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
    0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
    0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
    0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
    0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
    0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
    0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
};
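
/* A note on the macro below: SVQ1_PROCESS_VECTOR() maintains a breadth-first
 * work list of sub-vectors for one 16x16 block.  Decoding starts at level 5
 * (a 16x16 vector); each time the bitstream signals a split, the vector is
 * divided into top/bottom halves on odd levels and left/right halves on even
 * levels, and both halves are appended to list[].  The second half's offset,
 * ((level & 1) ? pitch : 1) << (level / 2 + 1), is half the height (in rows)
 * or half the width (in pixels) of the vector being split. */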
#define SVQ1_PROCESS_VECTOR()                                           \
    for (; level > 0; i++) {                                            \
        /* process next depth */                                        \
        if (i == m) {                                                   \
            m = n;                                                      \
            if (--level == 0)                                           \
                break;                                                  \
        }                                                               \
        /* divide block if next bit set */                              \
        if (get_bits1(bitbuf) == 0)                                     \
            break;                                                      \
        /* add child nodes */                                           \
        list[n++] = list[i];                                            \
        list[n++] = list[i] +                                           \
                    (((level & 1) ? pitch : 1) << (level / 2 + 1));     \
    }
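
/* A note on the macro below: the codebook accumulation works on two pixels
 * per 16-bit lane of a 32-bit word; n1 carries bytes 1 and 3 of a four-pixel
 * group, n2 carries bytes 0 and 2.  The "clip to [0..255]" section is a
 * branch-light saturation: n3 remembers which lanes went negative so they
 * can be cleared at the end, adding 0x7F00 to each lane pushes any value
 * above 255 into that lane's sign bit, the following OR forces those lanes
 * to 0xFF, and the final AND keeps only the low 8 bits of every lane. */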
#define SVQ1_ADD_CODEBOOK()                                             \
    /* add codebook entries to vector */                                \
    for (j = 0; j < stages; j++) {                                      \
        n3  = codebook[entries[j]] ^ 0x80808080;                        \
        n1 += (n3 & 0xFF00FF00) >> 8;                                   \
        n2 +=  n3 & 0x00FF00FF;                                         \
    }                                                                   \
                                                                        \
    /* clip to [0..255] */                                              \
    if (n1 & 0xFF00FF00) {                                              \
        n3  = (n1 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;       \
        n1 += 0x7F007F00;                                               \
        n1 |= (~n1 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;       \
        n1 &= n3 & 0x00FF00FF;                                          \
    }                                                                   \
                                                                        \
    if (n2 & 0xFF00FF00) {                                              \
        n3  = (n2 >> 15  & 0x00010001 | 0x01000100) - 0x00010001;       \
        n2 += 0x7F007F00;                                               \
        n2 |= (~n2 >> 15 & 0x00010001 | 0x01000100) - 0x00010001;       \
        n2 &= n3 & 0x00FF00FF;                                          \
    }

#define SVQ1_DO_CODEBOOK_INTRA()                                        \
    for (y = 0; y < height; y++) {                                      \
        for (x = 0; x < width / 4; x++, codebook++) {                   \
            n1 = n4;                                                    \
            n2 = n4;                                                    \
            SVQ1_ADD_CODEBOOK()                                         \
            /* store result */                                          \
            dst[x] = n1 << 8 | n2;                                      \
        }                                                               \
        dst += pitch / 4;                                               \
    }

#define SVQ1_DO_CODEBOOK_NONINTRA()                                     \
    for (y = 0; y < height; y++) {                                      \
        for (x = 0; x < width / 4; x++, codebook++) {                   \
            n3 = dst[x];                                                \
            /* add mean value to vector */                              \
            n1 = n4 + ((n3 & 0xFF00FF00) >> 8);                         \
            n2 = n4 + (n3 & 0x00FF00FF);                                \
            SVQ1_ADD_CODEBOOK()                                         \
            /* store result */                                          \
            dst[x] = n1 << 8 | n2;                                      \
        }                                                               \
        dst += pitch / 4;                                               \
    }
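
/* A note on the macro below: each stage reads a 4-bit index from the
 * bitstream; entries[j] converts it into an offset (in 32-bit words) into
 * the current level's codebook, with stage j's block of 16 vectors stored
 * after stage j-1's.  The mean is then biased by -128 per stage (cancelling
 * the 0x80808080 XOR applied in SVQ1_ADD_CODEBOOK) and packed into both
 * 16-bit lanes of n4; the (mean >> 31) term adjusts the upper lane so that
 * per-lane arithmetic stays consistent when the mean is negative. */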
#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)                               \
    codebook = (const uint32_t *)cbook[level];                          \
    if (stages > 0)                                                     \
        bit_cache = get_bits(bitbuf, 4 * stages);                       \
    /* calculate codebook entries for this vector */                    \
    for (j = 0; j < stages; j++) {                                      \
        entries[j] = (((bit_cache >> (4 * (stages - j - 1))) & 0xF) +   \
                      16 * j) << (level + 1);                           \
    }                                                                   \
    mean -= stages * 128;                                               \
    n4    = mean + (mean >> 31) << 16 | (mean & 0xFFFF);

static int svq1_decode_block_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                   int pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    unsigned x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;

        if (stages == -1) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], 0, width);
            continue;   /* skip vector */
        }

        if (stages > 0 && level >= 4) {
            av_dlog(NULL,
                    "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return -1;  /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);

        if (stages == 0) {
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], mean, width);
        } else {
            SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_intra_codebooks);
            SVQ1_DO_CODEBOOK_INTRA()
        }
    }

    return 0;
}

static int svq1_decode_block_non_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                       int pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    int x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;

        if (stages == -1)
            continue;   /* skip vector */

        if ((stages > 0) && (level >= 4)) {
            av_dlog(NULL,
                    "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return -1;  /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;

        SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_inter_codebooks);
        SVQ1_DO_CODEBOOK_NONINTRA()
    }

    return 0;
}
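
/* A note on the function below: each motion component's differential is
 * decoded with the shared MPEG motion VLC (built from ff_mvtab), given a
 * sign bit when nonzero, added to the median of the three neighbouring
 * predictors, and wrapped into 6 signed bits (-32..31 in half-pel units)
 * with sign_extend(). */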
static int svq1_decode_motion_vector(GetBitContext *bitbuf, svq1_pmv *mv,
                                     svq1_pmv **pmv)
{
    int diff;
    int i;

    for (i = 0; i < 2; i++) {
        /* get motion code */
        diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
        if (diff < 0)
            return -1;
        else if (diff) {
            if (get_bits1(bitbuf))
                diff = -diff;
        }

        /* add median of motion vector predictors and clip result */
        if (i == 1)
            mv->y = sign_extend(diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y), 6);
        else
            mv->x = sign_extend(diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x), 6);
    }

    return 0;
}

static void svq1_skip_block(uint8_t *current, uint8_t *previous,
                            int pitch, int x, int y)
{
    uint8_t *src;
    uint8_t *dst;
    int i;

    src = &previous[x + y * pitch];
    dst = current;

    for (i = 0; i < 16; i++) {
        memcpy(dst, src, 16);
        src += pitch;
        dst += pitch;
    }
}
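
/* A note on the two functions below: motion compensation goes through the
 * shared dsputil put_pixels tables.  The low bit of each half-pel component
 * selects the interpolation variant (index (mv.y & 1) << 1 | (mv.x & 1)
 * picks copy, horizontal, vertical or 2D averaging), while mv >> 1 gives the
 * integer pixel offset into the reference plane. */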
static int svq1_motion_inter_block(MpegEncContext *s, GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   int pitch, svq1_pmv *motion, int x, int y)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[3];
    int result;

    /* predict and decode motion vector */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[x / 8 + 2];
        pmv[2] = &motion[x / 8 + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result != 0)
        return result;

    motion[0].x         =
    motion[x / 8 + 2].x =
    motion[x / 8 + 3].x = mv.x;
    motion[0].y         =
    motion[x / 8 + 2].y =
    motion[x / 8 + 3].y = mv.y;

    if (y + (mv.y >> 1) < 0)
        mv.y = 0;
    if (x + (mv.x >> 1) < 0)
        mv.x = 0;

    src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1)) * pitch];
    dst = current;

    s->dsp.put_pixels_tab[0][(mv.y & 1) << 1 | (mv.x & 1)](dst, src, pitch, 16);

    return 0;
}

static int svq1_motion_inter_4v_block(MpegEncContext *s, GetBitContext *bitbuf,
                                      uint8_t *current, uint8_t *previous,
                                      int pitch, svq1_pmv *motion, int x, int y)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[4];
    int i, result;

    /* predict and decode motion vector (0) */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result != 0)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 3];
    }

    result = svq1_decode_motion_vector(bitbuf, &motion[0], pmv);
    if (result != 0)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];

    result = svq1_decode_motion_vector(bitbuf, &motion[(x / 8) + 2], pmv);
    if (result != 0)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];

    result = svq1_decode_motion_vector(bitbuf, pmv[3], pmv);
    if (result != 0)
        return result;

    /* form predictions */
    for (i = 0; i < 4; i++) {
        int mvx = pmv[i]->x + (i  & 1) * 16;
        int mvy = pmv[i]->y + (i >> 1) * 16;

        // FIXME: clipping or padding?
        if (y + (mvy >> 1) < 0)
            mvy = 0;
        if (x + (mvx >> 1) < 0)
            mvx = 0;

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1)) * pitch];
        dst = current;

        s->dsp.put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst, src, pitch, 8);

        /* select next block */
        if (i & 1)
            current += 8 * (pitch - 1);
        else
            current += 8;
    }

    return 0;
}

static int svq1_decode_delta_block(MpegEncContext *s, GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   int pitch, svq1_pmv *motion, int x, int y)
{
    uint32_t block_type;
    int result = 0;

    /* get block type */
    block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);

    /* reset motion vectors */
    if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
        motion[0].x         =
        motion[0].y         =
        motion[x / 8 + 2].x =
        motion[x / 8 + 2].y =
        motion[x / 8 + 3].x =
        motion[x / 8 + 3].y = 0;
    }

    switch (block_type) {
    case SVQ1_BLOCK_SKIP:
        svq1_skip_block(current, previous, pitch, x, y);
        break;

    case SVQ1_BLOCK_INTER:
        result = svq1_motion_inter_block(s, bitbuf, current, previous,
                                         pitch, motion, x, y);
        if (result != 0) {
            av_dlog(s->avctx, "Error in svq1_motion_inter_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTER_4V:
        result = svq1_motion_inter_4v_block(s, bitbuf, current, previous,
                                            pitch, motion, x, y);
        if (result != 0) {
            av_dlog(s->avctx,
                    "Error in svq1_motion_inter_4v_block %i\n", result);
            break;
        }
        result = svq1_decode_block_non_intra(bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTRA:
        result = svq1_decode_block_intra(bitbuf, current, pitch);
        break;
    }

    return result;
}
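
/* A note on the function below: embedded strings are stored scrambled.  The
 * first byte is the plain-text length and seeds the descrambler via
 * string_table[]; every following byte is XORed with the running seed, and
 * the seed is then advanced by looking up the byte as it appeared in the
 * bitstream (out[i] ^ seed). */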
static void svq1_parse_string(GetBitContext *bitbuf, uint8_t *out)
{
    uint8_t seed;
    int i;

    out[0] = get_bits(bitbuf, 8);
    seed   = string_table[out[0]];

    for (i = 1; i <= out[0]; i++) {
        out[i] = get_bits(bitbuf, 8) ^ seed;
        seed   = string_table[out[i] ^ seed];
    }
}

static int svq1_decode_frame_header(GetBitContext *bitbuf, MpegEncContext *s)
{
    int frame_size_code;

    skip_bits(bitbuf, 8); /* temporal_reference */

    /* frame type */
    s->pict_type = get_bits(bitbuf, 2) + 1;
    if (s->pict_type == 4)
        return -1;

    if (s->pict_type == AV_PICTURE_TYPE_I) {
        /* unknown fields */
        if (s->f_code == 0x50 || s->f_code == 0x60) {
            int csum = get_bits(bitbuf, 16);

            csum = ff_svq1_packet_checksum(bitbuf->buffer,
                                           bitbuf->size_in_bits >> 3,
                                           csum);

            av_dlog(s->avctx, "%s checksum (%02x) for packet data\n",
                    (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->f_code ^ 0x10) >= 0x50) {
            uint8_t msg[256];

            svq1_parse_string(bitbuf, msg);

            av_log(s->avctx, AV_LOG_INFO,
                   "embedded message: \"%s\"\n", (char *)msg);
        }

        skip_bits(bitbuf, 2);
        skip_bits(bitbuf, 2);
        skip_bits1(bitbuf);

        /* load frame size */
        frame_size_code = get_bits(bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            s->width  = get_bits(bitbuf, 12);
            s->height = get_bits(bitbuf, 12);

            if (!s->width || !s->height)
                return -1;
        } else {
            /* get width, height from table */
            s->width  = ff_svq1_frame_size_table[frame_size_code].width;
            s->height = ff_svq1_frame_size_table[frame_size_code].height;
        }
    }

    /* unknown fields */
    if (get_bits1(bitbuf) == 1) {
        skip_bits1(bitbuf);    /* use packet checksum if (1) */
        skip_bits1(bitbuf);    /* component checksums after image data if (1) */

        if (get_bits(bitbuf, 2) != 0)
            return -1;
    }

    if (get_bits1(bitbuf) == 1) {
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 4);
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 2);

        while (get_bits1(bitbuf) == 1)
            skip_bits(bitbuf, 8);
    }

    return 0;
}
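
/* A note on the function below: the frame begins with a 22-bit frame code
 * that is validated before anything else; unless the code is 0x20, four
 * 32-bit header words are halfword-swapped and XORed with their mirrored
 * counterparts before the header is parsed.  Each plane (Y, then U and V
 * unless CODEC_FLAG_GRAY is set) is decoded in 16x16 blocks: intra blocks
 * for keyframes, otherwise delta blocks (skip, inter, inter-4V or intra)
 * using a per-row array of motion vector predictors. */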
static int svq1_decode_frame(AVCodecContext *avctx, void *data,
                             int *data_size, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    MpegEncContext *s  = avctx->priv_data;
    uint8_t *current, *previous;
    int result, i, x, y, width, height;
    AVFrame *pict = data;
    svq1_pmv *pmv;

    /* initialize bit buffer */
    init_get_bits(&s->gb, buf, buf_size * 8);

    /* decode frame header */
    s->f_code = get_bits(&s->gb, 22);

    if ((s->f_code & ~0x70) || !(s->f_code & 0x60))
        return -1;

    /* swap some header bytes (why?) */
    if (s->f_code != 0x20) {
        uint32_t *src = (uint32_t *)(buf + 4);

        for (i = 0; i < 4; i++)
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
    }

    result = svq1_decode_frame_header(&s->gb, s);
    if (result != 0) {
        av_dlog(s->avctx, "Error in svq1_decode_frame_header %i\n", result);
        return result;
    }
    avcodec_set_dimensions(avctx, s->width, s->height);

    /* FIXME: This avoids some confusion for "B frames" without 2 references.
     * This should be removed after libavcodec can handle more flexible
     * picture types & ordering */
    if (s->pict_type == AV_PICTURE_TYPE_B && s->last_picture_ptr == NULL)
        return buf_size;

    if ((avctx->skip_frame >= AVDISCARD_NONREF &&
         s->pict_type == AV_PICTURE_TYPE_B)    ||
        (avctx->skip_frame >= AVDISCARD_NONKEY &&
         s->pict_type != AV_PICTURE_TYPE_I)    ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    if (ff_MPV_frame_start(s, avctx) < 0)
        return -1;

    pmv = av_malloc((FFALIGN(s->width, 16) / 8 + 3) * sizeof(*pmv));
    if (!pmv)
        return -1;

    /* decode y, u and v components */
    for (i = 0; i < 3; i++) {
        int linesize;
        if (i == 0) {
            width    = FFALIGN(s->width,  16);
            height   = FFALIGN(s->height, 16);
            linesize = s->linesize;
        } else {
            if (s->flags & CODEC_FLAG_GRAY)
                break;
            width    = FFALIGN(s->width  / 4, 16);
            height   = FFALIGN(s->height / 4, 16);
            linesize = s->uvlinesize;
        }

        current = s->current_picture.f.data[i];

        if (s->pict_type == AV_PICTURE_TYPE_B)
            previous = s->next_picture.f.data[i];
        else
            previous = s->last_picture.f.data[i];

        if (s->pict_type == AV_PICTURE_TYPE_I) {
            /* keyframe */
            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_block_intra(&s->gb, &current[x],
                                                     linesize);
                    if (result != 0) {
                        av_log(s->avctx, AV_LOG_INFO,
                               "Error in svq1_decode_block %i (keyframe)\n",
                               result);
                        goto err;
                    }
                }
                current += 16 * linesize;
            }
        } else {
            /* delta frame */
            memset(pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv));

            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_delta_block(s, &s->gb, &current[x],
                                                     previous, linesize,
                                                     pmv, x, y);
                    if (result != 0) {
                        av_dlog(s->avctx,
                                "Error in svq1_decode_delta_block %i\n",
                                result);
                        goto err;
                    }
                }

                pmv[0].x =
                pmv[0].y = 0;

                current += 16 * linesize;
            }
        }
    }

    *pict = s->current_picture.f;

    ff_MPV_frame_end(s);

    *data_size = sizeof(AVFrame);
    result     = buf_size;

err:
    av_free(pmv);
    return result;
}

static av_cold int svq1_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;
    int offset = 0;

    ff_MPV_decode_defaults(s);

    s->avctx            = avctx;
    s->width            = (avctx->width  + 3) & ~3;
    s->height           = (avctx->height + 3) & ~3;
    s->codec_id         = avctx->codec->id;
    avctx->pix_fmt      = AV_PIX_FMT_YUV410P;
    /* Not true, but DP frames and these behave like unidirectional B-frames. */
    avctx->has_b_frames = 1;
    s->flags            = avctx->flags;
    if (ff_MPV_common_init(s) < 0)
        return -1;

    INIT_VLC_STATIC(&svq1_block_type, 2, 4,
                    &ff_svq1_block_type_vlc[0][1], 2, 1,
                    &ff_svq1_block_type_vlc[0][0], 2, 1, 6);

    INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
                    &ff_mvtab[0][1], 2, 1,
                    &ff_mvtab[0][0], 2, 1, 176);

    for (i = 0; i < 6; i++) {
        static const uint8_t sizes[2][6] = { { 14, 10, 14, 18, 16, 18 },
                                             { 10, 10, 14, 14, 14, 16 } };
        static VLC_TYPE table[168][2];
        svq1_intra_multistage[i].table           = &table[offset];
        svq1_intra_multistage[i].table_allocated = sizes[0][i];
        offset                                  += sizes[0][i];
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &ff_svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_intra_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
        svq1_inter_multistage[i].table           = &table[offset];
        svq1_inter_multistage[i].table_allocated = sizes[1][i];
        offset                                  += sizes[1][i];
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &ff_svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_inter_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
    }

    INIT_VLC_STATIC(&svq1_intra_mean, 8, 256,
                    &ff_svq1_intra_mean_vlc[0][1], 4, 2,
                    &ff_svq1_intra_mean_vlc[0][0], 4, 2, 632);

    INIT_VLC_STATIC(&svq1_inter_mean, 9, 512,
                    &ff_svq1_inter_mean_vlc[0][1], 4, 2,
                    &ff_svq1_inter_mean_vlc[0][0], 4, 2, 1434);

    return 0;
}

static av_cold int svq1_decode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    ff_MPV_common_end(s);
    return 0;
}

AVCodec ff_svq1_decoder = {
    .name           = "svq1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = svq1_decode_init,
    .close          = svq1_decode_end,
    .decode         = svq1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .flush          = ff_mpeg_flush,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
};