You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

849 lines
27KB

  1. /*
  2. * SVQ1 decoder
  3. * ported to MPlayer by Arpi <arpi@thot.banki.hu>
  4. * ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
  5. *
  6. * Copyright (c) 2002 The Xine Project
  7. * Copyright (c) 2002 The FFmpeg Project
  8. *
  9. * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net>
  10. *
  11. * This file is part of FFmpeg.
  12. *
  13. * FFmpeg is free software; you can redistribute it and/or
  14. * modify it under the terms of the GNU Lesser General Public
  15. * License as published by the Free Software Foundation; either
  16. * version 2.1 of the License, or (at your option) any later version.
  17. *
  18. * FFmpeg is distributed in the hope that it will be useful,
  19. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  20. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  21. * Lesser General Public License for more details.
  22. *
  23. * You should have received a copy of the GNU Lesser General Public
  24. * License along with FFmpeg; if not, write to the Free Software
  25. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  26. */
  27. /**
  28. * @file
  29. * Sorenson Vector Quantizer #1 (SVQ1) video codec.
  30. * For more information of the SVQ1 algorithm, visit:
  31. * http://www.pcisys.net/~melanson/codecs/
  32. */
  33. #include "avcodec.h"
  34. #include "get_bits.h"
  35. #include "h263.h"
  36. #include "hpeldsp.h"
  37. #include "internal.h"
  38. #include "mathops.h"
  39. #include "svq1.h"
  40. #undef NDEBUG
  41. #include <assert.h>
/* Static VLC tables shared by every decoder instance; built once in
 * svq1_decode_init() via INIT_VLC_STATIC / INIT_VLC_USE_NEW_STATIC. */
static VLC svq1_block_type;
static VLC svq1_motion_component;
static VLC svq1_intra_multistage[6];  /* one table per vector level 0..5 */
static VLC svq1_inter_multistage[6];  /* one table per vector level 0..5 */
static VLC svq1_intra_mean;
static VLC svq1_inter_mean;

/* motion vector (prediction) */
typedef struct svq1_pmv_s {
    int x;
    int y;
} svq1_pmv;

/* Per-instance decoder state (avctx->priv_data). */
typedef struct SVQ1Context {
    HpelDSPContext hdsp;
    GetBitContext gb;
    AVFrame *prev;              /* previously decoded frame, used as reference */
    uint8_t *pkt_swapped;       /* scratch copy of the packet with header words swapped */
    int pkt_swapped_allocated;  /* allocated size of pkt_swapped (av_fast_padded_malloc) */
    int width;
    int height;
    int frame_code;             /* 22-bit code read from the start of each packet */
    int nonref; // 1 if the current frame won't be referenced
} SVQ1Context;
/* 256-entry lookup table driving the running XOR key used by
 * svq1_parse_string() to de-obfuscate embedded header strings. */
static const uint8_t string_table[256] = {
    0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
    0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
    0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
    0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
    0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
    0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
    0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
    0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
    0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
    0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
    0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
    0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
    0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
    0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
    0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
    0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
    0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
    0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
    0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
    0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
    0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
    0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
    0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
    0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
    0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
    0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
    0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
    0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
    0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
    0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
    0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
    0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
};
/* Breadth-first subdivision of the current 16x16 block: each iteration may
 * split list[i] into two children (one bit per split decision), halving the
 * vector alternately in the vertical and horizontal direction as `level`
 * decreases from 5 to 0.  Expects locals i, m, n, level, list, pitch and
 * bitbuf to be in scope; falls through into per-vector decoding. */
#define SVQ1_PROCESS_VECTOR() \
for (; level > 0; i++) { \
/* process next depth */ \
if (i == m) { \
m = n; \
if (--level == 0) \
break; \
} \
/* divide block if next bit set */ \
if (!get_bits1(bitbuf)) \
break; \
/* add child nodes */ \
list[n++] = list[i]; \
list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level >> 1) + 1));\
}

/* Accumulate `stages` codebook entries into the two SWAR accumulators
 * n1 (odd bytes) / n2 (even bytes), each holding two 16-bit lanes, then
 * saturate every lane to the [0..255] range without branching per pixel. */
#define SVQ1_ADD_CODEBOOK() \
/* add codebook entries to vector */ \
for (j = 0; j < stages; j++) { \
n3 = codebook[entries[j]] ^ 0x80808080; \
n1 += (n3 & 0xFF00FF00) >> 8; \
n2 += n3 & 0x00FF00FF; \
} \
\
/* clip to [0..255] */ \
if (n1 & 0xFF00FF00) { \
n3 = (n1 >> 15 & 0x00010001 | 0x01000100) - 0x00010001; \
n1 += 0x7F007F00; \
n1 |= (~n1 >> 15 & 0x00010001 | 0x01000100) - 0x00010001; \
n1 &= n3 & 0x00FF00FF; \
} \
\
if (n2 & 0xFF00FF00) { \
n3 = (n2 >> 15 & 0x00010001 | 0x01000100) - 0x00010001; \
n2 += 0x7F007F00; \
n2 |= (~n2 >> 15 & 0x00010001 | 0x01000100) - 0x00010001; \
n2 &= n3 & 0x00FF00FF; \
}

/* Read 4 bits per stage from the bitstream and turn them into offsets into
 * the level-specific codebook `cbook`; also fold the (mean - stages*128)
 * bias into both 16-bit lanes of n4 for the SWAR accumulation above. */
#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook) \
codebook = (const uint32_t *)cbook[level]; \
if (stages > 0) \
bit_cache = get_bits(bitbuf, 4 * stages); \
/* calculate codebook entries for this vector */ \
for (j = 0; j < stages; j++) { \
entries[j] = (((bit_cache >> (4 * (stages - j - 1))) & 0xF) + \
16 * j) << (level + 1); \
} \
mean -= stages * 128; \
n4 = (mean << 16) + mean;
/**
 * Decode one intra-coded 16x16 luma/chroma block.
 *
 * The block is recursively subdivided (SVQ1_PROCESS_VECTOR) and each leaf
 * vector is reconstructed as mean + sum of up to `stages` codebook entries,
 * written four pixels at a time through a uint32_t pointer.
 *
 * @param bitbuf bitstream reader positioned at the block data
 * @param pixels top-left pixel of the destination block
 * @param pitch  line size of the destination plane in bytes
 * @return 0 on success, AVERROR_INVALIDDATA on an invalid stage/level combo
 */
static int svq1_decode_block_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                   int pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    unsigned x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;

        if (stages == -1) {
            /* skipped intra vectors are filled with zeros */
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], 0, width);
            continue; /* skip vector */
        }

        /* multi-stage entries only exist for the smaller levels (< 4) */
        if (stages > 0 && level >= 4) {
            av_dlog(NULL,
                    "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA; /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);

        if (stages == 0) {
            /* flat vector: every pixel is the mean */
            for (y = 0; y < height; y++)
                memset(&dst[y * (pitch / 4)], mean, width);
        } else {
            SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_intra_codebooks);

            for (y = 0; y < height; y++) {
                for (x = 0; x < width / 4; x++, codebook++) {
                    /* start both SWAR accumulators at the biased mean */
                    n1 = n4;
                    n2 = n4;
                    SVQ1_ADD_CODEBOOK()
                    /* store result */
                    dst[x] = n1 << 8 | n2;
                }
                dst += pitch / 4;
            }
        }
    }

    return 0;
}
/**
 * Decode the residual of one inter-coded 16x16 block.
 *
 * Same tree subdivision as the intra path, but the mean/codebook sum is
 * added on top of the motion-compensated prediction already present in
 * `pixels` (read back through dst[x]), and skipped vectors leave the
 * prediction untouched instead of zeroing it.
 *
 * @param bitbuf bitstream reader positioned at the block data
 * @param pixels top-left pixel of the predicted block to refine in place
 * @param pitch  line size of the plane in bytes
 * @return 0 on success, AVERROR_INVALIDDATA on an invalid stage/level combo
 */
static int svq1_decode_block_non_intra(GetBitContext *bitbuf, uint8_t *pixels,
                                       int pitch)
{
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    int x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i = 0, m = 1, n = 1, level = 5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst    = (uint32_t *)list[i];
        width  = 1 << ((4 + level) / 2);
        height = 1 << ((3 + level) / 2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;

        if (stages == -1)
            continue; /* skip vector */

        /* multi-stage entries only exist for the smaller levels (< 4) */
        if ((stages > 0) && (level >= 4)) {
            av_dlog(NULL,
                    "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",
                    stages, level);
            return AVERROR_INVALIDDATA; /* invalid vector */
        }

        /* inter means are signed, centred on zero */
        mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;

        SVQ1_CALC_CODEBOOK_ENTRIES(ff_svq1_inter_codebooks);

        for (y = 0; y < height; y++) {
            for (x = 0; x < width / 4; x++, codebook++) {
                n3 = dst[x];
                /* add mean value to vector */
                n1 = n4 + ((n3 & 0xFF00FF00) >> 8);
                n2 = n4 + (n3 & 0x00FF00FF);
                SVQ1_ADD_CODEBOOK()
                /* store result */
                dst[x] = n1 << 8 | n2;
            }
            dst += pitch / 4;
        }
    }

    return 0;
}
  248. static int svq1_decode_motion_vector(GetBitContext *bitbuf, svq1_pmv *mv,
  249. svq1_pmv **pmv)
  250. {
  251. int diff;
  252. int i;
  253. for (i = 0; i < 2; i++) {
  254. /* get motion code */
  255. diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
  256. if (diff < 0)
  257. return AVERROR_INVALIDDATA;
  258. else if (diff) {
  259. if (get_bits1(bitbuf))
  260. diff = -diff;
  261. }
  262. /* add median of motion vector predictors and clip result */
  263. if (i == 1)
  264. mv->y = sign_extend(diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y), 6);
  265. else
  266. mv->x = sign_extend(diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x), 6);
  267. }
  268. return 0;
  269. }
  270. static void svq1_skip_block(uint8_t *current, uint8_t *previous,
  271. int pitch, int x, int y)
  272. {
  273. uint8_t *src;
  274. uint8_t *dst;
  275. int i;
  276. src = &previous[x + y * pitch];
  277. dst = current;
  278. for (i = 0; i < 16; i++) {
  279. memcpy(dst, src, 16);
  280. src += pitch;
  281. dst += pitch;
  282. }
  283. }
/**
 * Decode a 1-MV inter block: read the vector, update the predictor array
 * and form the half-pel motion-compensated prediction for a 16x16 block.
 *
 * @param motion predictor array: slot 0 is the left neighbour, slots
 *               x/8+2..x/8+4 hold the above / above-right row entries
 * @return 0 on success, or the error from svq1_decode_motion_vector()
 */
static int svq1_motion_inter_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                   uint8_t *current, uint8_t *previous,
                                   int pitch, svq1_pmv *motion, int x, int y,
                                   int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[3];
    int result;

    /* predict and decode motion vector */
    pmv[0] = &motion[0];
    if (y == 0) {
        /* top row: no above neighbours, reuse the left predictor */
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[x / 8 + 2];
        pmv[2] = &motion[x / 8 + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result)
        return result;

    /* store the decoded vector for prediction of later blocks */
    motion[0].x         =
    motion[x / 8 + 2].x =
    motion[x / 8 + 3].x = mv.x;
    motion[0].y         =
    motion[x / 8 + 2].y =
    motion[x / 8 + 3].y = mv.y;

    /* clip the (half-pel) vector so the reference block stays in the plane */
    mv.x = av_clip(mv.x, -2 * x, 2 * (width - x - 16));
    mv.y = av_clip(mv.y, -2 * y, 2 * (height - y - 16));

    src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1)) * pitch];
    dst = current;

    /* low MV bits select the half-pel interpolation function */
    hdsp->put_pixels_tab[0][(mv.y & 1) << 1 | (mv.x & 1)](dst, src, pitch, 16);

    return 0;
}
/**
 * Decode a 4-MV inter block: four 8x8 sub-blocks, each with its own motion
 * vector.  The predictor set (pmv[]) is rebuilt between the four decodes,
 * and the decoded vectors are stored back into `motion` so later sub-blocks
 * and later macroblocks can predict from them — the order of the four
 * decode calls and predictor updates is significant.
 *
 * @return 0 on success, or the first error from svq1_decode_motion_vector()
 */
static int svq1_motion_inter_4v_block(HpelDSPContext *hdsp, GetBitContext *bitbuf,
                                      uint8_t *current, uint8_t *previous,
                                      int pitch, svq1_pmv *motion, int x, int y,
                                      int width, int height)
{
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv mv;
    svq1_pmv *pmv[4];
    int i, result;

    /* predict and decode motion vector (0) */
    pmv[0] = &motion[0];
    if (y == 0) {
        /* top row: no above neighbours, reuse the left predictor */
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector(bitbuf, &mv, pmv);
    if (result)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    } else {
        pmv[1] = &motion[(x / 8) + 3];
    }
    result = svq1_decode_motion_vector(bitbuf, &motion[0], pmv);
    if (result)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];
    result = svq1_decode_motion_vector(bitbuf, &motion[(x / 8) + 2], pmv);
    if (result)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];
    result = svq1_decode_motion_vector(bitbuf, pmv[3], pmv);
    if (result)
        return result;

    /* form predictions */
    for (i = 0; i < 4; i++) {
        /* sub-block offset inside the 16x16 block, in half-pel units */
        int mvx = pmv[i]->x + (i & 1) * 16;
        int mvy = pmv[i]->y + (i >> 1) * 16;

        // FIXME: clipping or padding?
        mvx = av_clip(mvx, -2 * x, 2 * (width - x - 8));
        mvy = av_clip(mvy, -2 * y, 2 * (height - y - 8));

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1)) * pitch];
        dst = current;

        hdsp->put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst, src, pitch, 8);

        /* select next block: right, then down-left, then right */
        if (i & 1)
            current += 8 * (pitch - 1);
        else
            current += 8;
    }

    return 0;
}
  382. static int svq1_decode_delta_block(AVCodecContext *avctx, HpelDSPContext *hdsp,
  383. GetBitContext *bitbuf,
  384. uint8_t *current, uint8_t *previous,
  385. int pitch, svq1_pmv *motion, int x, int y,
  386. int width, int height)
  387. {
  388. uint32_t block_type;
  389. int result = 0;
  390. /* get block type */
  391. block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);
  392. /* reset motion vectors */
  393. if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
  394. motion[0].x =
  395. motion[0].y =
  396. motion[x / 8 + 2].x =
  397. motion[x / 8 + 2].y =
  398. motion[x / 8 + 3].x =
  399. motion[x / 8 + 3].y = 0;
  400. }
  401. switch (block_type) {
  402. case SVQ1_BLOCK_SKIP:
  403. svq1_skip_block(current, previous, pitch, x, y);
  404. break;
  405. case SVQ1_BLOCK_INTER:
  406. result = svq1_motion_inter_block(hdsp, bitbuf, current, previous,
  407. pitch, motion, x, y, width, height);
  408. if (result != 0) {
  409. av_dlog(avctx, "Error in svq1_motion_inter_block %i\n", result);
  410. break;
  411. }
  412. result = svq1_decode_block_non_intra(bitbuf, current, pitch);
  413. break;
  414. case SVQ1_BLOCK_INTER_4V:
  415. result = svq1_motion_inter_4v_block(hdsp, bitbuf, current, previous,
  416. pitch, motion, x, y, width, height);
  417. if (result != 0) {
  418. av_dlog(avctx, "Error in svq1_motion_inter_4v_block %i\n", result);
  419. break;
  420. }
  421. result = svq1_decode_block_non_intra(bitbuf, current, pitch);
  422. break;
  423. case SVQ1_BLOCK_INTRA:
  424. result = svq1_decode_block_intra(bitbuf, current, pitch);
  425. break;
  426. }
  427. return result;
  428. }
  429. static void svq1_parse_string(GetBitContext *bitbuf, uint8_t out[257])
  430. {
  431. uint8_t seed;
  432. int i;
  433. out[0] = get_bits(bitbuf, 8);
  434. seed = string_table[out[0]];
  435. for (i = 1; i <= out[0]; i++) {
  436. out[i] = get_bits(bitbuf, 8) ^ seed;
  437. seed = string_table[out[i] ^ seed];
  438. }
  439. out[i] = 0;
  440. }
/**
 * Parse the per-frame header: picture type, optional checksum and embedded
 * string (keyframes only), frame dimensions and two optional unknown
 * extension fields.  Updates s->width/height and frame->pict_type.
 *
 * @return 0 on success, AVERROR_INVALIDDATA on a malformed header
 */
static int svq1_decode_frame_header(AVCodecContext *avctx, AVFrame *frame)
{
    SVQ1Context *s = avctx->priv_data;
    GetBitContext *bitbuf = &s->gb;
    int frame_size_code;
    int width  = s->width;
    int height = s->height;

    skip_bits(bitbuf, 8); /* temporal_reference */

    /* frame type */
    s->nonref = 0;
    switch (get_bits(bitbuf, 2)) {
    case 0:
        frame->pict_type = AV_PICTURE_TYPE_I;
        break;
    case 2:
        /* droppable P-frame: decoded but never used as a reference */
        s->nonref = 1;
        /* fallthrough */
    case 1:
        frame->pict_type = AV_PICTURE_TYPE_P;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Invalid frame type.\n");
        return AVERROR_INVALIDDATA;
    }

    if (frame->pict_type == AV_PICTURE_TYPE_I) {
        /* unknown fields */
        if (s->frame_code == 0x50 || s->frame_code == 0x60) {
            int csum = get_bits(bitbuf, 16);

            csum = ff_svq1_packet_checksum(bitbuf->buffer,
                                           bitbuf->size_in_bits >> 3,
                                           csum);

            av_dlog(avctx, "%s checksum (%02x) for packet data\n",
                    (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->frame_code ^ 0x10) >= 0x50) {
            uint8_t msg[257];

            /* obfuscated text string embedded by the encoder */
            svq1_parse_string(bitbuf, msg);

            av_log(avctx, AV_LOG_INFO,
                   "embedded message:\n%s\n", ((char *)msg) + 1);
        }

        skip_bits(bitbuf, 2);
        skip_bits(bitbuf, 2);
        skip_bits1(bitbuf);

        /* load frame size */
        frame_size_code = get_bits(bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            width  = get_bits(bitbuf, 12);
            height = get_bits(bitbuf, 12);

            if (!width || !height)
                return AVERROR_INVALIDDATA;
        } else {
            /* get width, height from table */
            width  = ff_svq1_frame_size_table[frame_size_code][0];
            height = ff_svq1_frame_size_table[frame_size_code][1];
        }
    }

    /* unknown fields */
    if (get_bits1(bitbuf)) {
        skip_bits1(bitbuf);    /* use packet checksum if (1) */
        skip_bits1(bitbuf);    /* component checksums after image data if (1) */

        /* only value 0 is known/supported here */
        if (get_bits(bitbuf, 2) != 0)
            return AVERROR_INVALIDDATA;
    }

    if (get_bits1(bitbuf)) {
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 4);
        skip_bits1(bitbuf);
        skip_bits(bitbuf, 2);

        /* variable-length unknown field: 1-bit stop flag, 8 data bits each */
        if (skip_1stop_8data_bits(bitbuf) < 0)
            return AVERROR_INVALIDDATA;
    }

    s->width  = width;
    s->height = height;

    return 0;
}
/**
 * Decode one SVQ1 packet into an AVFrame.
 *
 * Validates the 22-bit frame code, optionally un-swaps four header words
 * (on a copied buffer so the input packet stays untouched), parses the
 * frame header, then decodes the Y/U/V planes block by block — intra
 * blocks for keyframes, delta blocks (with motion prediction against
 * s->prev) otherwise.
 *
 * @return buf_size on success (also when the frame is skipped by
 *         avctx->skip_frame), a negative AVERROR code on failure
 */
static int svq1_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    SVQ1Context *s     = avctx->priv_data;
    AVFrame *cur       = data;
    uint8_t *current;
    int result, i, x, y, width, height;
    svq1_pmv *pmv;

    /* initialize bit buffer */
    init_get_bits8(&s->gb, buf, buf_size);

    /* decode frame header */
    s->frame_code = get_bits(&s->gb, 22);

    /* only codes of the form 0xx0000 with at least one of bits 5-6 set are valid */
    if ((s->frame_code & ~0x70) || !(s->frame_code & 0x60))
        return AVERROR_INVALIDDATA;

    /* swap some header bytes (why?) */
    if (s->frame_code != 0x20) {
        uint32_t *src;

        if (buf_size < 9 * 4) {
            av_log(avctx, AV_LOG_ERROR, "Input packet too small\n");
            return AVERROR_INVALIDDATA;
        }

        /* work on a padded copy so the (possibly shared) packet is not modified */
        av_fast_padded_malloc(&s->pkt_swapped,
                              &s->pkt_swapped_allocated,
                              buf_size);
        if (!s->pkt_swapped)
            return AVERROR(ENOMEM);

        memcpy(s->pkt_swapped, buf, buf_size);
        buf = s->pkt_swapped;

        init_get_bits(&s->gb, buf, buf_size * 8);
        skip_bits(&s->gb, 22);

        src = (uint32_t *)(s->pkt_swapped + 4);

        if (buf_size < 36)
            return AVERROR_INVALIDDATA;

        /* rotate each of the first four words and XOR with its mirror word */
        for (i = 0; i < 4; i++)
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
    }

    result = svq1_decode_frame_header(avctx, cur);
    if (result != 0) {
        av_dlog(avctx, "Error in svq1_decode_frame_header %i\n", result);
        return result;
    }

    result = ff_set_dimensions(avctx, s->width, s->height);
    if (result < 0)
        return result;

    /* honour the user's frame-skipping policy before allocating a buffer */
    if ((avctx->skip_frame >= AVDISCARD_NONREF && s->nonref) ||
        (avctx->skip_frame >= AVDISCARD_NONKEY &&
         cur->pict_type != AV_PICTURE_TYPE_I) ||
        avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    result = ff_get_buffer(avctx, cur, s->nonref ? 0 : AV_GET_BUFFER_FLAG_REF);
    if (result < 0)
        return result;

    /* one row of motion-vector predictors (2 columns of context + 3 slots) */
    pmv = av_malloc((FFALIGN(s->width, 16) / 8 + 3) * sizeof(*pmv));
    if (!pmv)
        return AVERROR(ENOMEM);

    /* decode y, u and v components */
    for (i = 0; i < 3; i++) {
        int linesize = cur->linesize[i];
        if (i == 0) {
            width  = FFALIGN(s->width, 16);
            height = FFALIGN(s->height, 16);
        } else {
            if (avctx->flags & CODEC_FLAG_GRAY)
                break;
            /* 4:1:0 chroma: quarter resolution in both directions */
            width  = FFALIGN(s->width / 4, 16);
            height = FFALIGN(s->height / 4, 16);
        }

        current = cur->data[i];

        if (cur->pict_type == AV_PICTURE_TYPE_I) {
            /* keyframe */
            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_block_intra(&s->gb, &current[x],
                                                     linesize);
                    if (result) {
                        av_log(avctx, AV_LOG_ERROR,
                               "Error in svq1_decode_block %i (keyframe)\n",
                               result);
                        goto err;
                    }
                }
                current += 16 * linesize;
            }
        } else {
            /* delta frame */
            uint8_t *previous = s->prev->data[i];

            /* a usable reference of matching size must exist */
            if (!previous ||
                s->prev->width != s->width || s->prev->height != s->height) {
                av_log(avctx, AV_LOG_ERROR, "Missing reference frame.\n");
                result = AVERROR_INVALIDDATA;
                goto err;
            }

            memset(pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv));

            for (y = 0; y < height; y += 16) {
                for (x = 0; x < width; x += 16) {
                    result = svq1_decode_delta_block(avctx, &s->hdsp,
                                                     &s->gb, &current[x],
                                                     previous, linesize,
                                                     pmv, x, y, width, height);
                    if (result != 0) {
                        av_dlog(avctx,
                                "Error in svq1_decode_delta_block %i\n",
                                result);
                        goto err;
                    }
                }

                /* reset the left predictor at the start of each block row */
                pmv[0].x =
                pmv[0].y = 0;

                current += 16 * linesize;
            }
        }
    }

    /* keep a reference to this frame unless it is marked droppable */
    if (!s->nonref) {
        av_frame_unref(s->prev);
        result = av_frame_ref(s->prev, cur);
        if (result < 0)
            goto err;
    }

    *got_frame = 1;
    result     = buf_size;

err:
    av_free(pmv);
    return result;
}
/**
 * One-time decoder setup: allocate the reference-frame holder, round the
 * declared dimensions, initialize the half-pel DSP functions and build all
 * static VLC tables.  The multistage tables for the six levels are carved
 * out of one shared static array using precomputed per-level sizes.
 *
 * @return 0 on success, AVERROR(ENOMEM) if the frame cannot be allocated
 */
static av_cold int svq1_decode_init(AVCodecContext *avctx)
{
    SVQ1Context *s = avctx->priv_data;
    int i;
    int offset = 0;

    s->prev = av_frame_alloc();
    if (!s->prev)
        return AVERROR(ENOMEM);

    /* round up to a multiple of 4 ('+' binds tighter than '&') */
    s->width  = avctx->width  + 3 & ~3;
    s->height = avctx->height + 3 & ~3;
    avctx->pix_fmt = AV_PIX_FMT_YUV410P;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);

    INIT_VLC_STATIC(&svq1_block_type, 2, 4,
                    &ff_svq1_block_type_vlc[0][1], 2, 1,
                    &ff_svq1_block_type_vlc[0][0], 2, 1, 6);

    INIT_VLC_STATIC(&svq1_motion_component, 7, 33,
                    &ff_mvtab[0][1], 2, 1,
                    &ff_mvtab[0][0], 2, 1, 176);

    for (i = 0; i < 6; i++) {
        /* exact table sizes per level; the offsets below must stay in sync
         * with the 168-entry backing array */
        static const uint8_t sizes[2][6] = { { 14, 10, 14, 18, 16, 18 },
                                             { 10, 10, 14, 14, 14, 16 } };
        static VLC_TYPE table[168][2];

        svq1_intra_multistage[i].table           = &table[offset];
        svq1_intra_multistage[i].table_allocated = sizes[0][i];
        offset                                  += sizes[0][i];
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &ff_svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_intra_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
        svq1_inter_multistage[i].table           = &table[offset];
        svq1_inter_multistage[i].table_allocated = sizes[1][i];
        offset                                  += sizes[1][i];
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &ff_svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &ff_svq1_inter_multistage_vlc[i][0][0], 2, 1,
                 INIT_VLC_USE_NEW_STATIC);
    }

    INIT_VLC_STATIC(&svq1_intra_mean, 8, 256,
                    &ff_svq1_intra_mean_vlc[0][1], 4, 2,
                    &ff_svq1_intra_mean_vlc[0][0], 4, 2, 632);

    INIT_VLC_STATIC(&svq1_inter_mean, 9, 512,
                    &ff_svq1_inter_mean_vlc[0][1], 4, 2,
                    &ff_svq1_inter_mean_vlc[0][0], 4, 2, 1434);

    return 0;
}
  687. static av_cold int svq1_decode_end(AVCodecContext *avctx)
  688. {
  689. SVQ1Context *s = avctx->priv_data;
  690. av_frame_free(&s->prev);
  691. av_freep(&s->pkt_swapped);
  692. s->pkt_swapped_allocated = 0;
  693. return 0;
  694. }
  695. static void svq1_flush(AVCodecContext *avctx)
  696. {
  697. SVQ1Context *s = avctx->priv_data;
  698. av_frame_unref(s->prev);
  699. }
/* Public codec descriptor registered with libavcodec. */
AVCodec ff_svq1_decoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_decode_init,
    .close          = svq1_decode_end,
    .decode         = svq1_decode_frame,
    .capabilities   = CODEC_CAP_DR1,  /* supports direct rendering (ff_get_buffer) */
    .flush          = svq1_flush,
    /* SVQ1 is natively 4:1:0 */
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};