/*
 *
 * Copyright (C) 2002 the xine project
 * Copyright (C) 2002 the ffmpeg project
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * (SVQ1 Decoder)
 * Ported to mplayer by Arpi <arpi@thot.banki.hu>
 * Ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
 *
 * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net>
 */

/**
 * @file svq1.c
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

//#define DEBUG_SVQ1

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "bswap.h"

#undef NDEBUG
#include <assert.h>

extern const uint8_t mvtab[33][2];

static VLC svq1_block_type;
static VLC svq1_motion_component;
static VLC svq1_intra_multistage[6];
static VLC svq1_inter_multistage[6];
static VLC svq1_intra_mean;
static VLC svq1_inter_mean;

#define SVQ1_BLOCK_SKIP     0
#define SVQ1_BLOCK_INTER    1
#define SVQ1_BLOCK_INTER_4V 2
#define SVQ1_BLOCK_INTRA    3

typedef struct SVQ1Context {
    MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to make the motion estimation eventually independent of MpegEncContext, so this will be removed then (FIXME/XXX)
    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame picture;
    AVFrame current_picture;
    AVFrame last_picture;
    PutBitContext pb;
    GetBitContext gb;

    PutBitContext reorder_pb[6]; //why ooh why this sick breadth first order, everything is slower and more complex
    int frame_width;
    int frame_height;

    /* Y plane block dimensions */
    int y_block_width;
    int y_block_height;

    /* U & V plane (C planes) block dimensions */
    int c_block_width;
    int c_block_height;

    uint16_t *mb_type;
    uint32_t *dummy;
    int16_t (*motion_val8[3])[2];
    int16_t (*motion_val16[3])[2];

    int64_t rd_total;
} SVQ1Context;

/* motion vector (prediction) */
typedef struct svq1_pmv_s {
    int x;
    int y;
} svq1_pmv_t;

#include "svq1_cb.h"
#include "svq1_vlc.h"
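
/* CRC-16/CCITT lookup table (generator polynomial 0x1021, MSB first),
 * used by svq1_packet_checksum() below */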
static const uint16_t checksum_table[256] = {
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
    0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
    0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
    0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
    0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
    0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
    0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
    0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
    0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
    0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
    0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
    0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
    0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
    0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
    0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
    0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
    0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
    0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
    0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
    0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
    0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
    0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
    0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
};
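
/* seed table used by svq1_parse_string() to descramble the embedded
 * message string carried in some keyframe headers */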
static const uint8_t string_table[256] = {
    0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
    0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
    0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
    0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
    0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
    0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
    0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
    0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
    0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
    0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
    0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
    0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
    0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
    0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
    0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
    0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
    0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
    0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
    0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
    0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
    0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
    0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
    0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
    0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
    0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
    0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
    0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
    0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
    0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
    0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
    0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
    0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
};
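
/* Breadth-first traversal of the block splitting tree: while level > 0, a set
 * bit splits the current vector into two halves (alternately vertical and
 * horizontal), going from level 5 (16x16) down to level 0 (4x2); the resulting
 * child vectors are appended to list[] and coded in that order. */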
#define SVQ1_PROCESS_VECTOR()\
    for (; level > 0; i++) {\
        /* process next depth */\
        if (i == m) {\
            m = n;\
            if (--level == 0)\
                break;\
        }\
        /* divide block if next bit set */\
        if (get_bits (bitbuf, 1) == 0)\
            break;\
        /* add child nodes */\
        list[n++] = list[i];\
        list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level / 2) + 1));\
    }

#define SVQ1_ADD_CODEBOOK()\
    /* add codebook entries to vector */\
    for (j=0; j < stages; j++) {\
        n3 = codebook[entries[j]] ^ 0x80808080;\
        n1 += ((n3 & 0xFF00FF00) >> 8);\
        n2 += (n3 & 0x00FF00FF);\
    }\
\
    /* clip to [0..255] */\
    if (n1 & 0xFF00FF00) {\
        n3 = ((( n1 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n1 += 0x7F007F00;\
        n1 |= (((~n1 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n1 &= (n3 & 0x00FF00FF);\
    }\
\
    if (n2 & 0xFF00FF00) {\
        n3 = ((( n2 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n2 += 0x7F007F00;\
        n2 |= (((~n2 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n2 &= (n3 & 0x00FF00FF);\
    }

#define SVQ1_DO_CODEBOOK_INTRA()\
    for (y=0; y < height; y++) {\
        for (x=0; x < (width / 4); x++, codebook++) {\
            n1 = n4;\
            n2 = n4;\
            SVQ1_ADD_CODEBOOK()\
            /* store result */\
            dst[x] = (n1 << 8) | n2;\
        }\
        dst += (pitch / 4);\
    }

#define SVQ1_DO_CODEBOOK_NONINTRA()\
    for (y=0; y < height; y++) {\
        for (x=0; x < (width / 4); x++, codebook++) {\
            n3 = dst[x];\
            /* add mean value to vector */\
            n1 = ((n3 & 0xFF00FF00) >> 8) + n4;\
            n2 = (n3 & 0x00FF00FF) + n4;\
            SVQ1_ADD_CODEBOOK()\
            /* store result */\
            dst[x] = (n1 << 8) | n2;\
        }\
        dst += (pitch / 4);\
    }

#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)\
    codebook = (const uint32_t *) cbook[level];\
    bit_cache = get_bits (bitbuf, 4*stages);\
    /* calculate codebook entries for this vector */\
    for (j=0; j < stages; j++) {\
        entries[j] = (((bit_cache >> (4*(stages - j - 1))) & 0xF) + 16*j) << (level + 1);\
    }\
    mean -= (stages * 128);\
    n4 = ((mean + (mean >> 31)) << 16) | (mean & 0xFFFF);
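
/* Decode one intra 16x16 block: the block is recursively subdivided (see
 * SVQ1_PROCESS_VECTOR) and each resulting vector is reconstructed as a mean
 * value plus up to six codebook stages; stages > 0 are only allowed for
 * levels < 4, i.e. vectors of 8x8 and smaller. */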
static int svq1_decode_block_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch ) {
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    unsigned x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i=0, m=1, n=1, level=5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst = (uint32_t *) list[i];
        width = 1 << ((4 + level) /2);
        height = 1 << ((3 + level) /2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;

        if (stages == -1) {
            for (y=0; y < height; y++) {
                memset (&dst[y*(pitch / 4)], 0, width);
            }
            continue;           /* skip vector */
        }

        if ((stages > 0) && (level >= 4)) {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",stages,level);
#endif
            return -1;          /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);

        if (stages == 0) {
            for (y=0; y < height; y++) {
                memset (&dst[y*(pitch / 4)], mean, width);
            }
        } else {
            SVQ1_CALC_CODEBOOK_ENTRIES(svq1_intra_codebooks);
            SVQ1_DO_CODEBOOK_INTRA()
        }
    }

    return 0;
}

static int svq1_decode_block_non_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch ) {
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    int x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i=0, m=1, n=1, level=5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst = (uint32_t *) list[i];
        width = 1 << ((4 + level) /2);
        height = 1 << ((3 + level) /2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;

        if (stages == -1) continue;     /* skip vector */

        if ((stages > 0) && (level >= 4)) {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",stages,level);
#endif
            return -1;          /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;

        SVQ1_CALC_CODEBOOK_ENTRIES(svq1_inter_codebooks);
        SVQ1_DO_CODEBOOK_NONINTRA()
    }

    return 0;
}
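
/* Decode one motion vector: each component is a VLC-coded magnitude with a
 * separate sign bit, added to the median of three neighbouring predictors;
 * the "<< 26 >> 26" sign-extends the result to 6 bits (-32..31, half-pel units). */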
static int svq1_decode_motion_vector (GetBitContext *bitbuf, svq1_pmv_t *mv, svq1_pmv_t **pmv) {
    int diff;
    int i;

    for (i=0; i < 2; i++) {
        /* get motion code */
        diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
        if(diff<0)
            return -1;
        else if(diff){
            if(get_bits1(bitbuf)) diff= -diff;
        }

        /* add median of motion vector predictors and clip result */
        if (i == 1)
            mv->y = ((diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y)) << 26) >> 26;
        else
            mv->x = ((diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x)) << 26) >> 26;
    }

    return 0;
}

static void svq1_skip_block (uint8_t *current, uint8_t *previous, int pitch, int x, int y) {
    uint8_t *src;
    uint8_t *dst;
    int i;

    src = &previous[x + y*pitch];
    dst = current;

    for (i=0; i < 16; i++) {
        memcpy (dst, src, 16);
        src += pitch;
        dst += pitch;
    }
}

static int svq1_motion_inter_block (MpegEncContext *s, GetBitContext *bitbuf,
                                    uint8_t *current, uint8_t *previous, int pitch,
                                    svq1_pmv_t *motion, int x, int y) {
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv_t mv;
    svq1_pmv_t *pmv[3];
    int result;

    /* predict and decode motion vector */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    }
    else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector (bitbuf, &mv, pmv);

    if (result != 0)
        return result;

    motion[0].x =
    motion[(x / 8) + 2].x =
    motion[(x / 8) + 3].x = mv.x;
    motion[0].y =
    motion[(x / 8) + 2].y =
    motion[(x / 8) + 3].y = mv.y;

    if(y + (mv.y >> 1)<0)
        mv.y= 0;
    if(x + (mv.x >> 1)<0)
        mv.x= 0;

#if 0
    int w= (s->width+15)&~15;
    int h= (s->height+15)&~15;
    if(x + (mv.x >> 1)<0 || y + (mv.y >> 1)<0 || x + (mv.x >> 1) + 16 > w || y + (mv.y >> 1) + 16> h)
        av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mv.x >> 1), y + (mv.y >> 1));
#endif

    src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1))*pitch];
    dst = current;

    s->dsp.put_pixels_tab[0][((mv.y & 1) << 1) | (mv.x & 1)](dst,src,pitch,16);

    return 0;
}

static int svq1_motion_inter_4v_block (MpegEncContext *s, GetBitContext *bitbuf,
                                       uint8_t *current, uint8_t *previous, int pitch,
                                       svq1_pmv_t *motion,int x, int y) {
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv_t mv;
    svq1_pmv_t *pmv[4];
    int i, result;

    /* predict and decode motion vector (0) */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    }
    else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector (bitbuf, &mv, pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    }
    else {
        pmv[1] = &motion[(x / 8) + 3];
    }
    result = svq1_decode_motion_vector (bitbuf, &motion[0], pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];

    result = svq1_decode_motion_vector (bitbuf, &motion[(x / 8) + 2], pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];

    result = svq1_decode_motion_vector (bitbuf, pmv[3], pmv);

    if (result != 0)
        return result;

    /* form predictions */
    for (i=0; i < 4; i++) {
        int mvx= pmv[i]->x + (i&1)*16;
        int mvy= pmv[i]->y + (i>>1)*16;
        ///XXX /FIXME clipping or padding?
        if(y + (mvy >> 1)<0)
            mvy= 0;
        if(x + (mvx >> 1)<0)
            mvx= 0;

#if 0
        int w= (s->width+15)&~15;
        int h= (s->height+15)&~15;
        if(x + (mvx >> 1)<0 || y + (mvy >> 1)<0 || x + (mvx >> 1) + 8 > w || y + (mvy >> 1) + 8> h)
            av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mvx >> 1), y + (mvy >> 1));
#endif

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1))*pitch];
        dst = current;

        s->dsp.put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst,src,pitch,8);

        /* select next block */
        if (i & 1) {
            current += 8*(pitch - 1);
        } else {
            current += 8;
        }
    }

    return 0;
}
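
/* Decode one 16x16 macroblock of a delta (P) frame: the block type VLC selects
 * skip, single-vector inter, four-vector inter or intra coding; motion
 * predictors are reset for skipped and intra blocks. */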
static int svq1_decode_delta_block (MpegEncContext *s, GetBitContext *bitbuf,
                                    uint8_t *current, uint8_t *previous, int pitch,
                                    svq1_pmv_t *motion, int x, int y) {
    uint32_t block_type;
    int result = 0;

    /* get block type */
    block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);

    /* reset motion vectors */
    if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
        motion[0].x =
        motion[0].y =
        motion[(x / 8) + 2].x =
        motion[(x / 8) + 2].y =
        motion[(x / 8) + 3].x =
        motion[(x / 8) + 3].y = 0;
    }

    switch (block_type) {
    case SVQ1_BLOCK_SKIP:
        svq1_skip_block (current, previous, pitch, x, y);
        break;

    case SVQ1_BLOCK_INTER:
        result = svq1_motion_inter_block (s, bitbuf, current, previous, pitch, motion, x, y);

        if (result != 0)
        {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_block %i\n",result);
#endif
            break;
        }
        result = svq1_decode_block_non_intra (bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTER_4V:
        result = svq1_motion_inter_4v_block (s, bitbuf, current, previous, pitch, motion, x, y);

        if (result != 0)
        {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_4v_block %i\n",result);
#endif
            break;
        }
        result = svq1_decode_block_non_intra (bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTRA:
        result = svq1_decode_block_intra (bitbuf, current, pitch);
        break;
    }

    return result;
}

/* standard video sizes */
static struct { int width; int height; } svq1_frame_size_table[8] = {
    { 160, 120 }, { 128, 96 }, { 176, 144 }, { 352, 288 },
    { 704, 576 }, { 240, 180 }, { 320, 240 }, { -1, -1 }
};
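
/* CRC-16/CCITT over 'length' bytes of packet data, seeded with 'value' */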
static uint16_t svq1_packet_checksum (uint8_t *data, int length, int value) {
    int i;

    for (i=0; i < length; i++) {
        value = checksum_table[data[i] ^ (value >> 8)] ^ ((value & 0xFF) << 8);
    }

    return value;
}

#if 0 /* unused, remove? */
static uint16_t svq1_component_checksum (uint16_t *pixels, int pitch,
                                         int width, int height, int value) {
    int x, y;

    for (y=0; y < height; y++) {
        for (x=0; x < width; x++) {
            value = checksum_table[pixels[x] ^ (value >> 8)] ^ ((value & 0xFF) << 8);
        }

        pixels += pitch;
    }

    return value;
}
#endif

#ifdef CONFIG_DECODERS
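
/* Decode the scrambled string embedded in some keyframe headers: the first
 * byte is the length, each following byte is XORed with a running seed taken
 * from string_table[]. */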
static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
    uint8_t seed;
    int i;

    out[0] = get_bits (bitbuf, 8);
    seed = string_table[out[0]];

    for (i=1; i <= out[0]; i++) {
        out[i] = get_bits (bitbuf, 8) ^ seed;
        seed = string_table[out[i] ^ seed];
    }
}
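
/* Parse the frame header: 8-bit temporal reference and 2-bit picture type,
 * then (for I frames) an optional packet checksum and embedded message
 * depending on the frame code, and the frame size as a 3-bit index into
 * svq1_frame_size_table or, for code 7, explicit 12-bit width and height. */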
static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
    int frame_size_code;
    int temporal_reference;

    temporal_reference = get_bits (bitbuf, 8);

    /* frame type */
    s->pict_type= get_bits (bitbuf, 2)+1;
    if(s->pict_type==4)
        return -1;

    if (s->pict_type == I_TYPE) {
        /* unknown fields */
        if (s->f_code == 0x50 || s->f_code == 0x60) {
            int csum = get_bits (bitbuf, 16);

            csum = svq1_packet_checksum ((uint8_t *)bitbuf->buffer, bitbuf->size_in_bits>>3, csum);

            //  av_log(s->avctx, AV_LOG_INFO, "%s checksum (%02x) for packet data\n",
            //         (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->f_code ^ 0x10) >= 0x50) {
            uint8_t msg[256];

            svq1_parse_string (bitbuf, msg);

            av_log(s->avctx, AV_LOG_INFO, "embedded message: \"%s\"\n", (char *) msg);
        }

        skip_bits (bitbuf, 2);
        skip_bits (bitbuf, 2);
        skip_bits1 (bitbuf);

        /* load frame size */
        frame_size_code = get_bits (bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            s->width = get_bits (bitbuf, 12);
            s->height = get_bits (bitbuf, 12);

            if (!s->width || !s->height)
                return -1;
        } else {
            /* get width, height from table */
            s->width = svq1_frame_size_table[frame_size_code].width;
            s->height = svq1_frame_size_table[frame_size_code].height;
        }
    }

    /* unknown fields */
    if (get_bits (bitbuf, 1) == 1) {
        skip_bits1 (bitbuf);    /* use packet checksum if (1) */
        skip_bits1 (bitbuf);    /* component checksums after image data if (1) */

        if (get_bits (bitbuf, 2) != 0)
            return -1;
    }

    if (get_bits (bitbuf, 1) == 1) {
        skip_bits1 (bitbuf);
        skip_bits (bitbuf, 4);
        skip_bits1 (bitbuf);
        skip_bits (bitbuf, 2);

        while (get_bits (bitbuf, 1) == 1) {
            skip_bits (bitbuf, 8);
        }
    }

    return 0;
}
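
/* Decode one complete frame: read the 22-bit frame code (unscrambling part of
 * the header in place for codes other than 0x20), parse the frame header, then
 * decode the Y, U and V planes 16x16 blocks at a time. */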
static int svq1_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             uint8_t *buf, int buf_size)
{
    MpegEncContext *s=avctx->priv_data;
    uint8_t *current, *previous;
    int result, i, x, y, width, height;
    AVFrame *pict = data;

    /* initialize bit buffer */
    init_get_bits(&s->gb,buf,buf_size*8);

    /* decode frame header */
    s->f_code = get_bits (&s->gb, 22);

    if ((s->f_code & ~0x70) || !(s->f_code & 0x60))
        return -1;

    /* swap some header bytes (why?) */
    if (s->f_code != 0x20) {
        uint32_t *src = (uint32_t *) (buf + 4);

        for (i=0; i < 4; i++) {
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
        }
    }

    result = svq1_decode_frame_header (&s->gb, s);

    if (result != 0)
    {
#ifdef DEBUG_SVQ1
        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_frame_header %i\n",result);
#endif
        return result;
    }

    //FIXME this avoids some confusion for "B frames" without 2 references
    //this should be removed after libavcodec can handle more flexible picture types & ordering
    if(s->pict_type==B_TYPE && s->last_picture_ptr==NULL) return buf_size;

    if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
    if( (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
       || avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    /* decode y, u and v components */
    for (i=0; i < 3; i++) {
        int linesize;
        if (i == 0) {
            width = (s->width+15)&~15;
            height = (s->height+15)&~15;
            linesize= s->linesize;
        } else {
            if(s->flags&CODEC_FLAG_GRAY) break;
            width = (s->width/4+15)&~15;
            height = (s->height/4+15)&~15;
            linesize= s->uvlinesize;
        }

        current = s->current_picture.data[i];

        if(s->pict_type==B_TYPE){
            previous = s->next_picture.data[i];
        }else{
            previous = s->last_picture.data[i];
        }

        if (s->pict_type == I_TYPE) {
            /* keyframe */
            for (y=0; y < height; y+=16) {
                for (x=0; x < width; x+=16) {
                    result = svq1_decode_block_intra (&s->gb, &current[x], linesize);
                    if (result != 0)
                    {
//#ifdef DEBUG_SVQ1
                        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_block %i (keyframe)\n",result);
//#endif
                        return result;
                    }
                }
                current += 16*linesize;
            }
        } else {
            svq1_pmv_t pmv[width/8+3];
            /* delta frame */
            memset (pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv_t));

            for (y=0; y < height; y+=16) {
                for (x=0; x < width; x+=16) {
                    result = svq1_decode_delta_block (s, &s->gb, &current[x], previous,
                                                      linesize, pmv, x, y);
                    if (result != 0)
                    {
#ifdef DEBUG_SVQ1
                        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_delta_block %i\n",result);
#endif
                        return result;
                    }
                }

                pmv[0].x =
                pmv[0].y = 0;

                current += 16*linesize;
            }
        }
    }

    *pict = *(AVFrame*)&s->current_picture;

    MPV_frame_end(s);

    *data_size=sizeof(AVFrame);

    return buf_size;
}

static int svq1_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;

    MPV_decode_defaults(s);

    s->avctx = avctx;
    s->width = (avctx->width+3)&~3;
    s->height = (avctx->height+3)&~3;
    s->codec_id= avctx->codec->id;
    avctx->pix_fmt = PIX_FMT_YUV410P;
    avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames
    s->flags= avctx->flags;
    if (MPV_common_init(s) < 0) return -1;

    init_vlc(&svq1_block_type, 2, 4,
             &svq1_block_type_vlc[0][1], 2, 1,
             &svq1_block_type_vlc[0][0], 2, 1, 1);

    init_vlc(&svq1_motion_component, 7, 33,
             &mvtab[0][1], 2, 1,
             &mvtab[0][0], 2, 1, 1);

    for (i = 0; i < 6; i++) {
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &svq1_intra_multistage_vlc[i][0][0], 2, 1, 1);
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &svq1_inter_multistage_vlc[i][0][0], 2, 1, 1);
    }

    init_vlc(&svq1_intra_mean, 8, 256,
             &svq1_intra_mean_vlc[0][1], 4, 2,
             &svq1_intra_mean_vlc[0][0], 4, 2, 1);

    init_vlc(&svq1_inter_mean, 9, 512,
             &svq1_inter_mean_vlc[0][1], 4, 2,
             &svq1_inter_mean_vlc[0][0], 4, 2, 1);

    return 0;
}

static int svq1_decode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    MPV_common_end(s);
    return 0;
}
#endif /* CONFIG_DECODERS */

#ifdef CONFIG_ENCODERS
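
/* Write the SVQ1 frame header: frame code 0x20 (so no checksum or embedded
 * string is needed), temporal reference, picture type and, for I frames, the
 * frame size as a table index or as explicit 12-bit dimensions. */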
static void svq1_write_header(SVQ1Context *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == I_TYPE) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        for (i = 0; i < 7; i++)
        {
            if ((svq1_frame_size_table[i].width == s->frame_width) &&
                (svq1_frame_size_table[i].height == s->frame_height))
            {
                put_bits(&s->pb, 3, i);
                break;
            }
        }

        if (i == 7)
        {
            put_bits(&s->pb, 3, 7);
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD 100
#define THRESHOLD_MULTIPLIER 0.6

#if defined(HAVE_ALTIVEC)
#undef vector
#endif
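
/* Recursive rate-distortion encoder for one vector: pick the best mean plus up
 * to six codebook stages (levels < 4 only), then, if the score is still above
 * 'threshold' and the vector can be split, also try coding the two halves at
 * level-1 and keep whichever variant has the lower cost (SSD plus the
 * lambda-weighted bit count). Bits go to the per-level reorder_pb buffers so
 * they can later be emitted in the bitstream's breadth-first order. */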
static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, int level, int threshold, int lambda, int intra){
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7]= {0, 0, 0, 0, 0, 0};
    int w= 2<<((level+2)>>1);
    int h= 2<<((level+1)>>1);
    int size=w*h;
    int16_t block[7][256];
    const int8_t *codebook_sum, *codebook;
    const uint16_t (*mean_vlc)[2];
    const uint8_t (*multistage_vlc)[2];

    best_score=0;
    //FIXME optimize, this doesn't need to be done multiple times
    if(intra){
        codebook_sum= svq1_intra_codebook_sum[level];
        codebook= svq1_intra_codebooks[level];
        mean_vlc= svq1_intra_mean_vlc;
        multistage_vlc= svq1_intra_multistage_vlc[level];
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v= src[x + y*stride];
                block[0][x + w*y]= v;
                best_score += v*v;
                block_sum[0] += v;
            }
        }
    }else{
        codebook_sum= svq1_inter_codebook_sum[level];
        codebook= svq1_inter_codebooks[level];
        mean_vlc= svq1_inter_mean_vlc + 256;
        multistage_vlc= svq1_inter_multistage_vlc[level];
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v= src[x + y*stride] - ref[x + y*stride];
                block[0][x + w*y]= v;
                best_score += v*v;
                block_sum[0] += v;
            }
        }
    }

    best_count=0;
    best_score -= ((block_sum[0]*block_sum[0])>>(level+3));
    best_mean= (block_sum[0] + (size>>1)) >> (level+3);

    if(level<4){
        for(count=1; count<7; count++){
            int best_vector_score= INT_MAX;
            int best_vector_sum=-999, best_vector_mean=-999;
            const int stage= count-1;
            const int8_t *vector;

            for(i=0; i<16; i++){
                int sum= codebook_sum[stage*16 + i];
                int sqr, diff, score;

                vector = codebook + stage*size*16 + i*size;
                sqr = s->dsp.ssd_int8_vs_int16(vector, block[stage], size);
                diff= block_sum[stage] - sum;
                score= sqr - ((diff*(int64_t)diff)>>(level+3)); //FIXME 64bit slooow
                if(score < best_vector_score){
                    int mean= (diff + (size>>1)) >> (level+3);
                    assert(mean >-300 && mean<300);
                    mean= av_clip(mean, intra?0:-256, 255);
                    best_vector_score= score;
                    best_vector[stage]= i;
                    best_vector_sum= sum;
                    best_vector_mean= mean;
                }
            }
            assert(best_vector_mean != -999);
            vector= codebook + stage*size*16 + best_vector[stage]*size;
            for(j=0; j<size; j++){
                block[stage+1][j] = block[stage][j] - vector[j];
            }
            block_sum[stage+1]= block_sum[stage] - best_vector_sum;
            best_vector_score +=
                lambda*(+ 1 + 4*count
                        + multistage_vlc[1+count][1]
                        + mean_vlc[best_vector_mean][1]);

            if(best_vector_score < best_score){
                best_score= best_vector_score;
                best_count= count;
                best_mean= best_vector_mean;
            }
        }
    }

    split=0;
    if(best_score > threshold && level){
        int score=0;
        int offset= (level&1) ? stride*h/2 : w/2;
        PutBitContext backup[6];

        for(i=level-1; i>=0; i--){
            backup[i]= s->reorder_pb[i];
        }
        score += encode_block(s, src         , ref         , decoded         , stride, level-1, threshold>>1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset, stride, level-1, threshold>>1, lambda, intra);
        score += lambda;

        if(score < best_score){
            best_score= score;
            split=1;
        }else{
            for(i=level-1; i>=0; i--){
                s->reorder_pb[i]= backup[i];
            }
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if(!split){
        assert((best_mean >= 0 && best_mean<256) || !intra);
        assert(best_mean >= -256 && best_mean<256);
        assert(best_count >=0 && best_count<7);
        assert(level<4 || best_count==0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++){
            assert(best_vector[i]>=0 && best_vector[i]<16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                decoded[x + y*stride]= src[x + y*stride] - block[best_count][x + w*y] + best_mean;
            }
        }
    }

    return best_score;
}
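
/* Encode one plane: for P frames, run the mpegvideo motion estimation over a
 * padded copy of the plane first; then, for every 16x16 macroblock, evaluate
 * intra, motion-compensated inter and skip candidates and copy the bits of
 * the cheapest one into the output bitstream. */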
static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    const int lambda= (s->picture.quality*s->picture.quality) >> (2*FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width = (width + 15) / 16;
    block_height = (height + 15) / 16;

    if(s->picture.pict_type == P_TYPE){
        s->m.avctx= s->avctx;
        s->m.current_picture_ptr= &s->m.current_picture;
        s->m.last_picture_ptr = &s->m.last_picture;
        s->m.last_picture.data[0]= ref_plane;
        s->m.linesize=
        s->m.last_picture.linesize[0]=
        s->m.new_picture.linesize[0]=
        s->m.current_picture.linesize[0]= stride;
        s->m.width= width;
        s->m.height= height;
        s->m.mb_width= block_width;
        s->m.mb_height= block_height;
        s->m.mb_stride= s->m.mb_width+1;
        s->m.b8_stride= 2*s->m.mb_width+1;
        s->m.f_code=1;
        s->m.pict_type= s->picture.pict_type;
        s->m.me_method= s->avctx->me_method;
        s->m.me.scene_change_score=0;
        s->m.flags= s->avctx->flags;
//        s->m.out_format = FMT_H263;
//        s->m.unrestricted_mv= 1;

        s->m.lambda= s->picture.quality;
        s->m.qscale= (s->m.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
        s->m.lambda2= (s->m.lambda*s->m.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;

        if(!s->motion_val8[plane]){
            s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t));
            s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t));
        }

        s->m.mb_type= s->mb_type;

        //dummies, to avoid segfaults
        s->m.current_picture.mb_mean= (uint8_t *)s->dummy;
        s->m.current_picture.mb_var= (uint16_t*)s->dummy;
        s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
        s->m.current_picture.mb_type= s->dummy;

        s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
        s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
        s->m.dsp= s->dsp; //move
        ff_init_me(&s->m);

        s->m.me.dia_size= s->avctx->dia_size;
        s->m.first_slice_line=1;
        for (y = 0; y < block_height; y++) {
            uint8_t src[stride*16];

            s->m.new_picture.data[0]= src - y*16*stride; //ugly
            s->m.mb_y= y;

            for(i=0; i<16 && i + 16*y<height; i++){
                memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
                for(x=width; x<16*block_width; x++)
                    src[i*stride+x]= src[i*stride+x-1];
            }
            for(; i<16 && i + 16*y<16*block_height; i++)
                memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x= x;
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line=0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line=1;
    for (y = 0; y < block_height; y++) {
        uint8_t src[stride*16];

        for(i=0; i<16 && i + 16*y<height; i++){
            memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
            for(x=width; x<16*block_width; x++)
                src[i*stride+x]= src[i*stride+x-1];
        }
        for(; i<16 && i + 16*y<16*block_height; i++)
            memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

        s->m.mb_y= y;
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[3][6][7*32];
            int count[3][6];
            int offset = y * 16 * stride + x * 16;
            uint8_t *decoded= decoded_plane + offset;
            uint8_t *ref= ref_plane + offset;
            int score[4]={0,0,0,0}, best;
            uint8_t temp[16*stride];

            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x= x;
            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            if(s->picture.pict_type == I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
                for(i=0; i<6; i++){
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);
                }
                if(s->picture.pict_type == P_TYPE){
                    const uint8_t *vlc= svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0]= vlc[1]*lambda;
                }
                score[0]+= encode_block(s, src+16*x, NULL, temp, stride, 5, 64, lambda, 1);
                for(i=0; i<6; i++){
                    count[0][i]= put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            }else
                score[0]= INT_MAX;

            best=0;

            if(s->picture.pict_type == P_TYPE){
                const uint8_t *vlc= svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
                    for(i=0; i<6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb= s->reorder_pb[5];
                    mx= motion_ptr[0];
                    my= motion_ptr[1];
                    assert(mx>=-32 && mx<=31);
                    assert(my>=-32 && my<=31);
                    assert(pred_x>=-32 && pred_x<=31);
                    assert(pred_y>=-32 && pred_y<=31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5]= s->m.pb;
                    score[1] += lambda*put_bits_count(&s->reorder_pb[5]);

                    dxy= (mx&1) + 2*(my&1);

                    s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16);

                    score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0);
                    best= score[1] <= score[0];

                    vlc= svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]= s->dsp.sse[0](NULL, src+16*x, ref, stride, 16);
                    score[2]+= vlc[1]*lambda;
                    if(score[2] < score[best] && mx==0 && my==0){
                        best=2;
                        s->dsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        for(i=0; i<6; i++){
                            count[2][i]=0;
                        }
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if(best==1){
                    for(i=0; i<6; i++){
                        count[1][i]= put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                }else{
                    motion_ptr[0                 ] = motion_ptr[1                 ]=
                    motion_ptr[2                 ] = motion_ptr[3                 ]=
                    motion_ptr[0+2*s->m.b8_stride] = motion_ptr[1+2*s->m.b8_stride]=
                    motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride]=0;
                }
            }

            s->rd_total += score[best];

            for(i=5; i>=0; i--){
                ff_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]);
            }

            if(best==0){
                s->dsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
            }
        }
        s->m.first_slice_line=0;
    }
    return 0;
}

static int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1Context * const s = avctx->priv_data;

    dsputil_init(&s->dsp, avctx);
    avctx->coded_frame= (AVFrame*)&s->picture;

    s->frame_width = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width = (s->frame_width + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width = (s->frame_width / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx= avctx;
    s->m.avctx= avctx;
    s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t));
    s->m.me.map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->mb_type = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
    s->dummy = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
    h263_encode_init(&s->m); //mv_penalty

    return 0;
}
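
/* Encode one frame: the current and last pictures are swapped for reference
 * handling, the picture type is chosen from gop_size and frame_number, the
 * header and the three planes are written, and the bitstream is zero-padded
 * to a 32-bit boundary. */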
static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
                             int buf_size, void *data)
{
    SVQ1Context * const s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    AVFrame temp;
    int i;

    if(avctx->pix_fmt != PIX_FMT_YUV410P){
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    if(!s->current_picture.data[0]){
        avctx->get_buffer(avctx, &s->current_picture);
        avctx->get_buffer(avctx, &s->last_picture);
    }

    temp= s->current_picture;
    s->current_picture= s->last_picture;
    s->last_picture= temp;

    init_put_bits(&s->pb, buf, buf_size);

    *p = *pict;
    p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ? P_TYPE : I_TYPE;
    p->key_frame = p->pict_type == I_TYPE;

    svq1_write_header(s, p->pict_type);
    for(i=0; i<3; i++){
        if(svq1_encode_plane(s, i,
            s->picture.data[i], s->last_picture.data[i], s->current_picture.data[i],
            s->frame_width / (i?4:1), s->frame_height / (i?4:1),
            s->picture.linesize[i], s->current_picture.linesize[i]) < 0)
            return -1;
    }

//    align_put_bits(&s->pb);
    while(put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    return (put_bits_count(&s->pb) / 8);
}

static int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1Context * const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n", s->rd_total/(double)(avctx->width*avctx->height*avctx->frame_number));

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);

    for(i=0; i<3; i++){
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    return 0;
}
#endif //CONFIG_ENCODERS

#ifdef CONFIG_DECODERS
AVCodec svq1_decoder = {
    "svq1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ1,
    sizeof(MpegEncContext),
    svq1_decode_init,
    NULL,
    svq1_decode_end,
    svq1_decode_frame,
    CODEC_CAP_DR1,
    .flush= ff_mpeg_flush,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, -1},
};
#endif

#ifdef CONFIG_ENCODERS
AVCodec svq1_encoder = {
    "svq1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ1,
    sizeof(SVQ1Context),
    svq1_encode_init,
    svq1_encode_frame,
    svq1_encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, -1},
};
#endif //CONFIG_ENCODERS