/*
 *
 * Copyright (C) 2002 the xine project
 * Copyright (C) 2002 the ffmpeg project
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * (SVQ1 Decoder)
 * Ported to mplayer by Arpi <arpi@thot.banki.hu>
 * Ported to libavcodec by Nick Kurshev <nickols_k@mail.ru>
 *
 * SVQ1 Encoder (c) 2004 Mike Melanson <melanson@pcisys.net>
 */

/**
 * @file svq1.c
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information on the SVQ1 algorithm, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 */

//#define DEBUG_SVQ1
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <limits.h>

#include "common.h"
#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "bswap.h"

#undef NDEBUG
#include <assert.h>

extern const uint8_t mvtab[33][2];

static VLC svq1_block_type;
static VLC svq1_motion_component;
static VLC svq1_intra_multistage[6];
static VLC svq1_inter_multistage[6];
static VLC svq1_intra_mean;
static VLC svq1_inter_mean;

#define SVQ1_BLOCK_SKIP     0
#define SVQ1_BLOCK_INTER    1
#define SVQ1_BLOCK_INTER_4V 2
#define SVQ1_BLOCK_INTRA    3

typedef struct SVQ1Context {
    MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to make the motion estimation eventually independent of MpegEncContext, so this will be removed then (FIXME/XXX)
    AVCodecContext *avctx;
    DSPContext dsp;
    AVFrame picture;
    AVFrame current_picture;
    AVFrame last_picture;
    PutBitContext pb;
    GetBitContext gb;

    PutBitContext reorder_pb[6]; //why ooh why this sick breadth first order, everything is slower and more complex

    int frame_width;
    int frame_height;

    /* Y plane block dimensions */
    int y_block_width;
    int y_block_height;

    /* U & V plane (C planes) block dimensions */
    int c_block_width;
    int c_block_height;

    uint16_t *mb_type;
    uint32_t *dummy;
    int16_t (*motion_val8[3])[2];
    int16_t (*motion_val16[3])[2];

    int64_t rd_total;
} SVQ1Context;

/* motion vector (prediction) */
typedef struct svq1_pmv_s {
    int x;
    int y;
} svq1_pmv_t;

#include "svq1_cb.h"
#include "svq1_vlc.h"
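
/* Lookup table for the 16-bit packet checksum; the entries match the standard
 * CRC-16/CCITT polynomial (0x1021) and are consumed one byte at a time by
 * svq1_packet_checksum() below. */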
static const uint16_t checksum_table[256] = {
    0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
    0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
    0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
    0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
    0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
    0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
    0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
    0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
    0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
    0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
    0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
    0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
    0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
    0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
    0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
    0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
    0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
    0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
    0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
    0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
    0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
    0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
    0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
    0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
    0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
    0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
    0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
    0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
    0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
    0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
    0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
    0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
};

static const uint8_t string_table[256] = {
    0x00, 0xD5, 0x7F, 0xAA, 0xFE, 0x2B, 0x81, 0x54,
    0x29, 0xFC, 0x56, 0x83, 0xD7, 0x02, 0xA8, 0x7D,
    0x52, 0x87, 0x2D, 0xF8, 0xAC, 0x79, 0xD3, 0x06,
    0x7B, 0xAE, 0x04, 0xD1, 0x85, 0x50, 0xFA, 0x2F,
    0xA4, 0x71, 0xDB, 0x0E, 0x5A, 0x8F, 0x25, 0xF0,
    0x8D, 0x58, 0xF2, 0x27, 0x73, 0xA6, 0x0C, 0xD9,
    0xF6, 0x23, 0x89, 0x5C, 0x08, 0xDD, 0x77, 0xA2,
    0xDF, 0x0A, 0xA0, 0x75, 0x21, 0xF4, 0x5E, 0x8B,
    0x9D, 0x48, 0xE2, 0x37, 0x63, 0xB6, 0x1C, 0xC9,
    0xB4, 0x61, 0xCB, 0x1E, 0x4A, 0x9F, 0x35, 0xE0,
    0xCF, 0x1A, 0xB0, 0x65, 0x31, 0xE4, 0x4E, 0x9B,
    0xE6, 0x33, 0x99, 0x4C, 0x18, 0xCD, 0x67, 0xB2,
    0x39, 0xEC, 0x46, 0x93, 0xC7, 0x12, 0xB8, 0x6D,
    0x10, 0xC5, 0x6F, 0xBA, 0xEE, 0x3B, 0x91, 0x44,
    0x6B, 0xBE, 0x14, 0xC1, 0x95, 0x40, 0xEA, 0x3F,
    0x42, 0x97, 0x3D, 0xE8, 0xBC, 0x69, 0xC3, 0x16,
    0xEF, 0x3A, 0x90, 0x45, 0x11, 0xC4, 0x6E, 0xBB,
    0xC6, 0x13, 0xB9, 0x6C, 0x38, 0xED, 0x47, 0x92,
    0xBD, 0x68, 0xC2, 0x17, 0x43, 0x96, 0x3C, 0xE9,
    0x94, 0x41, 0xEB, 0x3E, 0x6A, 0xBF, 0x15, 0xC0,
    0x4B, 0x9E, 0x34, 0xE1, 0xB5, 0x60, 0xCA, 0x1F,
    0x62, 0xB7, 0x1D, 0xC8, 0x9C, 0x49, 0xE3, 0x36,
    0x19, 0xCC, 0x66, 0xB3, 0xE7, 0x32, 0x98, 0x4D,
    0x30, 0xE5, 0x4F, 0x9A, 0xCE, 0x1B, 0xB1, 0x64,
    0x72, 0xA7, 0x0D, 0xD8, 0x8C, 0x59, 0xF3, 0x26,
    0x5B, 0x8E, 0x24, 0xF1, 0xA5, 0x70, 0xDA, 0x0F,
    0x20, 0xF5, 0x5F, 0x8A, 0xDE, 0x0B, 0xA1, 0x74,
    0x09, 0xDC, 0x76, 0xA3, 0xF7, 0x22, 0x88, 0x5D,
    0xD6, 0x03, 0xA9, 0x7C, 0x28, 0xFD, 0x57, 0x82,
    0xFF, 0x2A, 0x80, 0x55, 0x01, 0xD4, 0x7E, 0xAB,
    0x84, 0x51, 0xFB, 0x2E, 0x7A, 0xAF, 0x05, 0xD0,
    0xAD, 0x78, 0xD2, 0x07, 0x53, 0x86, 0x2C, 0xF9
};
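
/* The helper macros below walk the list of sub-vectors breadth first and do
 * the codebook arithmetic on packed 8-bit pixels: two pixels per 16-bit lane
 * of n1/n2, with a final per-byte saturation to the 0..255 range. */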
#define SVQ1_PROCESS_VECTOR()\
    for (; level > 0; i++) {\
        /* process next depth */\
        if (i == m) {\
            m = n;\
            if (--level == 0)\
                break;\
        }\
        /* divide block if next bit set */\
        if (get_bits (bitbuf, 1) == 0)\
            break;\
        /* add child nodes */\
        list[n++] = list[i];\
        list[n++] = list[i] + (((level & 1) ? pitch : 1) << ((level / 2) + 1));\
    }

#define SVQ1_ADD_CODEBOOK()\
    /* add codebook entries to vector */\
    for (j=0; j < stages; j++) {\
        n3 = codebook[entries[j]] ^ 0x80808080;\
        n1 += ((n3 & 0xFF00FF00) >> 8);\
        n2 += (n3 & 0x00FF00FF);\
    }\
\
    /* clip to [0..255] */\
    if (n1 & 0xFF00FF00) {\
        n3 = ((( n1 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n1 += 0x7F007F00;\
        n1 |= (((~n1 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n1 &= (n3 & 0x00FF00FF);\
    }\
\
    if (n2 & 0xFF00FF00) {\
        n3 = ((( n2 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n2 += 0x7F007F00;\
        n2 |= (((~n2 >> 15) & 0x00010001) | 0x01000100) - 0x00010001;\
        n2 &= (n3 & 0x00FF00FF);\
    }

#define SVQ1_DO_CODEBOOK_INTRA()\
    for (y=0; y < height; y++) {\
        for (x=0; x < (width / 4); x++, codebook++) {\
            n1 = n4;\
            n2 = n4;\
            SVQ1_ADD_CODEBOOK()\
            /* store result */\
            dst[x] = (n1 << 8) | n2;\
        }\
        dst += (pitch / 4);\
    }

#define SVQ1_DO_CODEBOOK_NONINTRA()\
    for (y=0; y < height; y++) {\
        for (x=0; x < (width / 4); x++, codebook++) {\
            n3 = dst[x];\
            /* add mean value to vector */\
            n1 = ((n3 & 0xFF00FF00) >> 8) + n4;\
            n2 = (n3 & 0x00FF00FF) + n4;\
            SVQ1_ADD_CODEBOOK()\
            /* store result */\
            dst[x] = (n1 << 8) | n2;\
        }\
        dst += (pitch / 4);\
    }

#define SVQ1_CALC_CODEBOOK_ENTRIES(cbook)\
    codebook = (const uint32_t *) cbook[level];\
    bit_cache = get_bits (bitbuf, 4*stages);\
    /* calculate codebook entries for this vector */\
    for (j=0; j < stages; j++) {\
        entries[j] = (((bit_cache >> (4*(stages - j - 1))) & 0xF) + 16*j) << (level + 1);\
    }\
    mean -= (stages * 128);\
    n4 = ((mean + (mean >> 31)) << 16) | (mean & 0xFFFF);
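
/* Decode one intra block: starting from a 16x16 vector (level 5), each vector
 * is optionally split in two down to level 0 (4x2), then coded as a mean
 * value plus up to six multistage codebook entries. */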
static int svq1_decode_block_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch ) {
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    unsigned x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i=0, m=1, n=1, level=5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst = (uint32_t *) list[i];
        width = 1 << ((4 + level) /2);
        height = 1 << ((3 + level) /2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_intra_multistage[level].table, 3, 3) - 1;

        if (stages == -1) {
            for (y=0; y < height; y++) {
                memset (&dst[y*(pitch / 4)], 0, width);
            }
            continue; /* skip vector */
        }

        if ((stages > 0) && (level >= 4)) {
#ifdef DEBUG_SVQ1
            /* no decoder context is in scope here, so log without one */
            av_log(NULL, AV_LOG_INFO, "Error (svq1_decode_block_intra): invalid vector: stages=%i level=%i\n",stages,level);
#endif
            return -1; /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_intra_mean.table, 8, 3);

        if (stages == 0) {
            for (y=0; y < height; y++) {
                memset (&dst[y*(pitch / 4)], mean, width);
            }
        } else {
            SVQ1_CALC_CODEBOOK_ENTRIES(svq1_intra_codebooks);
            SVQ1_DO_CODEBOOK_INTRA()
        }
    }

    return 0;
}

static int svq1_decode_block_non_intra (GetBitContext *bitbuf, uint8_t *pixels, int pitch ) {
    uint32_t bit_cache;
    uint8_t *list[63];
    uint32_t *dst;
    const uint32_t *codebook;
    int entries[6];
    int i, j, m, n;
    int mean, stages;
    int x, y, width, height, level;
    uint32_t n1, n2, n3, n4;

    /* initialize list for breadth first processing of vectors */
    list[0] = pixels;

    /* recursively process vector */
    for (i=0, m=1, n=1, level=5; i < n; i++) {
        SVQ1_PROCESS_VECTOR();

        /* destination address and vector size */
        dst = (uint32_t *) list[i];
        width = 1 << ((4 + level) /2);
        height = 1 << ((3 + level) /2);

        /* get number of stages (-1 skips vector, 0 for mean only) */
        stages = get_vlc2(bitbuf, svq1_inter_multistage[level].table, 3, 2) - 1;

        if (stages == -1) continue; /* skip vector */

        if ((stages > 0) && (level >= 4)) {
#ifdef DEBUG_SVQ1
            /* no decoder context is in scope here, so log without one */
            av_log(NULL, AV_LOG_INFO, "Error (svq1_decode_block_non_intra): invalid vector: stages=%i level=%i\n",stages,level);
#endif
            return -1; /* invalid vector */
        }

        mean = get_vlc2(bitbuf, svq1_inter_mean.table, 9, 3) - 256;

        SVQ1_CALC_CODEBOOK_ENTRIES(svq1_inter_codebooks);
        SVQ1_DO_CODEBOOK_NONINTRA()
    }

    return 0;
}
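
/* Each motion vector component is decoded as a VLC-coded magnitude plus a
 * sign bit and added to the median of three neighbouring predictors; the
 * "<< 26 >> 26" below wraps the result into the signed 6-bit range -32..31. */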
static int svq1_decode_motion_vector (GetBitContext *bitbuf, svq1_pmv_t *mv, svq1_pmv_t **pmv) {
    int diff;
    int i;

    for (i=0; i < 2; i++) {
        /* get motion code */
        diff = get_vlc2(bitbuf, svq1_motion_component.table, 7, 2);
        if(diff<0)
            return -1;
        else if(diff){
            if(get_bits1(bitbuf)) diff= -diff;
        }

        /* add median of motion vector predictors and clip result */
        if (i == 1)
            mv->y = ((diff + mid_pred(pmv[0]->y, pmv[1]->y, pmv[2]->y)) << 26) >> 26;
        else
            mv->x = ((diff + mid_pred(pmv[0]->x, pmv[1]->x, pmv[2]->x)) << 26) >> 26;
    }

    return 0;
}

static void svq1_skip_block (uint8_t *current, uint8_t *previous, int pitch, int x, int y) {
    uint8_t *src;
    uint8_t *dst;
    int i;

    src = &previous[x + y*pitch];
    dst = current;

    for (i=0; i < 16; i++) {
        memcpy (dst, src, 16);
        src += pitch;
        dst += pitch;
    }
}

static int svq1_motion_inter_block (MpegEncContext *s, GetBitContext *bitbuf,
                                    uint8_t *current, uint8_t *previous, int pitch,
                                    svq1_pmv_t *motion, int x, int y) {
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv_t mv;
    svq1_pmv_t *pmv[3];
    int result;

    /* predict and decode motion vector */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    }
    else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector (bitbuf, &mv, pmv);

    if (result != 0)
        return result;

    motion[0].x           =
    motion[(x / 8) + 2].x =
    motion[(x / 8) + 3].x = mv.x;
    motion[0].y           =
    motion[(x / 8) + 2].y =
    motion[(x / 8) + 3].y = mv.y;

    if(y + (mv.y >> 1)<0)
        mv.y= 0;
    if(x + (mv.x >> 1)<0)
        mv.x= 0;

#if 0
    int w= (s->width+15)&~15;
    int h= (s->height+15)&~15;
    if(x + (mv.x >> 1)<0 || y + (mv.y >> 1)<0 || x + (mv.x >> 1) + 16 > w || y + (mv.y >> 1) + 16> h)
        av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mv.x >> 1), y + (mv.y >> 1));
#endif

    src = &previous[(x + (mv.x >> 1)) + (y + (mv.y >> 1))*pitch];
    dst = current;

    s->dsp.put_pixels_tab[0][((mv.y & 1) << 1) | (mv.x & 1)](dst,src,pitch,16);

    return 0;
}

static int svq1_motion_inter_4v_block (MpegEncContext *s, GetBitContext *bitbuf,
                                       uint8_t *current, uint8_t *previous, int pitch,
                                       svq1_pmv_t *motion,int x, int y) {
    uint8_t *src;
    uint8_t *dst;
    svq1_pmv_t mv;
    svq1_pmv_t *pmv[4];
    int i, result;

    /* predict and decode motion vector (0) */
    pmv[0] = &motion[0];
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    }
    else {
        pmv[1] = &motion[(x / 8) + 2];
        pmv[2] = &motion[(x / 8) + 4];
    }

    result = svq1_decode_motion_vector (bitbuf, &mv, pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (1) */
    pmv[0] = &mv;
    if (y == 0) {
        pmv[1] =
        pmv[2] = pmv[0];
    }
    else {
        pmv[1] = &motion[(x / 8) + 3];
    }

    result = svq1_decode_motion_vector (bitbuf, &motion[0], pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (2) */
    pmv[1] = &motion[0];
    pmv[2] = &motion[(x / 8) + 1];

    result = svq1_decode_motion_vector (bitbuf, &motion[(x / 8) + 2], pmv);

    if (result != 0)
        return result;

    /* predict and decode motion vector (3) */
    pmv[2] = &motion[(x / 8) + 2];
    pmv[3] = &motion[(x / 8) + 3];

    result = svq1_decode_motion_vector (bitbuf, pmv[3], pmv);

    if (result != 0)
        return result;

    /* form predictions */
    for (i=0; i < 4; i++) {
        int mvx= pmv[i]->x + (i&1)*16;
        int mvy= pmv[i]->y + (i>>1)*16;

        ///XXX /FIXME clipping or padding?
        if(y + (mvy >> 1)<0)
            mvy= 0;
        if(x + (mvx >> 1)<0)
            mvx= 0;

#if 0
        int w= (s->width+15)&~15;
        int h= (s->height+15)&~15;
        if(x + (mvx >> 1)<0 || y + (mvy >> 1)<0 || x + (mvx >> 1) + 8 > w || y + (mvy >> 1) + 8> h)
            av_log(s->avctx, AV_LOG_INFO, "%d %d %d %d\n", x, y, x + (mvx >> 1), y + (mvy >> 1));
#endif

        src = &previous[(x + (mvx >> 1)) + (y + (mvy >> 1))*pitch];
        dst = current;

        s->dsp.put_pixels_tab[1][((mvy & 1) << 1) | (mvx & 1)](dst,src,pitch,8);

        /* select next block */
        if (i & 1) {
            current += 8*(pitch - 1);
        } else {
            current += 8;
        }
    }

    return 0;
}
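
/* Decode one 16x16 block of a delta (P) frame: it is either skipped, predicted
 * with a single motion vector, predicted with four half-block vectors, or
 * coded intra; inter blocks are followed by a non-intra residual. */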
static int svq1_decode_delta_block (MpegEncContext *s, GetBitContext *bitbuf,
                                    uint8_t *current, uint8_t *previous, int pitch,
                                    svq1_pmv_t *motion, int x, int y) {
    uint32_t block_type;
    int result = 0;

    /* get block type */
    block_type = get_vlc2(bitbuf, svq1_block_type.table, 2, 2);

    /* reset motion vectors */
    if (block_type == SVQ1_BLOCK_SKIP || block_type == SVQ1_BLOCK_INTRA) {
        motion[0].x           =
        motion[0].y           =
        motion[(x / 8) + 2].x =
        motion[(x / 8) + 2].y =
        motion[(x / 8) + 3].x =
        motion[(x / 8) + 3].y = 0;
    }

    switch (block_type) {
    case SVQ1_BLOCK_SKIP:
        svq1_skip_block (current, previous, pitch, x, y);
        break;

    case SVQ1_BLOCK_INTER:
        result = svq1_motion_inter_block (s, bitbuf, current, previous, pitch, motion, x, y);

        if (result != 0)
        {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_block %i\n",result);
#endif
            break;
        }
        result = svq1_decode_block_non_intra (bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTER_4V:
        result = svq1_motion_inter_4v_block (s, bitbuf, current, previous, pitch, motion, x, y);

        if (result != 0)
        {
#ifdef DEBUG_SVQ1
            av_log(s->avctx, AV_LOG_INFO, "Error in svq1_motion_inter_4v_block %i\n",result);
#endif
            break;
        }
        result = svq1_decode_block_non_intra (bitbuf, current, pitch);
        break;

    case SVQ1_BLOCK_INTRA:
        result = svq1_decode_block_intra (bitbuf, current, pitch);
        break;
    }

    return result;
}

/* standard video sizes */
static struct { int width; int height; } svq1_frame_size_table[8] = {
    { 160, 120 }, { 128,  96 }, { 176, 144 }, { 352, 288 },
    { 704, 576 }, { 240, 180 }, { 320, 240 }, {  -1,  -1 }
};

static uint16_t svq1_packet_checksum (uint8_t *data, int length, int value) {
    int i;

    for (i=0; i < length; i++) {
        value = checksum_table[data[i] ^ (value >> 8)] ^ ((value & 0xFF) << 8);
    }

    return value;
}

#if 0 /* unused, remove? */
static uint16_t svq1_component_checksum (uint16_t *pixels, int pitch,
                                         int width, int height, int value) {
    int x, y;

    for (y=0; y < height; y++) {
        for (x=0; x < width; x++) {
            value = checksum_table[pixels[x] ^ (value >> 8)] ^ ((value & 0xFF) << 8);
        }

        pixels += pitch;
    }

    return value;
}
#endif
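
/* Embedded strings are stored scrambled: the first byte is the length, and
 * each following byte is XORed with a seed that is advanced through
 * string_table after every output byte. */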
static void svq1_parse_string (GetBitContext *bitbuf, uint8_t *out) {
    uint8_t seed;
    int i;

    out[0] = get_bits (bitbuf, 8);
    seed = string_table[out[0]];

    for (i=1; i <= out[0]; i++) {
        out[i] = get_bits (bitbuf, 8) ^ seed;
        seed = string_table[out[i] ^ seed];
    }
}

static int svq1_decode_frame_header (GetBitContext *bitbuf,MpegEncContext *s) {
    int frame_size_code;
    int temporal_reference;

    temporal_reference = get_bits (bitbuf, 8);

    /* frame type */
    s->pict_type= get_bits (bitbuf, 2)+1;
    if(s->pict_type==4)
        return -1;

    if (s->pict_type == I_TYPE) {

        /* unknown fields */
        if (s->f_code == 0x50 || s->f_code == 0x60) {
            int csum = get_bits (bitbuf, 16);

            csum = svq1_packet_checksum ((uint8_t *)bitbuf->buffer, bitbuf->size_in_bits>>3, csum);

//            av_log(s->avctx, AV_LOG_INFO, "%s checksum (%02x) for packet data\n",
//                   (csum == 0) ? "correct" : "incorrect", csum);
        }

        if ((s->f_code ^ 0x10) >= 0x50) {
            uint8_t msg[256];

            svq1_parse_string (bitbuf, msg);

            av_log(s->avctx, AV_LOG_INFO, "embedded message: \"%s\"\n", (char *) msg);
        }

        skip_bits (bitbuf, 2);
        skip_bits (bitbuf, 2);
        skip_bits1 (bitbuf);

        /* load frame size */
        frame_size_code = get_bits (bitbuf, 3);

        if (frame_size_code == 7) {
            /* load width, height (12 bits each) */
            s->width = get_bits (bitbuf, 12);
            s->height = get_bits (bitbuf, 12);

            if (!s->width || !s->height)
                return -1;
        } else {
            /* get width, height from table */
            s->width = svq1_frame_size_table[frame_size_code].width;
            s->height = svq1_frame_size_table[frame_size_code].height;
        }
    }

    /* unknown fields */
    if (get_bits (bitbuf, 1) == 1) {
        skip_bits1 (bitbuf); /* use packet checksum if (1) */
        skip_bits1 (bitbuf); /* component checksums after image data if (1) */

        if (get_bits (bitbuf, 2) != 0)
            return -1;
    }

    if (get_bits (bitbuf, 1) == 1) {
        skip_bits1 (bitbuf);
        skip_bits (bitbuf, 4);
        skip_bits1 (bitbuf);
        skip_bits (bitbuf, 2);

        while (get_bits (bitbuf, 1) == 1) {
            skip_bits (bitbuf, 8);
        }
    }

    return 0;
}
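
/* Frames whose 22-bit frame code is not 0x20 carry four obfuscated 32-bit
 * header words (starting at byte 4); svq1_decode_frame() below half-word
 * swaps and XORs them back into place before parsing the header. */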
static int svq1_decode_frame(AVCodecContext *avctx,
                             void *data, int *data_size,
                             uint8_t *buf, int buf_size)
{
    MpegEncContext *s=avctx->priv_data;
    uint8_t *current, *previous;
    int result, i, x, y, width, height;
    AVFrame *pict = data;

    /* initialize bit buffer */
    init_get_bits(&s->gb,buf,buf_size*8);

    /* decode frame header */
    s->f_code = get_bits (&s->gb, 22);

    if ((s->f_code & ~0x70) || !(s->f_code & 0x60))
        return -1;

    /* swap some header bytes (why?) */
    if (s->f_code != 0x20) {
        uint32_t *src = (uint32_t *) (buf + 4);

        for (i=0; i < 4; i++) {
            src[i] = ((src[i] << 16) | (src[i] >> 16)) ^ src[7 - i];
        }
    }

    result = svq1_decode_frame_header (&s->gb, s);

    if (result != 0)
    {
#ifdef DEBUG_SVQ1
        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_frame_header %i\n",result);
#endif
        return result;
    }

    //FIXME this avoids some confusion for "B frames" without 2 references
    //this should be removed after libavcodec can handle more flexible picture types & ordering
    if(s->pict_type==B_TYPE && s->last_picture_ptr==NULL) return buf_size;

    if(avctx->hurry_up && s->pict_type==B_TYPE) return buf_size;
    if(  (avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type==B_TYPE)
       ||(avctx->skip_frame >= AVDISCARD_NONKEY && s->pict_type!=I_TYPE)
       ||  avctx->skip_frame >= AVDISCARD_ALL)
        return buf_size;

    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    /* decode y, u and v components */
    for (i=0; i < 3; i++) {
        int linesize;
        if (i == 0) {
            width  = (s->width+15)&~15;
            height = (s->height+15)&~15;
            linesize= s->linesize;
        } else {
            if(s->flags&CODEC_FLAG_GRAY) break;
            width  = (s->width/4+15)&~15;
            height = (s->height/4+15)&~15;
            linesize= s->uvlinesize;
        }

        current = s->current_picture.data[i];

        if(s->pict_type==B_TYPE){
            previous = s->next_picture.data[i];
        }else{
            previous = s->last_picture.data[i];
        }

        if (s->pict_type == I_TYPE) {
            /* keyframe */
            for (y=0; y < height; y+=16) {
                for (x=0; x < width; x+=16) {
                    result = svq1_decode_block_intra (&s->gb, &current[x], linesize);
                    if (result != 0)
                    {
//#ifdef DEBUG_SVQ1
                        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_block %i (keyframe)\n",result);
//#endif
                        return result;
                    }
                }
                current += 16*linesize;
            }
        } else {
            svq1_pmv_t pmv[width/8+3];
            /* delta frame */
            memset (pmv, 0, ((width / 8) + 3) * sizeof(svq1_pmv_t));

            for (y=0; y < height; y+=16) {
                for (x=0; x < width; x+=16) {
                    result = svq1_decode_delta_block (s, &s->gb, &current[x], previous,
                                                      linesize, pmv, x, y);
                    if (result != 0)
                    {
#ifdef DEBUG_SVQ1
                        av_log(s->avctx, AV_LOG_INFO, "Error in svq1_decode_delta_block %i\n",result);
#endif
                        return result;
                    }
                }

                pmv[0].x =
                pmv[0].y = 0;

                current += 16*linesize;
            }
        }
    }

    *pict = *(AVFrame*)&s->current_picture;

    MPV_frame_end(s);

    *data_size=sizeof(AVFrame);

    return buf_size;
}

static int svq1_decode_init(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;
    int i;

    MPV_decode_defaults(s);

    s->avctx = avctx;
    s->width = (avctx->width+3)&~3;
    s->height = (avctx->height+3)&~3;
    s->codec_id= avctx->codec->id;
    avctx->pix_fmt = PIX_FMT_YUV410P;
    avctx->has_b_frames= 1; // not true, but DP frames and these behave like unidirectional b frames
    s->flags= avctx->flags;
    if (MPV_common_init(s) < 0) return -1;

    init_vlc(&svq1_block_type, 2, 4,
             &svq1_block_type_vlc[0][1], 2, 1,
             &svq1_block_type_vlc[0][0], 2, 1, 1);

    init_vlc(&svq1_motion_component, 7, 33,
             &mvtab[0][1], 2, 1,
             &mvtab[0][0], 2, 1, 1);

    for (i = 0; i < 6; i++) {
        init_vlc(&svq1_intra_multistage[i], 3, 8,
                 &svq1_intra_multistage_vlc[i][0][1], 2, 1,
                 &svq1_intra_multistage_vlc[i][0][0], 2, 1, 1);
        init_vlc(&svq1_inter_multistage[i], 3, 8,
                 &svq1_inter_multistage_vlc[i][0][1], 2, 1,
                 &svq1_inter_multistage_vlc[i][0][0], 2, 1, 1);
    }

    init_vlc(&svq1_intra_mean, 8, 256,
             &svq1_intra_mean_vlc[0][1], 4, 2,
             &svq1_intra_mean_vlc[0][0], 4, 2, 1);

    init_vlc(&svq1_inter_mean, 9, 512,
             &svq1_inter_mean_vlc[0][1], 4, 2,
             &svq1_inter_mean_vlc[0][0], 4, 2, 1);

    return 0;
}

static int svq1_decode_end(AVCodecContext *avctx)
{
    MpegEncContext *s = avctx->priv_data;

    MPV_common_end(s);
    return 0;
}

static void svq1_write_header(SVQ1Context *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == I_TYPE) {

        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 0);

        for (i = 0; i < 7; i++)
        {
            if ((svq1_frame_size_table[i].width == s->frame_width) &&
                (svq1_frame_size_table[i].height == s->frame_height))
            {
                put_bits(&s->pb, 3, i);
                break;
            }
        }

        if (i == 7)
        {
            put_bits(&s->pb, 3, 7);
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6

#if defined(HAVE_ALTIVEC)
#undef vector
#endif
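
/* Rate-distortion coding of one vector: try the multistage codebooks at this
 * level, then (except at level 0) recursively try splitting the block in two
 * and keep whichever alternative has the lower lambda-weighted cost. */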
static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref, uint8_t *decoded, int stride, int level, int threshold, int lambda, int intra){
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7]= {0, 0, 0, 0, 0, 0};
    int w= 2<<((level+2)>>1);
    int h= 2<<((level+1)>>1);
    int size=w*h;
    int16_t block[7][256];
    const int8_t *codebook_sum, *codebook;
    const uint16_t (*mean_vlc)[2];
    const uint8_t (*multistage_vlc)[2];

    best_score=0;
    //FIXME optimize, this doesn't need to be done multiple times
    if(intra){
        codebook_sum= svq1_intra_codebook_sum[level];
        codebook= svq1_intra_codebooks[level];
        mean_vlc= svq1_intra_mean_vlc;
        multistage_vlc= svq1_intra_multistage_vlc[level];
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v= src[x + y*stride];
                block[0][x + w*y]= v;
                best_score += v*v;
                block_sum[0] += v;
            }
        }
    }else{
        codebook_sum= svq1_inter_codebook_sum[level];
        codebook= svq1_inter_codebooks[level];
        mean_vlc= svq1_inter_mean_vlc + 256;
        multistage_vlc= svq1_inter_multistage_vlc[level];
        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                int v= src[x + y*stride] - ref[x + y*stride];
                block[0][x + w*y]= v;
                best_score += v*v;
                block_sum[0] += v;
            }
        }
    }

    best_count=0;
    best_score -= ((block_sum[0]*block_sum[0])>>(level+3));
    best_mean= (block_sum[0] + (size>>1)) >> (level+3);

    if(level<4){
        for(count=1; count<7; count++){
            int best_vector_score= INT_MAX;
            int best_vector_sum=-999, best_vector_mean=-999;
            const int stage= count-1;
            const int8_t *vector;

            for(i=0; i<16; i++){
                int sum= codebook_sum[stage*16 + i];
                int sqr=0;
                int diff, mean, score;

                vector = codebook + stage*size*16 + i*size;

                for(j=0; j<size; j++){
                    int v= vector[j];
                    sqr += (v - block[stage][j])*(v - block[stage][j]);
                }

                diff= block_sum[stage] - sum;
                mean= (diff + (size>>1)) >> (level+3);
                assert(mean >-300 && mean<300);
                if(intra) mean= clip(mean, 0, 255);
                else      mean= clip(mean, -256, 255);
                score= sqr - ((diff*(int64_t)diff)>>(level+3)); //FIXME 64bit slooow

                if(score < best_vector_score){
                    best_vector_score= score;
                    best_vector[stage]= i;
                    best_vector_sum= sum;
                    best_vector_mean= mean;
                }
            }

            assert(best_vector_mean != -999);
            vector= codebook + stage*size*16 + best_vector[stage]*size;
            for(j=0; j<size; j++){
                block[stage+1][j] = block[stage][j] - vector[j];
            }
            block_sum[stage+1]= block_sum[stage] - best_vector_sum;
            best_vector_score +=
                lambda*(+ 1 + 4*count
                        + multistage_vlc[1+count][1]
                        + mean_vlc[best_vector_mean][1]);

            if(best_vector_score < best_score){
                best_score= best_vector_score;
                best_count= count;
                best_mean= best_vector_mean;
            }
        }
    }

    split=0;
    if(best_score > threshold && level){
        int score=0;
        int offset= (level&1) ? stride*h/2 : w/2;
        PutBitContext backup[6];

        for(i=level-1; i>=0; i--){
            backup[i]= s->reorder_pb[i];
        }
        score += encode_block(s, src         , ref         , decoded         , stride, level-1, threshold>>1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset, stride, level-1, threshold>>1, lambda, intra);
        score += lambda;

        if(score < best_score){
            best_score= score;
            split=1;
        }else{
            for(i=level-1; i>=0; i--){
                s->reorder_pb[i]= backup[i];
            }
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if(!split){
        assert((best_mean >= 0 && best_mean<256) || !intra);
        assert(best_mean >= -256 && best_mean<256);
        assert(best_count >=0 && best_count<7);
        assert(level<4 || best_count==0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++){
            assert(best_vector[i]>=0 && best_vector[i]<16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for(y=0; y<h; y++){
            for(x=0; x<w; x++){
                decoded[x + y*stride]= src[x + y*stride] - block[best_count][x + w*y] + best_mean;
            }
        }
    }

    return best_score;
}

#ifdef CONFIG_ENCODERS

static int svq1_encode_plane(SVQ1Context *s, int plane, unsigned char *src_plane, unsigned char *ref_plane, unsigned char *decoded_plane,
    int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    const int lambda= (s->picture.quality*s->picture.quality) >> (2*FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width = (width + 15) / 16;
    block_height = (height + 15) / 16;

    if(s->picture.pict_type == P_TYPE){
        s->m.avctx= s->avctx;
        s->m.current_picture_ptr= &s->m.current_picture;
        s->m.last_picture_ptr   = &s->m.last_picture;
        s->m.last_picture.data[0]= ref_plane;
        s->m.linesize=
        s->m.last_picture.linesize[0]=
        s->m.new_picture.linesize[0]=
        s->m.current_picture.linesize[0]= stride;
        s->m.width= width;
        s->m.height= height;
        s->m.mb_width= block_width;
        s->m.mb_height= block_height;
        s->m.mb_stride= s->m.mb_width+1;
        s->m.b8_stride= 2*s->m.mb_width+1;
        s->m.f_code=1;
        s->m.pict_type= s->picture.pict_type;
        s->m.qscale= s->picture.quality/FF_QP2LAMBDA;
        s->m.me_method= s->avctx->me_method;

        if(!s->motion_val8[plane]){
            s->motion_val8 [plane]= av_mallocz((s->m.b8_stride*block_height*2 + 2)*2*sizeof(int16_t));
            s->motion_val16[plane]= av_mallocz((s->m.mb_stride*(block_height + 2) + 1)*2*sizeof(int16_t));
        }

        s->m.mb_type= s->mb_type;

        //dummies, to avoid segfaults
        s->m.current_picture.mb_mean  = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var   = (uint16_t*)s->dummy;
        s->m.current_picture.mc_mb_var= (uint16_t*)s->dummy;
        s->m.current_picture.mb_type= s->dummy;

        s->m.current_picture.motion_val[0]= s->motion_val8[plane] + 2;
        s->m.p_mv_table= s->motion_val16[plane] + s->m.mb_stride + 1;
        s->m.dsp= s->dsp; //move
        ff_init_me(&s->m);

        s->m.me.dia_size= s->avctx->dia_size;
        s->m.first_slice_line=1;
        for (y = 0; y < block_height; y++) {
            uint8_t src[stride*16];

            s->m.new_picture.data[0]= src - y*16*stride; //ugly
            s->m.mb_y= y;

            for(i=0; i<16 && i + 16*y<height; i++){
                memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
                for(x=width; x<16*block_width; x++)
                    src[i*stride+x]= src[i*stride+x-1];
            }
            for(; i<16 && i + 16*y<16*block_height; i++)
                memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x= x;
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line=0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line=1;
    for (y = 0; y < block_height; y++) {
        uint8_t src[stride*16];

        for(i=0; i<16 && i + 16*y<height; i++){
            memcpy(&src[i*stride], &src_plane[(i+16*y)*src_stride], width);
            for(x=width; x<16*block_width; x++)
                src[i*stride+x]= src[i*stride+x-1];
        }
        for(; i<16 && i + 16*y<16*block_height; i++)
            memcpy(&src[i*stride], &src[(i-1)*stride], 16*block_width);

        s->m.mb_y= y;
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[3][6][7*32];
            int count[3][6];
            int offset = y * 16 * stride + x * 16;
            uint8_t *decoded= decoded_plane + offset;
            uint8_t *ref= ref_plane + offset;
            int score[4]={0,0,0,0}, best;
            uint8_t temp[16*stride];

            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 3000){ //FIXME check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x= x;
            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            if(s->picture.pict_type == I_TYPE || (s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTRA)){
                for(i=0; i<6; i++){
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], 7*32);
                }
                if(s->picture.pict_type == P_TYPE){
                    const uint8_t *vlc= svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0]= vlc[1]*lambda;
                }
                score[0]+= encode_block(s, src+16*x, NULL, temp, stride, 5, 64, lambda, 1);
                for(i=0; i<6; i++){
                    count[0][i]= put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            }else
                score[0]= INT_MAX;

            best=0;

            if(s->picture.pict_type == P_TYPE){
                const uint8_t *vlc= svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr= h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if(s->m.mb_type[x + y*s->m.mb_stride]&CANDIDATE_MB_TYPE_INTER){
                    for(i=0; i<6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], 7*32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb= s->reorder_pb[5];
                    mx= motion_ptr[0];
                    my= motion_ptr[1];
                    assert(mx>=-32 && mx<=31);
                    assert(my>=-32 && my<=31);
                    assert(pred_x>=-32 && pred_x<=31);
                    assert(pred_y>=-32 && pred_y<=31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5]= s->m.pb;
                    score[1] += lambda*put_bits_count(&s->reorder_pb[5]);

                    dxy= (mx&1) + 2*(my&1);

                    s->dsp.put_pixels_tab[0][dxy](temp+16, ref + (mx>>1) + stride*(my>>1), stride, 16);

                    score[1]+= encode_block(s, src+16*x, temp+16, decoded, stride, 5, 64, lambda, 0);
                    best= score[1] <= score[0];

                    vlc= svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]= s->dsp.sse[0](NULL, src+16*x, ref, stride, 16);
                    score[2]+= vlc[1]*lambda;
                    if(score[2] < score[best] && mx==0 && my==0){
                        best=2;
                        s->dsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        for(i=0; i<6; i++){
                            count[2][i]=0;
                        }
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if(best==1){
                    for(i=0; i<6; i++){
                        count[1][i]= put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                }else{
                    motion_ptr[0] = motion_ptr[1] =
                    motion_ptr[2] = motion_ptr[3] =
                    motion_ptr[0+2*s->m.b8_stride] = motion_ptr[1+2*s->m.b8_stride] =
                    motion_ptr[2+2*s->m.b8_stride] = motion_ptr[3+2*s->m.b8_stride] = 0;
                }
            }

            s->rd_total += score[best];

            for(i=5; i>=0; i--){
                ff_copy_bits(&s->pb, reorder_buffer[best][i], count[best][i]);
            }

            if(best==0){
                s->dsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
            }
        }
        s->m.first_slice_line=0;
    }
    return 0;
}

static int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1Context * const s = avctx->priv_data;

    dsputil_init(&s->dsp, avctx);
    avctx->coded_frame= (AVFrame*)&s->picture;

    s->frame_width = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width = (s->frame_width + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width = (s->frame_width / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx= avctx;
    s->m.avctx= avctx;
    s->m.me.scratchpad= av_mallocz((avctx->width+64)*2*16*2*sizeof(uint8_t));
    s->m.me.map       = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->m.me.score_map = av_mallocz(ME_MAP_SIZE*sizeof(uint32_t));
    s->mb_type        = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int16_t));
    s->dummy          = av_mallocz((s->y_block_width+1)*s->y_block_height*sizeof(int32_t));
    h263_encode_init(&s->m); //mv_penalty

    return 0;
}

static int svq1_encode_frame(AVCodecContext *avctx, unsigned char *buf,
    int buf_size, void *data)
{
    SVQ1Context * const s = avctx->priv_data;
    AVFrame *pict = data;
    AVFrame * const p= (AVFrame*)&s->picture;
    AVFrame temp;
    int i;

    if(avctx->pix_fmt != PIX_FMT_YUV410P){
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    if(!s->current_picture.data[0]){
        avctx->get_buffer(avctx, &s->current_picture);
        avctx->get_buffer(avctx, &s->last_picture);
    }

    temp= s->current_picture;
    s->current_picture= s->last_picture;
    s->last_picture= temp;

    init_put_bits(&s->pb, buf, buf_size);

    *p = *pict;
    p->pict_type = avctx->frame_number % avctx->gop_size ? P_TYPE : I_TYPE;
    p->key_frame = p->pict_type == I_TYPE;

    svq1_write_header(s, p->pict_type);
    for(i=0; i<3; i++){
        if(svq1_encode_plane(s, i,
            s->picture.data[i], s->last_picture.data[i], s->current_picture.data[i],
            s->frame_width / (i?4:1), s->frame_height / (i?4:1),
            s->picture.linesize[i], s->current_picture.linesize[i]) < 0)
            return -1;
    }

//    align_put_bits(&s->pb);
    while(put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    return (put_bits_count(&s->pb) / 8);
}

static int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1Context * const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n", s->rd_total/(double)(avctx->width*avctx->height*avctx->frame_number));

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);

    for(i=0; i<3; i++){
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    return 0;
}

#endif //CONFIG_ENCODERS

AVCodec svq1_decoder = {
    "svq1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ1,
    sizeof(MpegEncContext),
    svq1_decode_init,
    NULL,
    svq1_decode_end,
    svq1_decode_frame,
    CODEC_CAP_DR1,
    .flush= ff_mpeg_flush,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, -1},
};

#ifdef CONFIG_ENCODERS

AVCodec svq1_encoder = {
    "svq1",
    CODEC_TYPE_VIDEO,
    CODEC_ID_SVQ1,
    sizeof(SVQ1Context),
    svq1_encode_init,
    svq1_encode_frame,
    svq1_encode_end,
    .pix_fmts= (enum PixelFormat[]){PIX_FMT_YUV410P, -1},
};

#endif //CONFIG_ENCODERS