/*
 * SVQ1 Encoder
 * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information about the SVQ1 algorithm, visit:
 * http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "hpeldsp.h"
#include "me_cmp.h"
#include "mpegvideo.h"
#include "h263.h"
#include "internal.h"
#include "mpegutils.h"
#include "svq1.h"
#include "svq1enc.h"
#include "svq1enc_cb.h"

#include "libavutil/avassert.h"
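
/* Write the SVQ1 frame header: the 22-bit frame code, a temporal
 * reference byte, the frame type, and (for intra frames) the frame size
 * code, falling back to explicit 12-bit width/height when the dimensions
 * are not in ff_svq1_frame_size_table. */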
static void svq1_write_header(SVQ1EncContext *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == AV_PICTURE_TYPE_I) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        i = ff_match_2uint16((void*)ff_svq1_frame_size_table,
                             FF_ARRAY_ELEMS(ff_svq1_frame_size_table),
                             s->frame_width, s->frame_height);
        put_bits(&s->pb, 3, i);

        if (i == 7) {
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6
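
/* Sum of squared differences between an int8_t codebook vector and the
 * int16_t residual block; the C version of the ssd_int8_vs_int16
 * function pointer set up in svq1_encode_init(). */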
static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
                               intptr_t size)
{
    int score = 0, i;

    for (i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);

    return score;
}
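
/* Recursively encode one block of the SVQ1 size hierarchy (level 5 is
 * 16x16, down to level 0 at 4x2). The block is coded either as a mean
 * plus up to six multi-stage codebook vectors, or split in two and coded
 * at level - 1, whichever gives the lower rate-distortion score.
 * Returns the score of the chosen encoding. */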
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
    int w            = 2 << (level + 2 >> 1);
    int h            = 2 << (level + 1 >> 1);
    int size         = w * h;
    int16_t (*block)[256] = s->encoded_block_levels[level];
    const int8_t *codebook_sum, *codebook;
    const uint16_t(*mean_vlc)[2];
    const uint8_t(*multistage_vlc)[2];

    best_score = 0;
    // FIXME: Optimize, this does not need to be done multiple times.
    if (intra) {
        codebook_sum   = svq1_intra_codebook_sum[level];
        codebook       = ff_svq1_intra_codebooks[level];
        mean_vlc       = ff_svq1_intra_mean_vlc;
        multistage_vlc = ff_svq1_intra_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride];
                block[0][x + w * y] = v;
                best_score         += v * v;
                block_sum[0]       += v;
            }
        }
    } else {
        codebook_sum   = svq1_inter_codebook_sum[level];
        codebook       = ff_svq1_inter_codebooks[level];
        mean_vlc       = ff_svq1_inter_mean_vlc + 256;
        multistage_vlc = ff_svq1_inter_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride] - ref[x + y * stride];
                block[0][x + w * y] = v;
                best_score         += v * v;
                block_sum[0]       += v;
            }
        }
    }

    best_count  = 0;
    best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
    best_mean   = block_sum[0] + (size >> 1) >> (level + 3);

    if (level < 4) {
        for (count = 1; count < 7; count++) {
            int best_vector_score = INT_MAX;
            int best_vector_sum   = -999, best_vector_mean = -999;
            const int stage       = count - 1;
            const int8_t *vector;

            for (i = 0; i < 16; i++) {
                int sum = codebook_sum[stage * 16 + i];
                int sqr, diff, score;

                vector = codebook + stage * size * 16 + i * size;
                sqr    = s->ssd_int8_vs_int16(vector, block[stage], size);
                diff   = block_sum[stage] - sum;
                score  = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64bit slooow
                if (score < best_vector_score) {
                    int mean = diff + (size >> 1) >> (level + 3);
                    av_assert2(mean > -300 && mean < 300);
                    mean               = av_clip(mean, intra ? 0 : -256, 255);
                    best_vector_score  = score;
                    best_vector[stage] = i;
                    best_vector_sum    = sum;
                    best_vector_mean   = mean;
                }
            }
            av_assert0(best_vector_mean != -999);
            vector = codebook + stage * size * 16 + best_vector[stage] * size;
            for (j = 0; j < size; j++)
                block[stage + 1][j] = block[stage][j] - vector[j];
            block_sum[stage + 1] = block_sum[stage] - best_vector_sum;
            best_vector_score   += lambda *
                                   (+1 + 4 * count +
                                    multistage_vlc[1 + count][1]
                                    + mean_vlc[best_vector_mean][1]);

            if (best_vector_score < best_score) {
                best_score = best_vector_score;
                best_count = count;
                best_mean  = best_vector_mean;
            }
        }
    }
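
    /* Try splitting the block in two (top/bottom halves on odd levels,
     * left/right halves on even levels) and coding each half at level - 1;
     * keep the split only if it beats the unsplit encoding above. */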
    split = 0;
    if (best_score > threshold && level) {
        int score  = 0;
        int offset = level & 1 ? stride * h / 2 : w / 2;
        PutBitContext backup[6];

        for (i = level - 1; i >= 0; i--)
            backup[i] = s->reorder_pb[i];
        score += encode_block(s, src, ref, decoded, stride, level - 1,
                              threshold >> 1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset,
                              stride, level - 1, threshold >> 1, lambda, intra);
        score += lambda;

        if (score < best_score) {
            best_score = score;
            split      = 1;
        } else {
            for (i = level - 1; i >= 0; i--)
                s->reorder_pb[i] = backup[i];
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if (!split) {
        av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
        av_assert1(best_mean >= -256 && best_mean < 256);
        av_assert1(best_count >= 0 && best_count < 7);
        av_assert1(level < 4 || best_count == 0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {
            av_assert2(best_vector[i] >= 0 && best_vector[i] < 16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                decoded[x + y * stride] = src[x + y * stride] -
                                          block[best_count][x + w * y] +
                                          best_mean;
    }

    return best_score;
}
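
/* Compute the MpegEncContext block indices for the macroblock at
 * (mb_x, mb_y) before running motion estimation on it. */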
static void init_block_index(MpegEncContext *s){
    s->block_index[0]= s->b8_stride*(s->mb_y*2    ) + s->mb_x*2;
    s->block_index[1]= s->b8_stride*(s->mb_y*2    ) + 1 + s->mb_x*2;
    s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) + s->mb_x*2;
    s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) + 1 + s->mb_x*2;
    s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x;
    s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x;
}
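
/* Encode one plane. For P-frames the mpegvideo motion estimation code is
 * run over the plane first; each 16x16 macroblock is then coded as intra,
 * inter (motion vector plus residual), or skip, whichever has the lowest
 * rate-distortion score. */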
static int svq1_encode_plane(SVQ1EncContext *s, int plane,
                             unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    uint8_t *src     = s->scratchbuf + stride * 32;
    const int lambda = (s->quality * s->quality) >>
                       (2 * FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;

    if (s->pict_type == AV_PICTURE_TYPE_P) {
        s->m.avctx                          = s->avctx;
        s->m.current_picture_ptr            = &s->m.current_picture;
        s->m.last_picture_ptr               = &s->m.last_picture;
        s->m.last_picture.f->data[0]        = ref_plane;
        s->m.linesize                       =
        s->m.last_picture.f->linesize[0]    =
        s->m.new_picture.f->linesize[0]     =
        s->m.current_picture.f->linesize[0] = stride;
        s->m.width                          = width;
        s->m.height                         = height;
        s->m.mb_width                       = block_width;
        s->m.mb_height                      = block_height;
        s->m.mb_stride                      = s->m.mb_width + 1;
        s->m.b8_stride                      = 2 * s->m.mb_width + 1;
        s->m.f_code                         = 1;
        s->m.pict_type                      = s->pict_type;
#if FF_API_MOTION_EST
FF_DISABLE_DEPRECATION_WARNINGS
        s->m.me_method = s->avctx->me_method;
        if (s->motion_est == FF_ME_EPZS) {
            if (s->avctx->me_method == ME_ZERO)
                s->motion_est = FF_ME_ZERO;
            else if (s->avctx->me_method == ME_EPZS)
                s->motion_est = FF_ME_EPZS;
            else if (s->avctx->me_method == ME_X1)
                s->motion_est = FF_ME_XONE;
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        s->m.motion_est             = s->motion_est;
        s->m.me.scene_change_score  = 0;
        // s->m.out_format          = FMT_H263;
        // s->m.unrestricted_mv     = 1;
        s->m.lambda                 = s->quality;
        s->m.qscale                 = s->m.lambda * 139 +
                                      FF_LAMBDA_SCALE * 64 >>
                                      FF_LAMBDA_SHIFT + 7;
        s->m.lambda2                = s->m.lambda * s->m.lambda +
                                      FF_LAMBDA_SCALE / 2 >>
                                      FF_LAMBDA_SHIFT;

        if (!s->motion_val8[plane]) {
            s->motion_val8[plane]  = av_mallocz((s->m.b8_stride *
                                                 block_height * 2 + 2) *
                                                2 * sizeof(int16_t));
            s->motion_val16[plane] = av_mallocz((s->m.mb_stride *
                                                 (block_height + 2) + 1) *
                                                2 * sizeof(int16_t));
            if (!s->motion_val8[plane] || !s->motion_val16[plane])
                return AVERROR(ENOMEM);
        }

        s->m.mb_type = s->mb_type;

        // dummies, to avoid segfaults
        s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
        s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
        s->m.current_picture.mb_type   = s->dummy;

        s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table                    = s->motion_val16[plane] +
                                             s->m.mb_stride + 1;
        s->m.mecc                          = s->mecc; // move
        ff_init_me(&s->m);

        s->m.me.dia_size      = s->avctx->dia_size;
        s->m.first_slice_line = 1;
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f->data[0] = src - y * 16 * stride; // ugly
            s->m.mb_y                   = y;

            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                       width);
                for (x = width; x < 16 * block_width; x++)
                    src[i * stride + x] = src[i * stride + x - 1];
            }
            for (; i < 16 && i + 16 * y < 16 * block_height; i++)
                memcpy(&src[i * stride], &src[(i - 1) * stride],
                       16 * block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x = x;
                init_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line = 0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
                        CANDIDATE_MB_TYPE_INTER, 0);
    }
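
    /* Main macroblock loop: each 16x16 block is copied into the padded
     * scratch buffer, the intra and inter candidates are written into
     * temporary reorder buffers, and the winner (or a skip code) is
     * copied into the real bitstream. */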
    s->m.first_slice_line = 1;
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                src[i * stride + x] = src[i * stride + x - 1];
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);

        s->m.mb_y = y;
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[2][6][7 * 32];
            int count[2][6];
            int offset       = y * 16 * stride + x * 16;
            uint8_t *decoded = decoded_plane + offset;
            uint8_t *ref     = ref_plane + offset;
            int score[4]     = { 0, 0, 0, 0 }, best;
            uint8_t *temp    = s->scratchbuf;

            if (s->pb.buf_end - s->pb.buf -
                (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x = x;
            init_block_index(&s->m);

            if (s->pict_type == AV_PICTURE_TYPE_I ||
                (s->m.mb_type[x + y * s->m.mb_stride] &
                 CANDIDATE_MB_TYPE_INTRA)) {
                for (i = 0; i < 6; i++)
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
                                  7 * 32);
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0] = vlc[1] * lambda;
                }
                score[0] += encode_block(s, src + 16 * x, NULL, temp, stride,
                                         5, 64, lambda, 1);
                for (i = 0; i < 6; i++) {
                    count[0][i] = put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            } else
                score[0] = INT_MAX;

            best = 0;

            if (s->pict_type == AV_PICTURE_TYPE_P) {
                const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if (s->m.mb_type[x + y * s->m.mb_stride] &
                    CANDIDATE_MB_TYPE_INTER) {
                    for (i = 0; i < 6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
                                      7 * 32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb = s->reorder_pb[5];
                    mx      = motion_ptr[0];
                    my      = motion_ptr[1];
                    av_assert1(mx     >= -32 && mx     <= 31);
                    av_assert1(my     >= -32 && my     <= 31);
                    av_assert1(pred_x >= -32 && pred_x <= 31);
                    av_assert1(pred_y >= -32 && pred_y <= 31);
                    ff_h263_encode_motion(&s->m.pb, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m.pb, my - pred_y, 1);
                    s->reorder_pb[5] = s->m.pb;
                    score[1]        += lambda * put_bits_count(&s->reorder_pb[5]);

                    dxy = (mx & 1) + 2 * (my & 1);

                    s->hdsp.put_pixels_tab[0][dxy](temp + 16*stride,
                                                   ref + (mx >> 1) +
                                                   stride * (my >> 1),
                                                   stride, 16);

                    score[1] += encode_block(s, src + 16 * x, temp + 16*stride,
                                             decoded, stride, 5, 64, lambda, 0);
                    best      = score[1] <= score[0];

                    vlc       = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]  = s->mecc.sse[0](NULL, src + 16 * x, ref,
                                               stride, 16);
                    score[2] += vlc[1] * lambda;
                    if (score[2] < score[best] && mx == 0 && my == 0) {
                        best = 2;
                        s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if (best == 1) {
                    for (i = 0; i < 6; i++) {
                        count[1][i] = put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                } else {
                    motion_ptr[0]                      =
                    motion_ptr[1]                      =
                    motion_ptr[2]                      =
                    motion_ptr[3]                      =
                    motion_ptr[0 + 2 * s->m.b8_stride] =
                    motion_ptr[1 + 2 * s->m.b8_stride] =
                    motion_ptr[2 + 2 * s->m.b8_stride] =
                    motion_ptr[3 + 2 * s->m.b8_stride] = 0;
                }
            }

            s->rd_total += score[best];

            if (best != 2)
                for (i = 5; i >= 0; i--)
                    avpriv_copy_bits(&s->pb, reorder_buffer[best][i],
                                     count[best][i]);
            if (best == 0)
                s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
        }
        s->m.first_slice_line = 0;
    }
    return 0;
}
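
/* Log the average rate-distortion score per pixel and free all
 * encoder allocations. */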
static av_cold int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
           s->rd_total / (double)(avctx->width * avctx->height *
                                  avctx->frame_number));

    s->m.mb_type = NULL;
    ff_mpv_common_end(&s->m);

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);
    av_freep(&s->scratchbuf);

    for (i = 0; i < 3; i++) {
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    av_frame_free(&s->current_picture);
    av_frame_free(&s->last_picture);

    return 0;
}
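
/* Allocate encoder state: DSP contexts, the current/last reference
 * frames, motion estimation scratch buffers, and the per-macroblock
 * type array. */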
static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int ret;

    if (avctx->width >= 4096 || avctx->height >= 4096) {
        av_log(avctx, AV_LOG_ERROR, "Dimensions too large, maximum is 4095x4095\n");
        return AVERROR(EINVAL);
    }

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx);

    s->current_picture = av_frame_alloc();
    s->last_picture    = av_frame_alloc();
    if (!s->current_picture || !s->last_picture) {
        svq1_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx   = avctx;
    s->m.avctx = avctx;

    if ((ret = ff_mpv_common_init(&s->m)) < 0) {
        svq1_encode_end(avctx);
        return ret;
    }

    s->m.picture_structure = PICT_FRAME;
    s->m.me.temp           =
    s->m.me.scratchpad     = av_mallocz((avctx->width + 64) *
                                        2 * 16 * 2 * sizeof(uint8_t));
    s->m.me.map            = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->m.me.score_map      = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->mb_type             = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int16_t));
    s->dummy               = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int32_t));
    s->ssd_int8_vs_int16   = ssd_int8_vs_int16_c;

    if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map ||
        !s->m.me.score_map || !s->mb_type || !s->dummy) {
        svq1_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    if (ARCH_PPC)
        ff_svq1enc_init_ppc(s);
    if (ARCH_X86)
        ff_svq1enc_init_x86(s);

    ff_h263_encode_init(&s->m); // mv_penalty

    return 0;
}
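
/* Encode one frame: choose I or P from gop_size, write the frame header,
 * then encode the luma plane and the two quarter-resolution chroma planes
 * of the YUV410P input. */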
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int i, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
                                MAX_MB_BYTES*3 + AV_INPUT_BUFFER_MIN_SIZE, 0)) < 0)
        return ret;

    if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    if (!s->current_picture->data[0]) {
        if ((ret = ff_get_buffer(avctx, s->current_picture, 0)) < 0) {
            return ret;
        }
    }
    if (!s->last_picture->data[0]) {
        ret = ff_get_buffer(avctx, s->last_picture, 0);
        if (ret < 0)
            return ret;
    }
    if (!s->scratchbuf) {
        s->scratchbuf = av_malloc_array(s->current_picture->linesize[0], 16 * 3);
        if (!s->scratchbuf)
            return AVERROR(ENOMEM);
    }

    FFSWAP(AVFrame*, s->current_picture, s->last_picture);

    init_put_bits(&s->pb, pkt->data, pkt->size);

    if (avctx->gop_size && (avctx->frame_number % avctx->gop_size))
        s->pict_type = AV_PICTURE_TYPE_P;
    else
        s->pict_type = AV_PICTURE_TYPE_I;
    s->quality = pict->quality;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = s->pict_type;
    avctx->coded_frame->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    ff_side_data_set_encoder_stats(pkt, pict->quality, NULL, 0, s->pict_type);

    svq1_write_header(s, s->pict_type);
    for (i = 0; i < 3; i++)
        if (svq1_encode_plane(s, i,
                              pict->data[i],
                              s->last_picture->data[i],
                              s->current_picture->data[i],
                              s->frame_width / (i ? 4 : 1),
                              s->frame_height / (i ? 4 : 1),
                              pict->linesize[i],
                              s->current_picture->linesize[i]) < 0) {
            int j;
            for (j = 0; j < i; j++) {
                av_freep(&s->motion_val8[j]);
                av_freep(&s->motion_val16[j]);
            }
            av_freep(&s->scratchbuf);
            return -1;
        }

    // avpriv_align_put_bits(&s->pb);
    while (put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    pkt->size = put_bits_count(&s->pb) / 8;
    if (s->pict_type == AV_PICTURE_TYPE_I)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
    { "motion-est", "Motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, { .i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, VE, "motion-est"},
        { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
        { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
        { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },

    { NULL },
};

static const AVClass svq1enc_class = {
    .class_name = "svq1enc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_svq1_encoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1EncContext),
    .priv_class     = &svq1enc_class,
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};