You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

701 lines
26KB

  1. /*
  2. * SVQ1 Encoder
  3. * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Sorenson Vector Quantizer #1 (SVQ1) video codec.
  24. * For more information of the SVQ1 algorithm, visit:
  25. * http://www.pcisys.net/~melanson/codecs/
  26. */
  27. #include "avcodec.h"
  28. #include "hpeldsp.h"
  29. #include "me_cmp.h"
  30. #include "mpegvideo.h"
  31. #include "h263.h"
  32. #include "internal.h"
  33. #include "mpegutils.h"
  34. #include "svq1.h"
  35. #include "svq1enc.h"
  36. #include "svq1enc_cb.h"
  37. #undef NDEBUG
  38. #include <assert.h>
  39. static void svq1_write_header(SVQ1EncContext *s, int frame_type)
  40. {
  41. int i;
  42. /* frame code */
  43. put_bits(&s->pb, 22, 0x20);
  44. /* temporal reference (sure hope this is a "don't care") */
  45. put_bits(&s->pb, 8, 0x00);
  46. /* frame type */
  47. put_bits(&s->pb, 2, frame_type - 1);
  48. if (frame_type == AV_PICTURE_TYPE_I) {
  49. /* no checksum since frame code is 0x20 */
  50. /* no embedded string either */
  51. /* output 5 unknown bits (2 + 2 + 1) */
  52. put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */
  53. i = ff_match_2uint16(ff_svq1_frame_size_table,
  54. FF_ARRAY_ELEMS(ff_svq1_frame_size_table),
  55. s->frame_width, s->frame_height);
  56. put_bits(&s->pb, 3, i);
  57. if (i == 7) {
  58. put_bits(&s->pb, 12, s->frame_width);
  59. put_bits(&s->pb, 12, s->frame_height);
  60. }
  61. }
  62. /* no checksum or extra data (next 2 bits get 0) */
  63. put_bits(&s->pb, 2, 0);
  64. }
  65. #define QUALITY_THRESHOLD 100
  66. #define THRESHOLD_MULTIPLIER 0.6
  67. static int ssd_int8_vs_int16_c(const int8_t *pix1, const int16_t *pix2,
  68. int size)
  69. {
  70. int score = 0, i;
  71. for (i = 0; i < size; i++)
  72. score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
  73. return score;
  74. }
/**
 * Recursively vector-quantize one block of the plane.
 *
 * Level 5 is a 16x16 block; each level down halves the block along
 * alternating axes (16x16, 16x8, 8x8, 8x4, 4x4, 4x2).  The routine codes
 * the block as a mean plus up to six codebook stages, and, when the score
 * exceeds @p threshold, also tries splitting it into two half-blocks coded
 * at level-1, keeping whichever choice has the lower rate-distortion score.
 * The chosen encoding is written into s->reorder_pb[level] and the
 * reconstruction into @p decoded.
 *
 * @param s         encoder context
 * @param src       source pixels, top-left of this block
 * @param ref       reference pixels for inter blocks (ignored when intra)
 * @param decoded   destination for the reconstructed pixels
 * @param stride    line size of src/ref/decoded
 * @param level     block size level, 5 (16x16) down to 0 (4x2)
 * @param threshold score above which splitting is attempted
 * @param lambda    rate-distortion weight for the bit cost
 * @param intra     nonzero to code pixels directly, 0 to code src - ref
 * @return the rate-distortion score of the chosen encoding
 */
static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
    /* block dimensions for this level (width halves on even levels,
     * height on odd ones) */
    int w = 2 << (level + 2 >> 1);
    int h = 2 << (level + 1 >> 1);
    int size = w * h;
    /* block[n] holds the residual left after n codebook stages */
    int16_t block[7][256];
    const int8_t *codebook_sum, *codebook;
    const uint16_t(*mean_vlc)[2];
    const uint8_t(*multistage_vlc)[2];

    best_score = 0;
    // FIXME: Optimize, this does not need to be done multiple times.
    if (intra) {
        /* intra: quantize the source pixels directly */
        codebook_sum   = svq1_intra_codebook_sum[level];
        codebook       = ff_svq1_intra_codebooks[level];
        mean_vlc       = ff_svq1_intra_mean_vlc;
        multistage_vlc = ff_svq1_intra_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride];
                block[0][x + w * y] = v;
                best_score += v * v;
                block_sum[0] += v;
            }
        }
    } else {
        /* inter: quantize the prediction residual; the mean table is
         * offset by 256 because inter means can be negative */
        codebook_sum   = svq1_inter_codebook_sum[level];
        codebook       = ff_svq1_inter_codebooks[level];
        mean_vlc       = ff_svq1_inter_mean_vlc + 256;
        multistage_vlc = ff_svq1_inter_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride] - ref[x + y * stride];
                block[0][x + w * y] = v;
                best_score += v * v;
                block_sum[0] += v;
            }
        }
    }

    /* baseline: code the block as its (rounded) mean with zero stages */
    best_count = 0;
    best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
    best_mean = block_sum[0] + (size >> 1) >> (level + 3);

    if (level < 4) {
        /* greedily add codebook stages 1..6; each stage picks the best of
         * 16 vectors against the residual left by the previous stage */
        for (count = 1; count < 7; count++) {
            int best_vector_score = INT_MAX;
            int best_vector_sum = -999, best_vector_mean = -999;
            const int stage = count - 1;
            const int8_t *vector;

            for (i = 0; i < 16; i++) {
                int sum = codebook_sum[stage * 16 + i];
                int sqr, diff, score;

                vector = codebook + stage * size * 16 + i * size;
                sqr    = s->ssd_int8_vs_int16(vector, block[stage], size);
                diff   = block_sum[stage] - sum;
                score  = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64 bits slooow
                if (score < best_vector_score) {
                    int mean = diff + (size >> 1) >> (level + 3);
                    assert(mean > -300 && mean < 300);
                    mean = av_clip(mean, intra ? 0 : -256, 255);
                    best_vector_score  = score;
                    best_vector[stage] = i;
                    best_vector_sum    = sum;
                    best_vector_mean   = mean;
                }
            }
            assert(best_vector_mean != -999);
            /* subtract the chosen vector to form the next stage's residual */
            vector = codebook + stage * size * 16 + best_vector[stage] * size;
            for (j = 0; j < size; j++)
                block[stage + 1][j] = block[stage][j] - vector[j];
            block_sum[stage + 1] = block_sum[stage] - best_vector_sum;
            /* add the lambda-weighted bit cost: split flag + 4 bits per
             * vector index + the stage-count and mean VLC lengths */
            best_vector_score += lambda *
                                 (+1 + 4 * count +
                                  multistage_vlc[1 + count][1]
                                  + mean_vlc[best_vector_mean][1]);

            if (best_vector_score < best_score) {
                best_score = best_vector_score;
                best_count = count;
                best_mean  = best_vector_mean;
            }
        }
    }

    /* try splitting into two half-blocks coded one level down */
    split = 0;
    if (best_score > threshold && level) {
        int score = 0;
        /* odd levels split top/bottom, even levels left/right */
        int offset = level & 1 ? stride * h / 2 : w / 2;
        PutBitContext backup[6];

        /* snapshot the sub-level bitstreams so the split can be undone */
        for (i = level - 1; i >= 0; i--)
            backup[i] = s->reorder_pb[i];
        score += encode_block(s, src, ref, decoded, stride, level - 1,
                              threshold >> 1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset,
                              stride, level - 1, threshold >> 1, lambda, intra);
        score += lambda; /* cost of the split flag itself */

        if (score < best_score) {
            best_score = score;
            split      = 1;
        } else {
            /* splitting lost: roll back the sub-block bitstreams */
            for (i = level - 1; i >= 0; i--)
                s->reorder_pb[i] = backup[i];
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if (!split) {
        assert(best_mean >= 0 && best_mean < 256 || !intra);
        assert(best_mean >= -256 && best_mean < 256);
        assert(best_count >= 0 && best_count < 7);
        assert(level < 4 || best_count == 0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {
            assert(best_vector[i] >= 0 && best_vector[i] < 16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        /* reconstruct the block exactly as the decoder will see it */
        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                decoded[x + y * stride] = src[x + y * stride] -
                                          block[best_count][x + w * y] +
                                          best_mean;
    }

    return best_score;
}
/**
 * Encode one plane (luma or one chroma) of the current frame.
 *
 * The plane is processed in 16x16 macroblocks.  For P frames a first pass
 * runs the shared mpegvideo motion estimation over the whole plane; the
 * second pass then codes each macroblock, choosing among intra, inter
 * (motion-compensated residual) and skip by rate-distortion score.
 *
 * @param s             encoder context
 * @param plane         plane index (0 = Y, 1/2 = chroma)
 * @param src_plane     source pixels
 * @param ref_plane     previous reconstructed plane (reference)
 * @param decoded_plane destination for the reconstructed plane
 * @param width         plane width in pixels
 * @param height        plane height in pixels
 * @param src_stride    line size of src_plane
 * @param stride        line size of ref_plane/decoded_plane
 * @return 0 on success, negative on error (OOM or output buffer full)
 */
static int svq1_encode_plane(SVQ1EncContext *s, int plane,
                             unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    /* 16-line bounce buffer holding the current, edge-padded MB row
     * (the first 16 lines of scratchbuf are used as temp by the MB loop) */
    uint8_t *src = s->scratchbuf + stride * 16;
    const int lambda = (s->quality * s->quality) >>
                       (2 * FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width + 15) / 16;
    block_height = (height + 15) / 16;

    if (s->pict_type == AV_PICTURE_TYPE_P) {
        /* set up just enough of the MpegEncContext for the shared motion
         * estimation code to operate on this plane */
        s->m.avctx                          = s->avctx;
        s->m.current_picture_ptr            = &s->m.current_picture;
        s->m.last_picture_ptr               = &s->m.last_picture;
        s->m.last_picture.f->data[0]        = ref_plane;
        s->m.linesize                       =
        s->m.last_picture.f->linesize[0]    =
        s->m.new_picture.f->linesize[0]     =
        s->m.current_picture.f->linesize[0] = stride;
        s->m.width                          = width;
        s->m.height                         = height;
        s->m.mb_width                       = block_width;
        s->m.mb_height                      = block_height;
        s->m.mb_stride                      = s->m.mb_width + 1;
        s->m.b8_stride                      = 2 * s->m.mb_width + 1;
        s->m.f_code                         = 1;
        s->m.pict_type                      = s->pict_type;
#if FF_API_MOTION_EST
FF_DISABLE_DEPRECATION_WARNINGS
        /* honor the deprecated avctx->me_method setting when the private
         * option was left at its default */
        s->m.me_method = s->avctx->me_method;
        if (s->motion_est == FF_ME_EPZS) {
            if (s->avctx->me_method == ME_ZERO)
                s->motion_est = FF_ME_ZERO;
            else if (s->avctx->me_method == ME_EPZS)
                s->motion_est = FF_ME_EPZS;
            else if (s->avctx->me_method == ME_X1)
                s->motion_est = FF_ME_XONE;
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        s->m.motion_est            = s->motion_est;
        s->m.me.scene_change_score = 0;
        // s->m.out_format = FMT_H263;
        // s->m.unrestricted_mv = 1;
        s->m.lambda = s->quality;
        s->m.qscale = s->m.lambda * 139 +
                      FF_LAMBDA_SCALE * 64 >>
                      FF_LAMBDA_SHIFT + 7;
        s->m.lambda2 = s->m.lambda * s->m.lambda +
                       FF_LAMBDA_SCALE / 2 >>
                       FF_LAMBDA_SHIFT;

        /* motion vector tables are allocated lazily, once per plane */
        if (!s->motion_val8[plane]) {
            s->motion_val8[plane]  = av_mallocz((s->m.b8_stride *
                                                 block_height * 2 + 2) *
                                                2 * sizeof(int16_t));
            s->motion_val16[plane] = av_mallocz((s->m.mb_stride *
                                                 (block_height + 2) + 1) *
                                                2 * sizeof(int16_t));
            if (!s->motion_val8[plane] || !s->motion_val16[plane])
                return AVERROR(ENOMEM);
        }

        s->m.mb_type = s->mb_type;

        // dummies, to avoid segfaults
        s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
        s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
        s->m.current_picture.mb_type   = s->dummy;

        s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table                    = s->motion_val16[plane] +
                                             s->m.mb_stride + 1;
        s->m.mecc                          = s->mecc; // move
        ff_init_me(&s->m);

        s->m.me.dia_size      = s->avctx->dia_size;
        s->m.first_slice_line = 1;
        /* first pass: estimate motion for every macroblock */
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f->data[0] = src - y * 16 * stride; // ugly
            s->m.mb_y                   = y;

            /* copy this MB row into the bounce buffer, replicating the
             * rightmost column out to the 16-pixel boundary */
            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                       width);
                for (x = width; x < 16 * block_width; x++)
                    src[i * stride + x] = src[i * stride + x - 1];
            }
            /* replicate the bottom line out to the 16-pixel boundary */
            for (; i < 16 && i + 16 * y < 16 * block_height; i++)
                memcpy(&src[i * stride], &src[(i - 1) * stride],
                       16 * block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x = x;
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);
                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line = 0;
        }

        /* clamp vectors to the range the SVQ1 bitstream can represent */
        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
                        CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line = 1;
    /* second pass: actually encode each macroblock */
    for (y = 0; y < block_height; y++) {
        /* refill the edge-padded bounce buffer for this MB row */
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                src[i * stride + x] = src[i * stride + x - 1];
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);

        s->m.mb_y = y;
        for (x = 0; x < block_width; x++) {
            /* candidate bitstreams per mode: [0] intra, [1] inter, [2] skip */
            uint8_t reorder_buffer[3][6][7 * 32];
            int count[3][6];
            int offset       = y * 16 * stride + x * 16;
            uint8_t *decoded = decoded_plane + offset;
            uint8_t *ref     = ref_plane + offset;
            int score[4]     = { 0, 0, 0, 0 }, best;
            uint8_t *temp    = s->scratchbuf;

            /* bail out before the worst-case macroblock could overflow
             * the output buffer */
            if (s->pb.buf_end - s->pb.buf -
                (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x = x;
            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            /* try intra when forced (I frame) or suggested by the ME pass */
            if (s->pict_type == AV_PICTURE_TYPE_I ||
                (s->m.mb_type[x + y * s->m.mb_stride] &
                 CANDIDATE_MB_TYPE_INTRA)) {
                for (i = 0; i < 6; i++)
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
                                  7 * 32);
                if (s->pict_type == AV_PICTURE_TYPE_P) {
                    const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0] = vlc[1] * lambda;
                }
                score[0] += encode_block(s, src + 16 * x, NULL, temp, stride,
                                         5, 64, lambda, 1);
                for (i = 0; i < 6; i++) {
                    count[0][i] = put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            } else
                score[0] = INT_MAX;

            best = 0;

            if (s->pict_type == AV_PICTURE_TYPE_P) {
                const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                /* try inter coding when the ME pass marked this MB inter */
                if (s->m.mb_type[x + y * s->m.mb_stride] &
                    CANDIDATE_MB_TYPE_INTER) {
                    for (i = 0; i < 6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
                                      7 * 32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    /* code the MV difference through the H.263 helpers */
                    s->m.pb = s->reorder_pb[5];
                    mx      = motion_ptr[0];
                    my      = motion_ptr[1];
                    assert(mx >= -32 && mx <= 31);
                    assert(my >= -32 && my <= 31);
                    assert(pred_x >= -32 && pred_x <= 31);
                    assert(pred_y >= -32 && pred_y <= 31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5] = s->m.pb;
                    score[1] += lambda * put_bits_count(&s->reorder_pb[5]);

                    /* half-pel motion compensation into temp+16, then code
                     * the residual against it */
                    dxy = (mx & 1) + 2 * (my & 1);

                    s->hdsp.put_pixels_tab[0][dxy](temp + 16,
                                                   ref + (mx >> 1) +
                                                   stride * (my >> 1),
                                                   stride, 16);

                    score[1] += encode_block(s, src + 16 * x, temp + 16,
                                             decoded, stride, 5, 64, lambda, 0);
                    best = score[1] <= score[0];

                    /* also consider skip (straight copy of the reference),
                     * only valid for a zero motion vector */
                    vlc      = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2] = s->mecc.sse[0](NULL, src + 16 * x, ref,
                                              stride, 16);
                    score[2] += vlc[1] * lambda;
                    if (score[2] < score[best] && mx == 0 && my == 0) {
                        best = 2;
                        s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        /* skip bits go straight into s->pb; nothing to reorder */
                        for (i = 0; i < 6; i++)
                            count[2][i] = 0;
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if (best == 1) {
                    for (i = 0; i < 6; i++) {
                        count[1][i] = put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                } else {
                    /* inter was not chosen: zero this MB's vectors so later
                     * MV prediction matches what the decoder will see */
                    motion_ptr[0]                      =
                    motion_ptr[1]                      =
                    motion_ptr[2]                      =
                    motion_ptr[3]                      =
                    motion_ptr[0 + 2 * s->m.b8_stride] =
                    motion_ptr[1 + 2 * s->m.b8_stride] =
                    motion_ptr[2 + 2 * s->m.b8_stride] =
                    motion_ptr[3 + 2 * s->m.b8_stride] = 0;
                }
            }

            s->rd_total += score[best];

            /* append the winning candidate's bits, levels 5 down to 0 */
            for (i = 5; i >= 0; i--)
                avpriv_copy_bits(&s->pb, reorder_buffer[best][i],
                                 count[best][i]);
            /* intra reconstructs into temp; copy it to the decoded plane */
            if (best == 0)
                s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
        }
        s->m.first_slice_line = 0;
    }
    return 0;
}
  429. static av_cold int svq1_encode_end(AVCodecContext *avctx)
  430. {
  431. SVQ1EncContext *const s = avctx->priv_data;
  432. int i;
  433. av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
  434. s->rd_total / (double)(avctx->width * avctx->height *
  435. avctx->frame_number));
  436. s->m.mb_type = NULL;
  437. ff_mpv_common_end(&s->m);
  438. av_freep(&s->m.me.scratchpad);
  439. av_freep(&s->m.me.map);
  440. av_freep(&s->m.me.score_map);
  441. av_freep(&s->mb_type);
  442. av_freep(&s->dummy);
  443. av_freep(&s->scratchbuf);
  444. for (i = 0; i < 3; i++) {
  445. av_freep(&s->motion_val8[i]);
  446. av_freep(&s->motion_val16[i]);
  447. }
  448. av_frame_free(&s->current_picture);
  449. av_frame_free(&s->last_picture);
  450. return 0;
  451. }
/**
 * One-time encoder initialization: DSP function tables, reference frames,
 * motion-estimation scratch buffers and per-macroblock tables.
 * On any failure svq1_encode_end() is called to release what was already
 * allocated before returning the error.
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int ret;

    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    ff_me_cmp_init(&s->mecc, avctx);
    ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx);

    s->current_picture = av_frame_alloc();
    s->last_picture    = av_frame_alloc();
    if (!s->current_picture || !s->last_picture) {
        svq1_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    /* 4:1:0 content: chroma planes are a quarter of the luma size in
     * each dimension */
    s->y_block_width  = (s->frame_width + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;
    s->c_block_width  = (s->frame_width / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx   = avctx;
    s->m.avctx = avctx;

    if ((ret = ff_mpv_common_init(&s->m)) < 0) {
        svq1_encode_end(avctx);
        return ret;
    }

    s->m.picture_structure = PICT_FRAME;
    s->m.me.temp           =
    s->m.me.scratchpad     = av_mallocz((avctx->width + 64) *
                                        2 * 16 * 2 * sizeof(uint8_t));
    s->m.me.map            = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->m.me.score_map      = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->mb_type             = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int16_t));
    s->dummy               = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int32_t));
    s->ssd_int8_vs_int16   = ssd_int8_vs_int16_c;

    if (!s->m.me.temp || !s->m.me.scratchpad || !s->m.me.map ||
        !s->m.me.score_map || !s->mb_type || !s->dummy) {
        svq1_encode_end(avctx);
        return AVERROR(ENOMEM);
    }

    /* let architecture-specific code replace ssd_int8_vs_int16 with a
     * SIMD implementation where available */
    if (ARCH_PPC)
        ff_svq1enc_init_ppc(s);
    if (ARCH_X86)
        ff_svq1enc_init_x86(s);

    ff_h263_encode_init(&s->m); // mv_penalty

    return 0;
}
/**
 * Encode one input frame into an SVQ1 packet.
 *
 * Picture type alternates I/P according to avctx->gop_size; all three
 * planes are encoded via svq1_encode_plane() and the bitstream is padded
 * to a 32-bit boundary.
 *
 * @param avctx       codec context
 * @param pkt         output packet (allocated here if empty)
 * @param pict        input frame, must be YUV410P
 * @param got_packet  set to 1 when a packet was produced
 * @return 0 on success, a negative value on failure
 */
static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    SVQ1EncContext *const s = avctx->priv_data;
    int i, ret;
    uint8_t *sd;

    /* allocate a worst-case sized output packet */
    if (!pkt->data &&
        (ret = av_new_packet(pkt, s->y_block_width * s->y_block_height *
                             MAX_MB_BYTES * 3 + AV_INPUT_BUFFER_MIN_SIZE)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
        return ret;
    }

    if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    /* lazily allocate the two reconstruction frames and the scratch
     * buffer on the first frame */
    if (!s->current_picture->data[0]) {
        ret = ff_get_buffer(avctx, s->current_picture, 0);
        if (ret < 0)
            return ret;
    }
    if (!s->last_picture->data[0]) {
        ret = ff_get_buffer(avctx, s->last_picture, 0);
        if (ret < 0)
            return ret;
    }
    if (!s->scratchbuf) {
        s->scratchbuf = av_malloc(s->current_picture->linesize[0] * 16 * 2);
        if (!s->scratchbuf)
            return AVERROR(ENOMEM);
    }

    /* the previous reconstruction becomes the reference for this frame */
    FFSWAP(AVFrame*, s->current_picture, s->last_picture);

    init_put_bits(&s->pb, pkt->data, pkt->size);

    /* I frame at each GOP boundary, P frames in between */
    if (avctx->gop_size && (avctx->frame_number % avctx->gop_size))
        s->pict_type = AV_PICTURE_TYPE_P;
    else
        s->pict_type = AV_PICTURE_TYPE_I;
    s->quality = pict->quality;

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = s->pict_type;
    avctx->coded_frame->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* export the quantizer via packet side data */
    sd = av_packet_new_side_data(pkt, AV_PKT_DATA_QUALITY_FACTOR, sizeof(int));
    if (!sd)
        return AVERROR(ENOMEM);
    *(int *)sd = pict->quality;

    svq1_write_header(s, s->pict_type);
    for (i = 0; i < 3; i++)
        if (svq1_encode_plane(s, i,
                              pict->data[i],
                              s->last_picture->data[i],
                              s->current_picture->data[i],
                              s->frame_width / (i ? 4 : 1),
                              s->frame_height / (i ? 4 : 1),
                              pict->linesize[i],
                              s->current_picture->linesize[i]) < 0) {
            /* release per-plane state on failure so a retry starts clean */
            int j;
            for (j = 0; j < i; j++) {
                av_freep(&s->motion_val8[j]);
                av_freep(&s->motion_val16[j]);
            }
            av_freep(&s->scratchbuf);
            return -1;
        }

    // avpriv_align_put_bits(&s->pb);
    /* pad the bitstream out to a 32-bit boundary */
    while (put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    pkt->size = put_bits_count(&s->pb) / 8;
    if (s->pict_type == AV_PICTURE_TYPE_I)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}
#define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

/* Private encoder options: selectable motion estimation algorithm. */
static const AVOption options[] = {
    { "motion-est", "Motion estimation algorithm", OFFSET(motion_est), AV_OPT_TYPE_INT, { .i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, VE, "motion-est"},
    { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
    { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
    { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, "motion-est" },
    { NULL },
};

static const AVClass svq1enc_class = {
    .class_name = "svq1enc",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

/* Codec registration entry for the SVQ1 encoder (YUV410P input only). */
AVCodec ff_svq1_encoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1EncContext),
    .priv_class     = &svq1enc_class,
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};