You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

638 lines
24KB

  1. /*
  2. * SVQ1 Encoder
  3. * Copyright (C) 2004 Mike Melanson <melanson@pcisys.net>
  4. *
  5. * This file is part of Libav.
  6. *
  7. * Libav is free software; you can redistribute it and/or
  8. * modify it under the terms of the GNU Lesser General Public
  9. * License as published by the Free Software Foundation; either
  10. * version 2.1 of the License, or (at your option) any later version.
  11. *
  12. * Libav is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  15. * Lesser General Public License for more details.
  16. *
  17. * You should have received a copy of the GNU Lesser General Public
  18. * License along with Libav; if not, write to the Free Software
  19. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  20. */
  21. /**
  22. * @file
  23. * Sorenson Vector Quantizer #1 (SVQ1) video codec.
  24. * For more information of the SVQ1 algorithm, visit:
  25. * http://www.pcisys.net/~melanson/codecs/
  26. */
  27. #include "avcodec.h"
  28. #include "dsputil.h"
  29. #include "hpeldsp.h"
  30. #include "mpegvideo.h"
  31. #include "h263.h"
  32. #include "internal.h"
  33. #include "svq1.h"
  34. #include "svq1enc_cb.h"
  35. #undef NDEBUG
  36. #include <assert.h>
/**
 * Private encoder state for one SVQ1 encoding session.
 */
typedef struct SVQ1Context {
    /* FIXME: Needed for motion estimation, should not be used for anything
     * else, the idea is to make the motion estimation eventually independent
     * of MpegEncContext, so this will be removed then. */
    MpegEncContext m;

    AVCodecContext *avctx;
    DSPContext dsp;
    HpelDSPContext hdsp;
    AVFrame picture;          ///< coded_frame / per-frame metadata copy
    AVFrame current_picture;  ///< reconstruction of the frame being encoded
    AVFrame last_picture;     ///< previous reconstructed frame (P-frame reference)
    PutBitContext pb;         ///< output bitstream writer
    GetBitContext gb;

    /* why ooh why this sick breadth first order,
     * everything is slower and more complex */
    PutBitContext reorder_pb[6];  ///< one writer per split level, merged breadth-first

    int frame_width;
    int frame_height;

    /* Y plane block dimensions */
    int y_block_width;
    int y_block_height;

    /* U & V plane (C planes) block dimensions */
    int c_block_width;
    int c_block_height;

    uint16_t *mb_type;            ///< per-MB candidate type flags for the ME code
    uint32_t *dummy;              ///< scratch table handed to ME fields it must not really use
    int16_t (*motion_val8[3])[2]; ///< per-plane 8x8 motion vectors (ME scratch)
    int16_t (*motion_val16[3])[2];///< per-plane 16x16 motion vectors (ME scratch)
    int64_t rd_total;             ///< accumulated rate-distortion score, logged at close
    uint8_t *scratchbuf;          ///< 2x16 rows of padded source / temp prediction
} SVQ1Context;
/**
 * Write the SVQ1 frame header into s->pb.
 *
 * @param s          encoder context (s->pb must be initialized)
 * @param frame_type AV_PICTURE_TYPE_I or AV_PICTURE_TYPE_P; encoded as
 *                   frame_type - 1 in the 2-bit type field
 */
static void svq1_write_header(SVQ1Context *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == AV_PICTURE_TYPE_I) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        /* look up the frame size in the standard table; index 7 means
         * "explicit dimensions follow" */
        i = ff_match_2uint16(ff_svq1_frame_size_table,
                             FF_ARRAY_ELEMS(ff_svq1_frame_size_table),
                             s->frame_width, s->frame_height);
        put_bits(&s->pb, 3, i);

        if (i == 7) {
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}
  94. #define QUALITY_THRESHOLD 100
  95. #define THRESHOLD_MULTIPLIER 0.6
/**
 * Recursively encode one block of the current plane with multistage VQ.
 *
 * At each level the block is either coded directly (mean + up to 6 codebook
 * stages) or split in two halves (alternating horizontal/vertical per level)
 * and both halves encoded at level - 1, whichever gives the lower
 * rate-distortion score. Bits are written into s->reorder_pb[level] so the
 * caller can merge them in the bitstream's breadth-first order.
 *
 * @param s         encoder context
 * @param src       source pixels (stride `stride`)
 * @param ref       prediction reference (only read when !intra)
 * @param decoded   where the reconstructed pixels are written
 * @param stride    line size of src/ref/decoded
 * @param level     split level, 5 (16x16) down to 0 (2x2)
 * @param threshold score above which splitting is attempted
 * @param lambda    rate-distortion tradeoff multiplier
 * @param intra     nonzero for intra coding, 0 for inter (src - ref residual)
 * @return the rate-distortion score of the chosen encoding
 */
static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    /* 7 entries (initial block + 6 stages); the 7th is zero-initialized
     * implicitly by the 6-element initializer */
    int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
    /* block dimensions per level: 16x16, 16x8, 8x8, 8x4, 4x4, 4x2 */
    int w            = 2 << (level + 2 >> 1);
    int h            = 2 << (level + 1 >> 1);
    int size         = w * h;
    int16_t block[7][256]; /* residual after each stage */
    const int8_t *codebook_sum, *codebook;
    const uint16_t(*mean_vlc)[2];
    const uint8_t(*multistage_vlc)[2];

    best_score = 0;
    // FIXME: Optimize, this does not need to be done multiple times.
    if (intra) {
        codebook_sum   = svq1_intra_codebook_sum[level];
        codebook       = ff_svq1_intra_codebooks[level];
        mean_vlc       = ff_svq1_intra_mean_vlc;
        multistage_vlc = ff_svq1_intra_multistage_vlc[level];
        /* stage-0 input is the raw source block; accumulate energy and sum */
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride];
                block[0][x + w * y] = v;
                best_score         += v * v;
                block_sum[0]       += v;
            }
        }
    } else {
        codebook_sum   = svq1_inter_codebook_sum[level];
        codebook       = ff_svq1_inter_codebooks[level];
        /* inter mean VLC table is centered on 0 → offset by 256 */
        mean_vlc       = ff_svq1_inter_mean_vlc + 256;
        multistage_vlc = ff_svq1_inter_multistage_vlc[level];
        /* stage-0 input is the prediction residual */
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride] - ref[x + y * stride];
                block[0][x + w * y] = v;
                best_score         += v * v;
                block_sum[0]       += v;
            }
        }
    }

    /* baseline: mean-only coding; subtract the mean's contribution
     * (sum^2 / size, with size == 1 << (level + 3)) from the energy */
    best_count  = 0;
    best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
    best_mean   = block_sum[0] + (size >> 1) >> (level + 3);

    if (level < 4) {
        /* try adding codebook stages one at a time; each stage picks the
         * best of 16 vectors against the previous stage's residual */
        for (count = 1; count < 7; count++) {
            int best_vector_score = INT_MAX;
            int best_vector_sum = -999, best_vector_mean = -999;
            const int stage       = count - 1;
            const int8_t *vector;

            for (i = 0; i < 16; i++) {
                int sum = codebook_sum[stage * 16 + i];
                int sqr, diff, score;

                vector = codebook + stage * size * 16 + i * size;
                sqr    = s->dsp.ssd_int8_vs_int16(vector, block[stage], size);
                diff   = block_sum[stage] - sum;
                score  = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64bit slooow
                if (score < best_vector_score) {
                    int mean = diff + (size >> 1) >> (level + 3);
                    assert(mean > -300 && mean < 300);
                    mean                = av_clip(mean, intra ? 0 : -256, 255);
                    best_vector_score   = score;
                    best_vector[stage]  = i;
                    best_vector_sum     = sum;
                    best_vector_mean    = mean;
                }
            }
            assert(best_vector_mean != -999);
            /* compute the residual after subtracting the chosen vector */
            vector = codebook + stage * size * 16 + best_vector[stage] * size;
            for (j = 0; j < size; j++)
                block[stage + 1][j] = block[stage][j] - vector[j];
            block_sum[stage + 1] = block_sum[stage] - best_vector_sum;
            /* add the rate cost: split flags + 4 bits per vector index
             * + multistage VLC + mean VLC */
            best_vector_score   += lambda *
                                   (+1 + 4 * count +
                                    multistage_vlc[1 + count][1]
                                    + mean_vlc[best_vector_mean][1]);

            if (best_vector_score < best_score) {
                best_score = best_vector_score;
                best_count = count;
                best_mean  = best_vector_mean;
            }
        }
    }

    split = 0;
    if (best_score > threshold && level) {
        int score = 0;
        /* split horizontally on odd levels, vertically on even ones */
        int offset  = level & 1 ? stride * h / 2 : w / 2;
        PutBitContext backup[6];

        /* snapshot the lower-level writers so we can roll back if the
         * split turns out worse */
        for (i = level - 1; i >= 0; i--)
            backup[i] = s->reorder_pb[i];
        score += encode_block(s, src, ref, decoded, stride, level - 1,
                              threshold >> 1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset,
                              stride, level - 1, threshold >> 1, lambda, intra);
        score += lambda; /* cost of the split bit itself */

        if (score < best_score) {
            best_score = score;
            split      = 1;
        } else {
            for (i = level - 1; i >= 0; i--)
                s->reorder_pb[i] = backup[i];
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if (!split) {
        assert(best_mean >= 0 && best_mean < 256 || !intra);
        assert(best_mean >= -256 && best_mean < 256);
        assert(best_count >= 0 && best_count < 7);
        assert(level < 4 || best_count == 0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {
            assert(best_vector[i] >= 0 && best_vector[i] < 16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        /* reconstruct: source minus the remaining residual plus the mean */
        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                decoded[x + y * stride] = src[x + y * stride] -
                                          block[best_count][x + w * y] +
                                          best_mean;
    }

    return best_score;
}
  226. static int svq1_encode_plane(SVQ1Context *s, int plane,
  227. unsigned char *src_plane,
  228. unsigned char *ref_plane,
  229. unsigned char *decoded_plane,
  230. int width, int height, int src_stride, int stride)
  231. {
  232. int x, y;
  233. int i;
  234. int block_width, block_height;
  235. int level;
  236. int threshold[6];
  237. uint8_t *src = s->scratchbuf + stride * 16;
  238. const int lambda = (s->picture.quality * s->picture.quality) >>
  239. (2 * FF_LAMBDA_SHIFT);
  240. /* figure out the acceptable level thresholds in advance */
  241. threshold[5] = QUALITY_THRESHOLD;
  242. for (level = 4; level >= 0; level--)
  243. threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;
  244. block_width = (width + 15) / 16;
  245. block_height = (height + 15) / 16;
  246. if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
  247. s->m.avctx = s->avctx;
  248. s->m.current_picture_ptr = &s->m.current_picture;
  249. s->m.last_picture_ptr = &s->m.last_picture;
  250. s->m.last_picture.f.data[0] = ref_plane;
  251. s->m.linesize =
  252. s->m.last_picture.f.linesize[0] =
  253. s->m.new_picture.f.linesize[0] =
  254. s->m.current_picture.f.linesize[0] = stride;
  255. s->m.width = width;
  256. s->m.height = height;
  257. s->m.mb_width = block_width;
  258. s->m.mb_height = block_height;
  259. s->m.mb_stride = s->m.mb_width + 1;
  260. s->m.b8_stride = 2 * s->m.mb_width + 1;
  261. s->m.f_code = 1;
  262. s->m.pict_type = s->picture.pict_type;
  263. s->m.me_method = s->avctx->me_method;
  264. s->m.me.scene_change_score = 0;
  265. s->m.flags = s->avctx->flags;
  266. // s->m.out_format = FMT_H263;
  267. // s->m.unrestricted_mv = 1;
  268. s->m.lambda = s->picture.quality;
  269. s->m.qscale = s->m.lambda * 139 +
  270. FF_LAMBDA_SCALE * 64 >>
  271. FF_LAMBDA_SHIFT + 7;
  272. s->m.lambda2 = s->m.lambda * s->m.lambda +
  273. FF_LAMBDA_SCALE / 2 >>
  274. FF_LAMBDA_SHIFT;
  275. if (!s->motion_val8[plane]) {
  276. s->motion_val8[plane] = av_mallocz((s->m.b8_stride *
  277. block_height * 2 + 2) *
  278. 2 * sizeof(int16_t));
  279. s->motion_val16[plane] = av_mallocz((s->m.mb_stride *
  280. (block_height + 2) + 1) *
  281. 2 * sizeof(int16_t));
  282. }
  283. s->m.mb_type = s->mb_type;
  284. // dummies, to avoid segfaults
  285. s->m.current_picture.mb_mean = (uint8_t *)s->dummy;
  286. s->m.current_picture.mb_var = (uint16_t *)s->dummy;
  287. s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
  288. s->m.current_picture.mb_type = s->dummy;
  289. s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
  290. s->m.p_mv_table = s->motion_val16[plane] +
  291. s->m.mb_stride + 1;
  292. s->m.dsp = s->dsp; // move
  293. ff_init_me(&s->m);
  294. s->m.me.dia_size = s->avctx->dia_size;
  295. s->m.first_slice_line = 1;
  296. for (y = 0; y < block_height; y++) {
  297. s->m.new_picture.f.data[0] = src - y * 16 * stride; // ugly
  298. s->m.mb_y = y;
  299. for (i = 0; i < 16 && i + 16 * y < height; i++) {
  300. memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
  301. width);
  302. for (x = width; x < 16 * block_width; x++)
  303. src[i * stride + x] = src[i * stride + x - 1];
  304. }
  305. for (; i < 16 && i + 16 * y < 16 * block_height; i++)
  306. memcpy(&src[i * stride], &src[(i - 1) * stride],
  307. 16 * block_width);
  308. for (x = 0; x < block_width; x++) {
  309. s->m.mb_x = x;
  310. ff_init_block_index(&s->m);
  311. ff_update_block_index(&s->m);
  312. ff_estimate_p_frame_motion(&s->m, x, y);
  313. }
  314. s->m.first_slice_line = 0;
  315. }
  316. ff_fix_long_p_mvs(&s->m);
  317. ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
  318. CANDIDATE_MB_TYPE_INTER, 0);
  319. }
  320. s->m.first_slice_line = 1;
  321. for (y = 0; y < block_height; y++) {
  322. for (i = 0; i < 16 && i + 16 * y < height; i++) {
  323. memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
  324. width);
  325. for (x = width; x < 16 * block_width; x++)
  326. src[i * stride + x] = src[i * stride + x - 1];
  327. }
  328. for (; i < 16 && i + 16 * y < 16 * block_height; i++)
  329. memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);
  330. s->m.mb_y = y;
  331. for (x = 0; x < block_width; x++) {
  332. uint8_t reorder_buffer[3][6][7 * 32];
  333. int count[3][6];
  334. int offset = y * 16 * stride + x * 16;
  335. uint8_t *decoded = decoded_plane + offset;
  336. uint8_t *ref = ref_plane + offset;
  337. int score[4] = { 0, 0, 0, 0 }, best;
  338. uint8_t *temp = s->scratchbuf;
  339. if (s->pb.buf_end - s->pb.buf -
  340. (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size
  341. av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
  342. return -1;
  343. }
  344. s->m.mb_x = x;
  345. ff_init_block_index(&s->m);
  346. ff_update_block_index(&s->m);
  347. if (s->picture.pict_type == AV_PICTURE_TYPE_I ||
  348. (s->m.mb_type[x + y * s->m.mb_stride] &
  349. CANDIDATE_MB_TYPE_INTRA)) {
  350. for (i = 0; i < 6; i++)
  351. init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
  352. 7 * 32);
  353. if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
  354. const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
  355. put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
  356. score[0] = vlc[1] * lambda;
  357. }
  358. score[0] += encode_block(s, src + 16 * x, NULL, temp, stride,
  359. 5, 64, lambda, 1);
  360. for (i = 0; i < 6; i++) {
  361. count[0][i] = put_bits_count(&s->reorder_pb[i]);
  362. flush_put_bits(&s->reorder_pb[i]);
  363. }
  364. } else
  365. score[0] = INT_MAX;
  366. best = 0;
  367. if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
  368. const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
  369. int mx, my, pred_x, pred_y, dxy;
  370. int16_t *motion_ptr;
  371. motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
  372. if (s->m.mb_type[x + y * s->m.mb_stride] &
  373. CANDIDATE_MB_TYPE_INTER) {
  374. for (i = 0; i < 6; i++)
  375. init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
  376. 7 * 32);
  377. put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
  378. s->m.pb = s->reorder_pb[5];
  379. mx = motion_ptr[0];
  380. my = motion_ptr[1];
  381. assert(mx >= -32 && mx <= 31);
  382. assert(my >= -32 && my <= 31);
  383. assert(pred_x >= -32 && pred_x <= 31);
  384. assert(pred_y >= -32 && pred_y <= 31);
  385. ff_h263_encode_motion(&s->m, mx - pred_x, 1);
  386. ff_h263_encode_motion(&s->m, my - pred_y, 1);
  387. s->reorder_pb[5] = s->m.pb;
  388. score[1] += lambda * put_bits_count(&s->reorder_pb[5]);
  389. dxy = (mx & 1) + 2 * (my & 1);
  390. s->hdsp.put_pixels_tab[0][dxy](temp + 16,
  391. ref + (mx >> 1) +
  392. stride * (my >> 1),
  393. stride, 16);
  394. score[1] += encode_block(s, src + 16 * x, temp + 16,
  395. decoded, stride, 5, 64, lambda, 0);
  396. best = score[1] <= score[0];
  397. vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
  398. score[2] = s->dsp.sse[0](NULL, src + 16 * x, ref,
  399. stride, 16);
  400. score[2] += vlc[1] * lambda;
  401. if (score[2] < score[best] && mx == 0 && my == 0) {
  402. best = 2;
  403. s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
  404. for (i = 0; i < 6; i++)
  405. count[2][i] = 0;
  406. put_bits(&s->pb, vlc[1], vlc[0]);
  407. }
  408. }
  409. if (best == 1) {
  410. for (i = 0; i < 6; i++) {
  411. count[1][i] = put_bits_count(&s->reorder_pb[i]);
  412. flush_put_bits(&s->reorder_pb[i]);
  413. }
  414. } else {
  415. motion_ptr[0] =
  416. motion_ptr[1] =
  417. motion_ptr[2] =
  418. motion_ptr[3] =
  419. motion_ptr[0 + 2 * s->m.b8_stride] =
  420. motion_ptr[1 + 2 * s->m.b8_stride] =
  421. motion_ptr[2 + 2 * s->m.b8_stride] =
  422. motion_ptr[3 + 2 * s->m.b8_stride] = 0;
  423. }
  424. }
  425. s->rd_total += score[best];
  426. for (i = 5; i >= 0; i--)
  427. avpriv_copy_bits(&s->pb, reorder_buffer[best][i],
  428. count[best][i]);
  429. if (best == 0)
  430. s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
  431. }
  432. s->m.first_slice_line = 0;
  433. }
  434. return 0;
  435. }
  436. static av_cold int svq1_encode_init(AVCodecContext *avctx)
  437. {
  438. SVQ1Context *const s = avctx->priv_data;
  439. ff_dsputil_init(&s->dsp, avctx);
  440. ff_hpeldsp_init(&s->hdsp, avctx->flags);
  441. avctx->coded_frame = &s->picture;
  442. s->frame_width = avctx->width;
  443. s->frame_height = avctx->height;
  444. s->y_block_width = (s->frame_width + 15) / 16;
  445. s->y_block_height = (s->frame_height + 15) / 16;
  446. s->c_block_width = (s->frame_width / 4 + 15) / 16;
  447. s->c_block_height = (s->frame_height / 4 + 15) / 16;
  448. s->avctx = avctx;
  449. s->m.avctx = avctx;
  450. s->m.picture_structure = PICT_FRAME;
  451. s->m.me.temp =
  452. s->m.me.scratchpad = av_mallocz((avctx->width + 64) *
  453. 2 * 16 * 2 * sizeof(uint8_t));
  454. s->m.me.map = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
  455. s->m.me.score_map = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
  456. s->mb_type = av_mallocz((s->y_block_width + 1) *
  457. s->y_block_height * sizeof(int16_t));
  458. s->dummy = av_mallocz((s->y_block_width + 1) *
  459. s->y_block_height * sizeof(int32_t));
  460. ff_h263_encode_init(&s->m); // mv_penalty
  461. return 0;
  462. }
  463. static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
  464. const AVFrame *pict, int *got_packet)
  465. {
  466. SVQ1Context *const s = avctx->priv_data;
  467. AVFrame *const p = &s->picture;
  468. AVFrame temp;
  469. int i, ret;
  470. if (!pkt->data &&
  471. (ret = av_new_packet(pkt, s->y_block_width * s->y_block_height *
  472. MAX_MB_BYTES * 3 + FF_MIN_BUFFER_SIZE)) < 0) {
  473. av_log(avctx, AV_LOG_ERROR, "Error getting output packet.\n");
  474. return ret;
  475. }
  476. if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
  477. av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
  478. return -1;
  479. }
  480. if (!s->current_picture.data[0]) {
  481. ff_get_buffer(avctx, &s->current_picture, 0);
  482. ff_get_buffer(avctx, &s->last_picture, 0);
  483. s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16 * 2);
  484. }
  485. temp = s->current_picture;
  486. s->current_picture = s->last_picture;
  487. s->last_picture = temp;
  488. init_put_bits(&s->pb, pkt->data, pkt->size);
  489. *p = *pict;
  490. p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ?
  491. AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
  492. p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;
  493. svq1_write_header(s, p->pict_type);
  494. for (i = 0; i < 3; i++)
  495. if (svq1_encode_plane(s, i,
  496. s->picture.data[i],
  497. s->last_picture.data[i],
  498. s->current_picture.data[i],
  499. s->frame_width / (i ? 4 : 1),
  500. s->frame_height / (i ? 4 : 1),
  501. s->picture.linesize[i],
  502. s->current_picture.linesize[i]) < 0)
  503. return -1;
  504. // avpriv_align_put_bits(&s->pb);
  505. while (put_bits_count(&s->pb) & 31)
  506. put_bits(&s->pb, 1, 0);
  507. flush_put_bits(&s->pb);
  508. pkt->size = put_bits_count(&s->pb) / 8;
  509. if (p->pict_type == AV_PICTURE_TYPE_I)
  510. pkt->flags |= AV_PKT_FLAG_KEY;
  511. *got_packet = 1;
  512. return 0;
  513. }
  514. static av_cold int svq1_encode_end(AVCodecContext *avctx)
  515. {
  516. SVQ1Context *const s = avctx->priv_data;
  517. int i;
  518. av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
  519. s->rd_total / (double)(avctx->width * avctx->height *
  520. avctx->frame_number));
  521. av_freep(&s->m.me.scratchpad);
  522. av_freep(&s->m.me.map);
  523. av_freep(&s->m.me.score_map);
  524. av_freep(&s->mb_type);
  525. av_freep(&s->dummy);
  526. av_freep(&s->scratchbuf);
  527. for (i = 0; i < 3; i++) {
  528. av_freep(&s->motion_val8[i]);
  529. av_freep(&s->motion_val16[i]);
  530. }
  531. av_frame_unref(&s->current_picture);
  532. av_frame_unref(&s->last_picture);
  533. return 0;
  534. }
/* Encoder registration: SVQ1 accepts only YUV410P input (enforced in
 * svq1_encode_frame). */
AVCodec ff_svq1_encoder = {
    .name           = "svq1",
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
};