You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

1075 lines
36KB

  1. /*
  2. * Rate control for video encoders
  3. *
  4. * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
  5. *
  6. * This file is part of FFmpeg.
  7. *
  8. * FFmpeg is free software; you can redistribute it and/or
  9. * modify it under the terms of the GNU Lesser General Public
  10. * License as published by the Free Software Foundation; either
  11. * version 2.1 of the License, or (at your option) any later version.
  12. *
  13. * FFmpeg is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  16. * Lesser General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU Lesser General Public
  19. * License along with FFmpeg; if not, write to the Free Software
  20. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  21. */
  22. /**
  23. * @file
  24. * Rate control for video encoders.
  25. */
  26. #include "libavutil/attributes.h"
  27. #include "avcodec.h"
  28. #include "internal.h"
  29. #include "ratecontrol.h"
  30. #include "mpegutils.h"
  31. #include "mpegvideo.h"
  32. #include "libavutil/eval.h"
  33. #ifndef M_E
  34. #define M_E 2.718281828
  35. #endif
  36. static int init_pass2(MpegEncContext *s);
  37. static double get_qscale(MpegEncContext *s, RateControlEntry *rce,
  38. double rate_factor, int frame_num);
/**
 * Write one pass-1 statistics line for the current picture.
 *
 * Emits an "in:... out:... type:... ...;\n" record into
 * avctx->stats_out; this format is parsed back by ff_rate_control_init()
 * in pass 2 (the sscanf format string there must stay in sync with the
 * format used here).
 *
 * NOTE(review): the 256-byte bound is hard-coded here; it must match the
 * stats_out allocation done by the encoder setup code, which is not
 * visible in this file -- TODO confirm.
 */
void ff_write_pass1_stats(MpegEncContext *s)
{
    snprintf(s->avctx->stats_out, 256,
             "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
             "fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d skipcount:%d hbits:%d;\n",
             s->current_picture_ptr->f->display_picture_number,
             s->current_picture_ptr->f->coded_picture_number,
             s->pict_type,
             s->current_picture.f->quality,
             s->i_tex_bits,
             s->p_tex_bits,
             s->mv_bits,
             s->misc_bits,
             s->f_code,
             s->b_code,
             s->current_picture.mc_mb_var_sum,
             s->current_picture.mb_var_sum,
             s->i_count, s->skip_count,
             s->header_bits);
}
  59. static double get_fps(AVCodecContext *avctx)
  60. {
  61. return 1.0 / av_q2d(avctx->time_base) / FFMAX(avctx->ticks_per_frame, 1);
  62. }
  63. static inline double qp2bits(RateControlEntry *rce, double qp)
  64. {
  65. if (qp <= 0.0) {
  66. av_log(NULL, AV_LOG_ERROR, "qp<=0.0\n");
  67. }
  68. return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits + 1) / qp;
  69. }
  70. static inline double bits2qp(RateControlEntry *rce, double bits)
  71. {
  72. if (bits < 0.9) {
  73. av_log(NULL, AV_LOG_ERROR, "bits<0.9\n");
  74. }
  75. return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits + 1) / bits;
  76. }
  77. av_cold int ff_rate_control_init(MpegEncContext *s)
  78. {
  79. RateControlContext *rcc = &s->rc_context;
  80. int i, res;
  81. static const char * const const_names[] = {
  82. "PI",
  83. "E",
  84. "iTex",
  85. "pTex",
  86. "tex",
  87. "mv",
  88. "fCode",
  89. "iCount",
  90. "mcVar",
  91. "var",
  92. "isI",
  93. "isP",
  94. "isB",
  95. "avgQP",
  96. "qComp",
  97. #if 0
  98. "lastIQP",
  99. "lastPQP",
  100. "lastBQP",
  101. "nextNonBQP",
  102. #endif
  103. "avgIITex",
  104. "avgPITex",
  105. "avgPPTex",
  106. "avgBPTex",
  107. "avgTex",
  108. NULL
  109. };
  110. static double (* const func1[])(void *, double) = {
  111. (void *)bits2qp,
  112. (void *)qp2bits,
  113. NULL
  114. };
  115. static const char * const func1_names[] = {
  116. "bits2qp",
  117. "qp2bits",
  118. NULL
  119. };
  120. emms_c();
  121. if (!s->avctx->rc_max_available_vbv_use && s->avctx->rc_buffer_size) {
  122. if (s->avctx->rc_max_rate) {
  123. s->avctx->rc_max_available_vbv_use = av_clipf(s->avctx->rc_max_rate/(s->avctx->rc_buffer_size*get_fps(s->avctx)), 1.0/3, 1.0);
  124. } else
  125. s->avctx->rc_max_available_vbv_use = 1.0;
  126. }
  127. res = av_expr_parse(&rcc->rc_eq_eval,
  128. s->rc_eq ? s->rc_eq : "tex^qComp",
  129. const_names, func1_names, func1,
  130. NULL, NULL, 0, s->avctx);
  131. if (res < 0) {
  132. av_log(s->avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\"\n", s->rc_eq);
  133. return res;
  134. }
  135. for (i = 0; i < 5; i++) {
  136. rcc->pred[i].coeff = FF_QP2LAMBDA * 7.0;
  137. rcc->pred[i].count = 1.0;
  138. rcc->pred[i].decay = 0.4;
  139. rcc->i_cplx_sum [i] =
  140. rcc->p_cplx_sum [i] =
  141. rcc->mv_bits_sum[i] =
  142. rcc->qscale_sum [i] =
  143. rcc->frame_count[i] = 1; // 1 is better because of 1/0 and such
  144. rcc->last_qscale_for[i] = FF_QP2LAMBDA * 5;
  145. }
  146. rcc->buffer_index = s->avctx->rc_initial_buffer_occupancy;
  147. if (!rcc->buffer_index)
  148. rcc->buffer_index = s->avctx->rc_buffer_size * 3 / 4;
  149. if (s->avctx->flags & CODEC_FLAG_PASS2) {
  150. int i;
  151. char *p;
  152. /* find number of pics */
  153. p = s->avctx->stats_in;
  154. for (i = -1; p; i++)
  155. p = strchr(p + 1, ';');
  156. i += s->max_b_frames;
  157. if (i <= 0 || i >= INT_MAX / sizeof(RateControlEntry))
  158. return -1;
  159. rcc->entry = av_mallocz(i * sizeof(RateControlEntry));
  160. if (!rcc->entry)
  161. return AVERROR(ENOMEM);
  162. rcc->num_entries = i;
  163. /* init all to skipped p frames
  164. * (with b frames we might have a not encoded frame at the end FIXME) */
  165. for (i = 0; i < rcc->num_entries; i++) {
  166. RateControlEntry *rce = &rcc->entry[i];
  167. rce->pict_type = rce->new_pict_type = AV_PICTURE_TYPE_P;
  168. rce->qscale = rce->new_qscale = FF_QP2LAMBDA * 2;
  169. rce->misc_bits = s->mb_num + 10;
  170. rce->mb_var_sum = s->mb_num * 100;
  171. }
  172. /* read stats */
  173. p = s->avctx->stats_in;
  174. for (i = 0; i < rcc->num_entries - s->max_b_frames; i++) {
  175. RateControlEntry *rce;
  176. int picture_number;
  177. int e;
  178. char *next;
  179. next = strchr(p, ';');
  180. if (next) {
  181. (*next) = 0; // sscanf in unbelievably slow on looong strings // FIXME copy / do not write
  182. next++;
  183. }
  184. e = sscanf(p, " in:%d ", &picture_number);
  185. av_assert0(picture_number >= 0);
  186. av_assert0(picture_number < rcc->num_entries);
  187. rce = &rcc->entry[picture_number];
  188. e += sscanf(p, " in:%*d out:%*d type:%d q:%f itex:%d ptex:%d mv:%d misc:%d fcode:%d bcode:%d mc-var:%"SCNd64" var:%"SCNd64" icount:%d skipcount:%d hbits:%d",
  189. &rce->pict_type, &rce->qscale, &rce->i_tex_bits, &rce->p_tex_bits,
  190. &rce->mv_bits, &rce->misc_bits,
  191. &rce->f_code, &rce->b_code,
  192. &rce->mc_mb_var_sum, &rce->mb_var_sum,
  193. &rce->i_count, &rce->skip_count, &rce->header_bits);
  194. if (e != 14) {
  195. av_log(s->avctx, AV_LOG_ERROR,
  196. "statistics are damaged at line %d, parser out=%d\n",
  197. i, e);
  198. return -1;
  199. }
  200. p = next;
  201. }
  202. if (init_pass2(s) < 0) {
  203. ff_rate_control_uninit(s);
  204. return -1;
  205. }
  206. // FIXME maybe move to end
  207. if ((s->avctx->flags & CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID) {
  208. #if CONFIG_LIBXVID
  209. return ff_xvid_rate_control_init(s);
  210. #else
  211. av_log(s->avctx, AV_LOG_ERROR,
  212. "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
  213. return -1;
  214. #endif
  215. }
  216. }
  217. if (!(s->avctx->flags & CODEC_FLAG_PASS2)) {
  218. rcc->short_term_qsum = 0.001;
  219. rcc->short_term_qcount = 0.001;
  220. rcc->pass1_rc_eq_output_sum = 0.001;
  221. rcc->pass1_wanted_bits = 0.001;
  222. if (s->avctx->qblur > 1.0) {
  223. av_log(s->avctx, AV_LOG_ERROR, "qblur too large\n");
  224. return -1;
  225. }
  226. /* init stuff with the user specified complexity */
  227. if (s->rc_initial_cplx) {
  228. for (i = 0; i < 60 * 30; i++) {
  229. double bits = s->rc_initial_cplx * (i / 10000.0 + 1.0) * s->mb_num;
  230. RateControlEntry rce;
  231. if (i % ((s->gop_size + 3) / 4) == 0)
  232. rce.pict_type = AV_PICTURE_TYPE_I;
  233. else if (i % (s->max_b_frames + 1))
  234. rce.pict_type = AV_PICTURE_TYPE_B;
  235. else
  236. rce.pict_type = AV_PICTURE_TYPE_P;
  237. rce.new_pict_type = rce.pict_type;
  238. rce.mc_mb_var_sum = bits * s->mb_num / 100000;
  239. rce.mb_var_sum = s->mb_num;
  240. rce.qscale = FF_QP2LAMBDA * 2;
  241. rce.f_code = 2;
  242. rce.b_code = 1;
  243. rce.misc_bits = 1;
  244. if (s->pict_type == AV_PICTURE_TYPE_I) {
  245. rce.i_count = s->mb_num;
  246. rce.i_tex_bits = bits;
  247. rce.p_tex_bits = 0;
  248. rce.mv_bits = 0;
  249. } else {
  250. rce.i_count = 0; // FIXME we do know this approx
  251. rce.i_tex_bits = 0;
  252. rce.p_tex_bits = bits * 0.9;
  253. rce.mv_bits = bits * 0.1;
  254. }
  255. rcc->i_cplx_sum[rce.pict_type] += rce.i_tex_bits * rce.qscale;
  256. rcc->p_cplx_sum[rce.pict_type] += rce.p_tex_bits * rce.qscale;
  257. rcc->mv_bits_sum[rce.pict_type] += rce.mv_bits;
  258. rcc->frame_count[rce.pict_type]++;
  259. get_qscale(s, &rce, rcc->pass1_wanted_bits / rcc->pass1_rc_eq_output_sum, i);
  260. // FIXME misbehaves a little for variable fps
  261. rcc->pass1_wanted_bits += s->bit_rate / get_fps(s->avctx);
  262. }
  263. }
  264. }
  265. return 0;
  266. }
/**
 * Free everything allocated by ff_rate_control_init(): the parsed rc_eq
 * expression, the 2-pass entry array, and (when built with libxvid and
 * Xvid strategy is active) the Xvid rate control state.
 */
av_cold void ff_rate_control_uninit(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    emms_c();

    av_expr_free(rcc->rc_eq_eval);
    av_freep(&rcc->entry);

#if CONFIG_LIBXVID
    if ((s->avctx->flags & CODEC_FLAG_PASS2) && s->avctx->rc_strategy == FF_RC_STRATEGY_XVID)
        ff_xvid_rate_control_uninit(s);
#endif
}
/**
 * Update the VBV buffer model after a frame of @p frame_size bits.
 *
 * Drains the frame from the buffer (clamping underflow to 0 with an
 * error message), refills at the per-frame stream rate clipped to
 * [min_rate, max_rate], and on overflow computes how many stuffing
 * bytes must be emitted to soak up the excess.
 *
 * @return number of stuffing bytes the caller must write (0 if none)
 */
int ff_vbv_update(MpegEncContext *s, int frame_size)
{
    RateControlContext *rcc = &s->rc_context;
    const double fps = get_fps(s->avctx);
    const int buffer_size = s->avctx->rc_buffer_size;
    /* per-frame fill bounds, in bits */
    const double min_rate = s->avctx->rc_min_rate / fps;
    const double max_rate = s->avctx->rc_max_rate / fps;

    ff_dlog(s, "%d %f %d %f %f\n",
            buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate);

    if (buffer_size) {
        int left;

        rcc->buffer_index -= frame_size;
        if (rcc->buffer_index < 0) {
            av_log(s->avctx, AV_LOG_ERROR, "rc buffer underflow\n");
            if (frame_size > max_rate && s->qscale == s->avctx->qmax) {
                av_log(s->avctx, AV_LOG_ERROR, "max bitrate possibly too small or try trellis with large lmax or increase qmax\n");
            }
            rcc->buffer_index = 0;
        }

        /* refill: never beyond one bit short of a full buffer */
        left = buffer_size - rcc->buffer_index - 1;
        rcc->buffer_index += av_clip(left, min_rate, max_rate);

        if (rcc->buffer_index > buffer_size) {
            int stuffing = ceil((rcc->buffer_index - buffer_size) / 8);

            /* NOTE(review): the 4-byte minimum for MPEG-4 presumably
             * matches the smallest emittable stuffing unit there --
             * confirm against the MPEG-4 stuffing writer. */
            if (stuffing < 4 && s->codec_id == AV_CODEC_ID_MPEG4)
                stuffing = 4;
            rcc->buffer_index -= 8 * stuffing;

            if (s->avctx->debug & FF_DEBUG_RC)
                av_log(s->avctx, AV_LOG_DEBUG, "stuffing %d bytes\n", stuffing);

            return stuffing;
        }
    }
    return 0;
}
/**
 * Modify the bitrate curve from pass1 for one frame.
 *
 * Evaluates the user rc_eq expression on this frame's statistics to get
 * a target bit count, scales it by @p rate_factor, applies any
 * RcOverride entries covering @p frame_num, converts the result to a
 * quantizer via bits2qp(), and applies negative I/B quant factors.
 *
 * @return the quantizer (lambda scale, >= 1), or -1 if rc_eq evaluated
 *         to NaN
 */
static double get_qscale(MpegEncContext *s, RateControlEntry *rce,
                         double rate_factor, int frame_num)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a = s->avctx;
    const int pict_type = rce->new_pict_type;
    const double mb_num = s->mb_num;
    double q, bits;
    int i;

    /* Values fed to rc_eq; order must match const_names in
     * ff_rate_control_init(). */
    double const_values[] = {
        M_PI,
        M_E,
        rce->i_tex_bits * rce->qscale,
        rce->p_tex_bits * rce->qscale,
        (rce->i_tex_bits + rce->p_tex_bits) * (double)rce->qscale,
        rce->mv_bits / mb_num,
        rce->pict_type == AV_PICTURE_TYPE_B ? (rce->f_code + rce->b_code) * 0.5 : rce->f_code,
        rce->i_count / mb_num,
        rce->mc_mb_var_sum / mb_num,
        rce->mb_var_sum / mb_num,
        rce->pict_type == AV_PICTURE_TYPE_I,
        rce->pict_type == AV_PICTURE_TYPE_P,
        rce->pict_type == AV_PICTURE_TYPE_B,
        rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
        a->qcompress,
#if 0
        rcc->last_qscale_for[AV_PICTURE_TYPE_I],
        rcc->last_qscale_for[AV_PICTURE_TYPE_P],
        rcc->last_qscale_for[AV_PICTURE_TYPE_B],
        rcc->next_non_b_qscale,
#endif
        rcc->i_cplx_sum[AV_PICTURE_TYPE_I] / (double)rcc->frame_count[AV_PICTURE_TYPE_I],
        rcc->i_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
        rcc->p_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
        rcc->p_cplx_sum[AV_PICTURE_TYPE_B] / (double)rcc->frame_count[AV_PICTURE_TYPE_B],
        (rcc->i_cplx_sum[pict_type] + rcc->p_cplx_sum[pict_type]) / (double)rcc->frame_count[pict_type],
        0
    };

    bits = av_expr_eval(rcc->rc_eq_eval, const_values, rce);
    if (isnan(bits)) {
        av_log(s->avctx, AV_LOG_ERROR, "Error evaluating rc_eq \"%s\"\n", s->rc_eq);
        return -1;
    }

    /* accumulate BEFORE scaling: init_pass2() / the 1-pass driver derive
     * rate_factor from this running sum */
    rcc->pass1_rc_eq_output_sum += bits;
    bits *= rate_factor;
    if (bits < 0.0)
        bits = 0.0;
    bits += 1.0; // avoid 1/0 issues

    /* user override */
    for (i = 0; i < s->avctx->rc_override_count; i++) {
        RcOverride *rco = s->avctx->rc_override;
        if (rco[i].start_frame > frame_num)
            continue;
        if (rco[i].end_frame < frame_num)
            continue;

        if (rco[i].qscale)
            bits = qp2bits(rce, rco[i].qscale); // FIXME move at end to really force it?
        else
            bits *= rco[i].quality_factor;
    }

    q = bits2qp(rce, bits);

    /* I/B difference: negative factors mean "derive from P quantizer" */
    if (pict_type == AV_PICTURE_TYPE_I && s->avctx->i_quant_factor < 0.0)
        q = -q * s->avctx->i_quant_factor + s->avctx->i_quant_offset;
    else if (pict_type == AV_PICTURE_TYPE_B && s->avctx->b_quant_factor < 0.0)
        q = -q * s->avctx->b_quant_factor + s->avctx->b_quant_offset;
    if (q < 1)
        q = 1;

    return q;
}
/**
 * Tie the quantizer of I- and B-frames to the surrounding P-frames and
 * limit frame-to-frame quantizer jumps to max_qdiff.
 *
 * Positive i/b_quant_factor replaces q with a scaled copy of the last
 * P (resp. last non-B) quantizer; the result is then clamped to within
 * FF_QP2LAMBDA * max_qdiff of the previous quantizer of the same type.
 * Updates rcc->last_qscale_for[] and rcc->last_non_b_pict_type as a
 * side effect.
 */
static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, double q)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a = s->avctx;
    const int pict_type = rce->new_pict_type;
    const double last_p_q = rcc->last_qscale_for[AV_PICTURE_TYPE_P];
    const double last_non_b_q = rcc->last_qscale_for[rcc->last_non_b_pict_type];

    if (pict_type == AV_PICTURE_TYPE_I &&
        (a->i_quant_factor > 0.0 || rcc->last_non_b_pict_type == AV_PICTURE_TYPE_P))
        q = last_p_q * FFABS(a->i_quant_factor) + a->i_quant_offset;
    else if (pict_type == AV_PICTURE_TYPE_B &&
             a->b_quant_factor > 0.0)
        q = last_non_b_q * a->b_quant_factor + a->b_quant_offset;
    if (q < 1)
        q = 1;

    /* last qscale / qdiff stuff */
    if (rcc->last_non_b_pict_type == pict_type || pict_type != AV_PICTURE_TYPE_I) {
        double last_q = rcc->last_qscale_for[pict_type];
        const int maxdiff = FF_QP2LAMBDA * a->max_qdiff;

        if (q > last_q + maxdiff)
            q = last_q + maxdiff;
        else if (q < last_q - maxdiff)
            q = last_q - maxdiff;
    }

    rcc->last_qscale_for[pict_type] = q; // Note we cannot do that after blurring

    if (pict_type != AV_PICTURE_TYPE_B)
        rcc->last_non_b_pict_type = pict_type;

    return q;
}
  413. /**
  414. * Get the qmin & qmax for pict_type.
  415. */
  416. static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type)
  417. {
  418. int qmin = s->lmin;
  419. int qmax = s->lmax;
  420. av_assert0(qmin <= qmax);
  421. switch (pict_type) {
  422. case AV_PICTURE_TYPE_B:
  423. qmin = (int)(qmin * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset + 0.5);
  424. qmax = (int)(qmax * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset + 0.5);
  425. break;
  426. case AV_PICTURE_TYPE_I:
  427. qmin = (int)(qmin * FFABS(s->avctx->i_quant_factor) + s->avctx->i_quant_offset + 0.5);
  428. qmax = (int)(qmax * FFABS(s->avctx->i_quant_factor) + s->avctx->i_quant_offset + 0.5);
  429. break;
  430. }
  431. qmin = av_clip(qmin, 1, FF_LAMBDA_MAX);
  432. qmax = av_clip(qmax, 1, FF_LAMBDA_MAX);
  433. if (qmax < qmin)
  434. qmax = qmin;
  435. *qmin_ret = qmin;
  436. *qmax_ret = qmax;
  437. }
/**
 * Apply per-frame quantizer modulation, VBV buffer protection and
 * qmin/qmax limiting to a raw quantizer.
 *
 * @param q         quantizer (lambda scale) from the curve
 * @param frame_num frame index, used for the rc_qmod_freq modulation
 * @return the adjusted quantizer
 */
static double modify_qscale(MpegEncContext *s, RateControlEntry *rce,
                            double q, int frame_num)
{
    RateControlContext *rcc = &s->rc_context;
    const double buffer_size = s->avctx->rc_buffer_size;
    const double fps = get_fps(s->avctx);
    const double min_rate = s->avctx->rc_min_rate / fps;
    const double max_rate = s->avctx->rc_max_rate / fps;
    const int pict_type = rce->new_pict_type;
    int qmin, qmax;

    get_qminmax(&qmin, &qmax, s, pict_type);

    /* modulation: bump the quantizer of every rc_qmod_freq-th P-frame */
    if (s->rc_qmod_freq &&
        frame_num % s->rc_qmod_freq == 0 &&
        pict_type == AV_PICTURE_TYPE_P)
        q *= s->rc_qmod_amp;

    /* buffer overflow/underflow protection */
    if (buffer_size) {
        double expected_size = rcc->buffer_index;
        double q_limit;

        if (min_rate) {
            /* buffer getting full -> raise q softly, then hard-limit so the
             * frame stays large enough to absorb the mandatory refill */
            double d = 2 * (buffer_size - expected_size) / buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q *= pow(d, 1.0 / s->rc_buffer_aggressivity);

            q_limit = bits2qp(rce,
                              FFMAX((min_rate - buffer_size + rcc->buffer_index) *
                                    s->avctx->rc_min_vbv_overflow_use, 1));

            if (q > q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }

        if (max_rate) {
            /* buffer draining -> lower q softly, then hard-limit so the
             * frame cannot exceed the usable buffer content */
            double d = 2 * expected_size / buffer_size;
            if (d > 1.0)
                d = 1.0;
            else if (d < 0.0001)
                d = 0.0001;
            q /= pow(d, 1.0 / s->rc_buffer_aggressivity);

            q_limit = bits2qp(rce,
                              FFMAX(rcc->buffer_index *
                                    s->avctx->rc_max_available_vbv_use,
                                    1));
            if (q < q_limit) {
                if (s->avctx->debug & FF_DEBUG_RC)
                    av_log(s->avctx, AV_LOG_DEBUG,
                           "limiting QP %f -> %f\n", q, q_limit);
                q = q_limit;
            }
        }
    }
    ff_dlog(s, "q:%f max:%f min:%f size:%f index:%f agr:%f\n",
            q, max_rate, min_rate, buffer_size, rcc->buffer_index,
            s->rc_buffer_aggressivity);

    if (s->rc_qsquish == 0.0 || qmin == qmax) {
        /* hard clip */
        if (q < qmin)
            q = qmin;
        else if (q > qmax)
            q = qmax;
    } else {
        /* soft clip: map log(q) through a sigmoid so q approaches the
         * bounds asymptotically instead of saturating */
        double min2 = log(qmin);
        double max2 = log(qmax);

        q = log(q);
        q = (q - min2) / (max2 - min2) - 0.5;
        q *= -4.0;
        q = 1.0 / (1.0 + exp(q));
        q = q * (max2 - min2) + min2;

        q = exp(q);
    }

    return q;
}
  514. // ----------------------------------
  515. // 1 Pass Code
  516. static double predict_size(Predictor *p, double q, double var)
  517. {
  518. return p->coeff * var / (q * p->count);
  519. }
  520. static void update_predictor(Predictor *p, double q, double var, double size)
  521. {
  522. double new_coeff = size * q / (var + 1);
  523. if (var < 10)
  524. return;
  525. p->count *= p->decay;
  526. p->coeff *= p->decay;
  527. p->count++;
  528. p->coeff += new_coeff;
  529. }
/**
 * Distribute the frame quantizer @p q across macroblocks.
 *
 * For every MB a masking factor is built from luminance, spatial and
 * temporal complexity and distance to the frame border; the resulting
 * per-MB lambda is written into s->lambda_table[], clipped to
 * [mb_lmin, mb_lmax].  With FF_MPV_FLAG_NAQ the per-MB values are
 * additionally normalized so the clipping does not shift the average.
 */
static void adaptive_quantization(MpegEncContext *s, double q)
{
    int i;
    /* masking strengths, pre-scaled where the field is in pixel units */
    const float lumi_masking = s->avctx->lumi_masking / (128.0 * 128.0);
    const float dark_masking = s->avctx->dark_masking / (128.0 * 128.0);
    const float temp_cplx_masking = s->avctx->temporal_cplx_masking;
    const float spatial_cplx_masking = s->avctx->spatial_cplx_masking;
    const float p_masking = s->avctx->p_masking;
    const float border_masking = s->border_masking;
    float bits_sum = 0.0;
    float cplx_sum = 0.0;
    float *cplx_tab = s->cplx_tab;
    float *bits_tab = s->bits_tab;
    const int qmin = s->avctx->mb_lmin;
    const int qmax = s->avctx->mb_lmax;
    Picture *const pic = &s->current_picture;
    const int mb_width = s->mb_width;
    const int mb_height = s->mb_height;

    /* pass 1: compute per-MB complexity and masked bit budget */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        float temp_cplx = sqrt(pic->mc_mb_var[mb_xy]); // FIXME merge in pow()
        float spat_cplx = sqrt(pic->mb_var[mb_xy]);
        const int lumi = pic->mb_mean[mb_xy];
        float bits, cplx, factor;
        int mb_x = mb_xy % s->mb_stride;
        int mb_y = mb_xy / s->mb_stride;
        int mb_distance;
        float mb_factor = 0.0;

        if (spat_cplx < 4)
            spat_cplx = 4; // FIXME finetune
        if (temp_cplx < 4)
            temp_cplx = 4; // FIXME finetune

        if ((s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_INTRA)) { // FIXME hq mode
            cplx = spat_cplx;
            factor = 1.0 + p_masking;
        } else {
            cplx = temp_cplx;
            factor = pow(temp_cplx, -temp_cplx_masking);
        }
        factor *= pow(spat_cplx, -spatial_cplx_masking);

        /* luminance masking: bright and dark areas tolerate more error */
        if (lumi > 127)
            factor *= (1.0 - (lumi - 128) * (lumi - 128) * lumi_masking);
        else
            factor *= (1.0 - (lumi - 128) * (lumi - 128) * dark_masking);

        /* border masking: linear ramp over the outer fifth of the frame */
        if (mb_x < mb_width / 5) {
            mb_distance = mb_width / 5 - mb_x;
            mb_factor = (float)mb_distance / (float)(mb_width / 5);
        } else if (mb_x > 4 * mb_width / 5) {
            mb_distance = mb_x - 4 * mb_width / 5;
            mb_factor = (float)mb_distance / (float)(mb_width / 5);
        }
        if (mb_y < mb_height / 5) {
            mb_distance = mb_height / 5 - mb_y;
            mb_factor = FFMAX(mb_factor,
                              (float)mb_distance / (float)(mb_height / 5));
        } else if (mb_y > 4 * mb_height / 5) {
            mb_distance = mb_y - 4 * mb_height / 5;
            mb_factor = FFMAX(mb_factor,
                              (float)mb_distance / (float)(mb_height / 5));
        }

        factor *= 1.0 - border_masking * mb_factor;

        if (factor < 0.00001)
            factor = 0.00001;

        bits = cplx * factor;
        cplx_sum += cplx;
        bits_sum += bits;
        cplx_tab[i] = cplx;
        bits_tab[i] = bits;
    }

    /* handle qmin/qmax clipping */
    if (s->mpv_flags & FF_MPV_FLAG_NAQ) {
        float factor = bits_sum / cplx_sum;
        /* remove the contribution of MBs that would be clipped so that
         * renormalization below keeps the frame average at q */
        for (i = 0; i < s->mb_num; i++) {
            float newq = q * cplx_tab[i] / bits_tab[i];
            newq *= factor;

            if (newq > qmax) {
                bits_sum -= bits_tab[i];
                cplx_sum -= cplx_tab[i] * q / qmax;
            } else if (newq < qmin) {
                bits_sum -= bits_tab[i];
                cplx_sum -= cplx_tab[i] * q / qmin;
            }
        }
        if (bits_sum < 0.001)
            bits_sum = 0.001;
        if (cplx_sum < 0.001)
            cplx_sum = 0.001;
    }

    /* pass 2: emit the clipped per-MB lambdas */
    for (i = 0; i < s->mb_num; i++) {
        const int mb_xy = s->mb_index2xy[i];
        float newq = q * cplx_tab[i] / bits_tab[i];
        int intq;

        if (s->mpv_flags & FF_MPV_FLAG_NAQ) {
            newq *= bits_sum / cplx_sum;
        }

        intq = (int)(newq + 0.5);

        if (intq > qmax)
            intq = qmax;
        else if (intq < qmin)
            intq = qmin;
        s->lambda_table[mb_xy] = intq;
    }
}
  633. void ff_get_2pass_fcode(MpegEncContext *s)
  634. {
  635. RateControlContext *rcc = &s->rc_context;
  636. RateControlEntry *rce = &rcc->entry[s->picture_number];
  637. s->f_code = rce->f_code;
  638. s->b_code = rce->b_code;
  639. }
// FIXME rd or at least approx for dquant

/**
 * Estimate the quantizer (lambda scale) for the current picture.
 *
 * In 2-pass mode the precomputed new_qscale from init_pass2() is used,
 * corrected for the deviation from the target bit budget.  In 1-pass
 * mode a synthetic RateControlEntry is filled from the size predictor
 * and run through get_qscale() / get_diff_limited_q() / modify_qscale().
 *
 * @param dry_run if nonzero, do not update predictor/history state
 * @return the quantizer, or -1 on error
 */
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
{
    float q;
    int qmin, qmax;
    float br_compensation;
    double diff;
    double short_term_q;
    double fps;
    int picture_number = s->picture_number;
    int64_t wanted_bits;
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a = s->avctx;
    RateControlEntry local_rce, *rce;
    double bits;
    double rate_factor;
    int64_t var;
    const int pict_type = s->pict_type;
    Picture * const pic = &s->current_picture;
    emms_c();

#if CONFIG_LIBXVID
    if ((s->avctx->flags & CODEC_FLAG_PASS2) &&
        s->avctx->rc_strategy == FF_RC_STRATEGY_XVID)
        return ff_xvid_rate_estimate_qscale(s, dry_run);
#endif

    get_qminmax(&qmin, &qmax, s, pict_type);

    fps = get_fps(s->avctx);
    /* update predictors */
    if (picture_number > 2 && !dry_run) {
        const int64_t last_var =
            s->last_pict_type == AV_PICTURE_TYPE_I ? rcc->last_mb_var_sum
                                                   : rcc->last_mc_mb_var_sum;
        av_assert1(s->frame_bits >= s->stuffing_bits);
        update_predictor(&rcc->pred[s->last_pict_type],
                         rcc->last_qscale,
                         sqrt(last_var),
                         s->frame_bits - s->stuffing_bits);
    }

    if (s->avctx->flags & CODEC_FLAG_PASS2) {
        av_assert0(picture_number >= 0);
        if (picture_number >= rcc->num_entries) {
            av_log(s, AV_LOG_ERROR, "Input is longer than 2-pass log file\n");
            return -1;
        }
        rce = &rcc->entry[picture_number];
        wanted_bits = rce->expected_bits;
    } else {
        Picture *dts_pic;
        rce = &local_rce;

        /* FIXME add a dts field to AVFrame and ensure it is set and use it
         * here instead of reordering but the reordering is simpler for now
         * until H.264 B-pyramid must be handled. */
        if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay)
            dts_pic = s->current_picture_ptr;
        else
            dts_pic = s->last_picture_ptr;

        if (!dts_pic || dts_pic->f->pts == AV_NOPTS_VALUE)
            wanted_bits = (uint64_t)(s->bit_rate * (double)picture_number / fps);
        else
            wanted_bits = (uint64_t)(s->bit_rate * (double)dts_pic->f->pts / fps);
    }

    /* compensate how far actual output is from the target bit budget */
    diff = s->total_bits - wanted_bits;
    br_compensation = (a->bit_rate_tolerance - diff) / a->bit_rate_tolerance;
    if (br_compensation <= 0.0)
        br_compensation = 0.001;

    var = pict_type == AV_PICTURE_TYPE_I ? pic->mb_var_sum : pic->mc_mb_var_sum;

    short_term_q = 0; /* avoid warning */
    if (s->avctx->flags & CODEC_FLAG_PASS2) {
        if (pict_type != AV_PICTURE_TYPE_I)
            av_assert0(pict_type == rce->new_pict_type);
        q = rce->new_qscale / br_compensation;
        ff_dlog(s, "%f %f %f last:%d var:%"PRId64" type:%d//\n", q, rce->new_qscale,
                br_compensation, s->frame_bits, var, pict_type);
    } else {
        /* 1-pass: synthesize a stats entry from the size predictor */
        rce->pict_type =
        rce->new_pict_type = pict_type;
        rce->mc_mb_var_sum = pic->mc_mb_var_sum;
        rce->mb_var_sum = pic->mb_var_sum;
        rce->qscale = FF_QP2LAMBDA * 2;
        rce->f_code = s->f_code;
        rce->b_code = s->b_code;
        rce->misc_bits = 1;

        bits = predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
        if (pict_type == AV_PICTURE_TYPE_I) {
            rce->i_count = s->mb_num;
            rce->i_tex_bits = bits;
            rce->p_tex_bits = 0;
            rce->mv_bits = 0;
        } else {
            rce->i_count = 0; // FIXME we do know this approx
            rce->i_tex_bits = 0;
            rce->p_tex_bits = bits * 0.9;
            rce->mv_bits = bits * 0.1;
        }
        rcc->i_cplx_sum[pict_type] += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[pict_type] += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[pict_type] += rce->mv_bits;
        rcc->frame_count[pict_type]++;

        rate_factor = rcc->pass1_wanted_bits /
                      rcc->pass1_rc_eq_output_sum * br_compensation;

        q = get_qscale(s, rce, rate_factor, picture_number);
        if (q < 0)
            return -1;

        av_assert0(q > 0.0);
        q = get_diff_limited_q(s, rce, q);
        av_assert0(q > 0.0);

        // FIXME type dependent blur like in 2-pass
        if (pict_type == AV_PICTURE_TYPE_P || s->intra_only) {
            rcc->short_term_qsum *= a->qblur;
            rcc->short_term_qcount *= a->qblur;

            rcc->short_term_qsum += q;
            rcc->short_term_qcount++;
            q = short_term_q = rcc->short_term_qsum / rcc->short_term_qcount;
        }
        av_assert0(q > 0.0);

        q = modify_qscale(s, rce, q, picture_number);

        rcc->pass1_wanted_bits += s->bit_rate / fps;

        av_assert0(q > 0.0);
    }

    if (s->avctx->debug & FF_DEBUG_RC) {
        av_log(s->avctx, AV_LOG_DEBUG,
               "%c qp:%d<%2.1f<%d %d want:%d total:%d comp:%f st_q:%2.2f "
               "size:%d var:%"PRId64"/%"PRId64" br:%d fps:%d\n",
               av_get_picture_type_char(pict_type),
               qmin, q, qmax, picture_number,
               (int)wanted_bits / 1000, (int)s->total_bits / 1000,
               br_compensation, short_term_q, s->frame_bits,
               pic->mb_var_sum, pic->mc_mb_var_sum,
               s->bit_rate / 1000, (int)fps);
    }

    if (q < qmin)
        q = qmin;
    else if (q > qmax)
        q = qmax;

    if (s->adaptive_quant)
        adaptive_quantization(s, q);
    else
        q = (int)(q + 0.5);

    if (!dry_run) {
        rcc->last_qscale = q;
        rcc->last_mc_mb_var_sum = pic->mc_mb_var_sum;
        rcc->last_mb_var_sum = pic->mb_var_sum;
    }
    return q;
}
// ----------------------------------------------
// 2-Pass code

/**
 * Fit the pass-2 quantizer curve to the available bit budget.
 *
 * Accumulates per-type complexity from the parsed pass-1 entries, then
 * binary-searches a global rate_factor: for each candidate it derives
 * per-frame quantizers via get_qscale()/get_diff_limited_q(), blurs
 * them with a Gaussian of width qblur, applies modify_qscale() with
 * VBV simulation, and compares the resulting total bits against the
 * budget.  Fills entry[].new_qscale and entry[].expected_bits.
 *
 * @return 0 on success, -1 on failure (budget too low or no convergence)
 */
static int init_pass2(MpegEncContext *s)
{
    RateControlContext *rcc = &s->rc_context;
    AVCodecContext *a = s->avctx;
    int i, toobig;
    double fps = get_fps(s->avctx);
    double complexity[5] = { 0 }; // approximate bits at quant=1
    uint64_t const_bits[5] = { 0 }; // quantizer independent bits
    uint64_t all_const_bits;
    uint64_t all_available_bits = (uint64_t)(s->bit_rate *
                                            (double)rcc->num_entries / fps);
    double rate_factor = 0;
    double step;
    const int filter_size = (int)(a->qblur * 4) | 1; // forced odd for symmetry
    double expected_bits = 0; // init to silence gcc warning
    double *qscale, *blurred_qscale, qscale_sum;

    /* find complexity & const_bits & decide the pict_types */
    for (i = 0; i < rcc->num_entries; i++) {
        RateControlEntry *rce = &rcc->entry[i];

        rce->new_pict_type = rce->pict_type;
        rcc->i_cplx_sum[rce->pict_type] += rce->i_tex_bits * rce->qscale;
        rcc->p_cplx_sum[rce->pict_type] += rce->p_tex_bits * rce->qscale;
        rcc->mv_bits_sum[rce->pict_type] += rce->mv_bits;
        rcc->frame_count[rce->pict_type]++;

        complexity[rce->new_pict_type] += (rce->i_tex_bits + rce->p_tex_bits) *
                                          (double)rce->qscale;
        const_bits[rce->new_pict_type] += rce->mv_bits + rce->misc_bits;
    }

    all_const_bits = const_bits[AV_PICTURE_TYPE_I] +
                     const_bits[AV_PICTURE_TYPE_P] +
                     const_bits[AV_PICTURE_TYPE_B];

    if (all_available_bits < all_const_bits) {
        av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
        return -1;
    }

    qscale = av_malloc_array(rcc->num_entries, sizeof(double));
    blurred_qscale = av_malloc_array(rcc->num_entries, sizeof(double));
    if (!qscale || !blurred_qscale) {
        av_free(qscale);
        av_free(blurred_qscale);
        return AVERROR(ENOMEM);
    }

    toobig = 0;

    /* binary search over rate_factor: each overshoot backs out the step */
    for (step = 256 * 256; step > 0.0000001; step *= 0.5) {
        expected_bits = 0;
        rate_factor += step;

        rcc->buffer_index = s->avctx->rc_buffer_size / 2;

        /* find qscale */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_qscale(s, &rcc->entry[i], rate_factor, i);
            rcc->last_qscale_for[rce->pict_type] = qscale[i];
        }
        av_assert0(filter_size % 2 == 1);

        /* fixed I/B QP relative to P mode: forward pass (warm-started on
         * the last 300 entries), then a full backward pass */
        for (i = FFMAX(0, rcc->num_entries - 300); i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
        }

        for (i = rcc->num_entries - 1; i >= 0; i--) {
            RateControlEntry *rce = &rcc->entry[i];

            qscale[i] = get_diff_limited_q(s, rce, qscale[i]);
        }

        /* smooth curve: Gaussian blur over same-type neighbours only */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            const int pict_type = rce->new_pict_type;
            int j;
            double q = 0.0, sum = 0.0;

            for (j = 0; j < filter_size; j++) {
                int index = i + j - filter_size / 2;
                double d = index - i;
                double coeff = a->qblur == 0 ? 1.0 : exp(-d * d / (a->qblur * a->qblur));

                if (index < 0 || index >= rcc->num_entries)
                    continue;
                if (pict_type != rcc->entry[index].new_pict_type)
                    continue;
                q += qscale[index] * coeff;
                sum += coeff;
            }
            blurred_qscale[i] = q / sum;
        }

        /* find expected bits (expected_bits stored per entry is the
         * running total BEFORE this frame) */
        for (i = 0; i < rcc->num_entries; i++) {
            RateControlEntry *rce = &rcc->entry[i];
            double bits;

            rce->new_qscale = modify_qscale(s, rce, blurred_qscale[i], i);

            bits = qp2bits(rce, rce->new_qscale) + rce->mv_bits + rce->misc_bits;
            bits += 8 * ff_vbv_update(s, bits);

            rce->expected_bits = expected_bits;
            expected_bits += bits;
        }

        ff_dlog(s->avctx,
                "expected_bits: %f all_available_bits: %d rate_factor: %f\n",
                expected_bits, (int)all_available_bits, rate_factor);
        if (expected_bits > all_available_bits) {
            rate_factor -= step;
            ++toobig;
        }
    }
    av_free(qscale);
    av_free(blurred_qscale);

    /* check bitrate calculations and print info */
    qscale_sum = 0.0;
    for (i = 0; i < rcc->num_entries; i++) {
        ff_dlog(s, "[lavc rc] entry[%d].new_qscale = %.3f qp = %.3f\n",
                i,
                rcc->entry[i].new_qscale,
                rcc->entry[i].new_qscale / FF_QP2LAMBDA);
        qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA,
                              s->avctx->qmin, s->avctx->qmax);
    }
    av_assert0(toobig <= 40);
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] requested bitrate: %d bps expected bitrate: %d bps\n",
           s->bit_rate,
           (int)(expected_bits / ((double)all_available_bits / s->bit_rate)));
    av_log(s->avctx, AV_LOG_DEBUG,
           "[lavc rc] estimated target average qp: %.3f\n",
           (float)qscale_sum / rcc->num_entries);
    if (toobig == 0) {
        av_log(s->avctx, AV_LOG_INFO,
               "[lavc rc] Using all of requested bitrate is not "
               "necessary for this video with these parameters.\n");
    } else if (toobig == 40) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: bitrate too low for this video "
               "with these parameters.\n");
        return -1;
    } else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) {
        av_log(s->avctx, AV_LOG_ERROR,
               "[lavc rc] Error: 2pass curve failed to converge\n");
        return -1;
    }

    return 0;
}