/*
 * VC3/DNxHD encoder
 * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
 *
 * VC-3 encoder funded by the British Broadcasting Corporation
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//#define DEBUG
#define RC_VARIANCE 1 // use variance or ssd for fast rc

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "dnxhddata.h"

typedef struct {
    uint16_t mb;
    int value;
} RCCMPEntry;

typedef struct {
    int ssd;
    int bits;
} RCEntry;

int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);

typedef struct DNXHDEncContext {
    MpegEncContext m; ///< Used for quantization dsp functions

    AVFrame frame;
    int cid;
    const CIDEntry *cid_table;
    uint8_t *msip; ///< Macroblock Scan Indexes Payload
    uint32_t *slice_size;

    struct DNXHDEncContext *thread[MAX_THREADS];

    unsigned dct_y_offset;
    unsigned dct_uv_offset;
    int interlaced;
    int cur_field;

    DECLARE_ALIGNED_16(DCTELEM, blocks[8][64]);

    int      (*qmatrix_c)     [64];
    int      (*qmatrix_l)     [64];
    uint16_t (*qmatrix_l16)[2][64];
    uint16_t (*qmatrix_c16)[2][64];

    unsigned frame_bits;
    uint8_t *src[3];

    uint32_t *vlc_codes;
    uint8_t  *vlc_bits;
    uint16_t *run_codes;
    uint8_t  *run_bits;

    /** Rate control */
    unsigned slice_bits;
    unsigned qscale;
    unsigned lambda;

    unsigned thread_size;

    uint16_t *mb_bits;
    uint8_t  *mb_qscale;

    RCCMPEntry *mb_cmp;
    RCEntry   (*mb_rc)[8160];
} DNXHDEncContext;

#define LAMBDA_FRAC_BITS 10
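
/* Build the run/level VLC lookup tables for the selected CID.
 * vlc_codes/vlc_bits are indexed by (level<<1)|run_flag over the whole signed
 * level range; levels with |level| > 64 use the index-escape form, appending
 * an offset of index_bits bits to the base code.  run_codes/run_bits map a
 * run length to its VLC. */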
static int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int i, j, level, run;
    int max_level = 1<<(ctx->cid_table->bit_depth+2);

    CHECKED_ALLOCZ(ctx->vlc_codes, max_level*4*sizeof(*ctx->vlc_codes));
    CHECKED_ALLOCZ(ctx->vlc_bits,  max_level*4*sizeof(*ctx->vlc_bits));
    CHECKED_ALLOCZ(ctx->run_codes, 63*2);
    CHECKED_ALLOCZ(ctx->run_bits,  63);

    ctx->vlc_codes += max_level*2;
    ctx->vlc_bits  += max_level*2;
    for (level = -max_level; level < max_level; level++) {
        for (run = 0; run < 2; run++) {
            int index = (level<<1)|run;
            int sign, offset = 0, alevel = level;

            MASK_ABS(sign, alevel);
            if (alevel > 64) {
                offset = (alevel-1)>>6;
                alevel -= offset<<6;
            }
            for (j = 0; j < 257; j++) {
                if (ctx->cid_table->ac_level[j] == alevel &&
                    (!offset || (ctx->cid_table->ac_index_flag[j] && offset)) &&
                    (!run    || (ctx->cid_table->ac_run_flag  [j] && run))) {
                    assert(!ctx->vlc_codes[index]);
                    if (alevel) {
                        ctx->vlc_codes[index] = (ctx->cid_table->ac_codes[j]<<1)|(sign&1);
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits[j]+1;
                    } else {
                        ctx->vlc_codes[index] = ctx->cid_table->ac_codes[j];
                        ctx->vlc_bits [index] = ctx->cid_table->ac_bits [j];
                    }
                    break;
                }
            }
            assert(!alevel || j < 257);
            if (offset) {
                ctx->vlc_codes[index] = (ctx->vlc_codes[index]<<ctx->cid_table->index_bits)|offset;
                ctx->vlc_bits [index]+= ctx->cid_table->index_bits;
            }
        }
    }
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        assert(run < 63);
        ctx->run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->run_bits [run] = ctx->cid_table->run_bits[i];
    }
    return 0;
 fail:
    return -1;
}
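
/* Build per-qscale luma and chroma quantization matrices from the CID weight
 * tables: the weights are remapped from zigzag order to the IDCT permutation
 * and converted into the fixed-point forms dct_quantize() uses; the final <<2
 * rescales the matrices to DNxHD's quantization step (its weight tables use
 * 32 as the unity weight, see dnxhd_unquantize_c()). */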
static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{
    // init first elem to 1 to avoid div by 0 in convert_matrix
    uint16_t weight_matrix[64] = {1,}; // convert_matrix needs uint16_t*
    int qscale, i;

    CHECKED_ALLOCZ(ctx->qmatrix_l,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int));
    CHECKED_ALLOCZ(ctx->qmatrix_c,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int));
    CHECKED_ALLOCZ(ctx->qmatrix_l16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t));
    CHECKED_ALLOCZ(ctx->qmatrix_c16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t));

    for (i = 1; i < 64; i++) {
        int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
        weight_matrix[j] = ctx->cid_table->luma_weight[i];
    }
    ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix,
                      ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);
    for (i = 1; i < 64; i++) {
        int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
        weight_matrix[j] = ctx->cid_table->chroma_weight[i];
    }
    ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix,
                      ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);

    for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
        for (i = 0; i < 64; i++) {
            ctx->qmatrix_l  [qscale]   [i] <<= 2; ctx->qmatrix_c  [qscale]   [i] <<= 2;
            ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2;
            ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2;
        }
    }
    return 0;
 fail:
    return -1;
}
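
/* Allocate the rate-control tables: mb_rc holds bit cost and SSD per
 * (qscale, macroblock), mb_cmp is only needed by the fast (non-RD) mode.
 * frame_bits is the bit budget of one coding unit, minus the 640-byte header
 * and the 4-byte end-of-frame marker. */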
static int dnxhd_init_rc(DNXHDEncContext *ctx)
{
    CHECKED_ALLOCZ(ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry));
    if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
        CHECKED_ALLOCZ(ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry));

    ctx->frame_bits = (ctx->cid_table->coding_unit_size - 640 - 4) * 8;
    ctx->qscale = 1;
    ctx->lambda = 2<<LAMBDA_FRAC_BITS; // qscale 2
    return 0;
 fail:
    return -1;
}
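
/* Encoder init: pick the compression ID (CID) matching the coded video
 * parameters (only 8-bit 4:2:2 input is accepted), reuse mpegvideo's DSP and
 * quantizer, halve mb_height for interlaced coding (each field is a separate
 * coding unit), and split the macroblock rows evenly between threads. */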
static int dnxhd_encode_init(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int i, index;

    ctx->cid = ff_dnxhd_find_cid(avctx);
    if (!ctx->cid || avctx->pix_fmt != PIX_FMT_YUV422P) {
        av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD\n");
        return -1;
    }
    av_log(avctx, AV_LOG_DEBUG, "cid %d\n", ctx->cid);

    index = ff_dnxhd_get_cid_table(ctx->cid);
    ctx->cid_table = &ff_dnxhd_cid_table[index];

    ctx->m.avctx = avctx;
    ctx->m.mb_intra = 1;
    ctx->m.h263_aic = 1;

    dsputil_init(&ctx->m.dsp, avctx);
    ff_dct_common_init(&ctx->m);
    if (!ctx->m.dct_quantize)
        ctx->m.dct_quantize = dct_quantize_c;

    ctx->m.mb_height = (avctx->height + 15) / 16;
    ctx->m.mb_width  = (avctx->width  + 15) / 16;

    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
        ctx->interlaced = 1;
        ctx->m.mb_height /= 2;
    }

    ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width;

    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        ctx->m.intra_quant_bias = avctx->intra_quant_bias;
    if (dnxhd_init_qmat(ctx, ctx->m.intra_quant_bias, 0) < 0) // XXX tune lbias/cbias
        return -1;

    if (dnxhd_init_vlc(ctx) < 0)
        return -1;
    if (dnxhd_init_rc(ctx) < 0)
        return -1;

    CHECKED_ALLOCZ(ctx->slice_size, ctx->m.mb_height*sizeof(uint32_t));
    CHECKED_ALLOCZ(ctx->mb_bits,    ctx->m.mb_num   *sizeof(uint16_t));
    CHECKED_ALLOCZ(ctx->mb_qscale,  ctx->m.mb_num   *sizeof(uint8_t));

    ctx->frame.key_frame = 1;
    ctx->frame.pict_type = FF_I_TYPE;
    ctx->m.avctx->coded_frame = &ctx->frame;

    if (avctx->thread_count > MAX_THREADS || (avctx->thread_count > ctx->m.mb_height)) {
        av_log(avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    ctx->thread[0] = ctx;
    for (i = 1; i < avctx->thread_count; i++) {
        ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext));
        memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext));
    }

    for (i = 0; i < avctx->thread_count; i++) {
        ctx->thread[i]->m.start_mb_y = (ctx->m.mb_height*(i  ) + avctx->thread_count/2) / avctx->thread_count;
        ctx->thread[i]->m.end_mb_y   = (ctx->m.mb_height*(i+1) + avctx->thread_count/2) / avctx->thread_count;
    }

    return 0;
 fail: //for CHECKED_ALLOCZ
    return -1;
}
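
/* Write the 640-byte frame (coding unit) header: signature prefix, field and
 * scan flags, frame geometry (ALPF/SPL), the CID, and the start of the
 * macroblock scan index payload (MSIP), which is filled with per-row slice
 * offsets in dnxhd_encode_picture(). */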
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };

    memcpy(buf, header_prefix, 5);
    buf[5] = ctx->interlaced ? ctx->cur_field+2 : 0x01;
    buf[6] = 0x80; // crc flag off
    buf[7] = 0xa0; // reserved
    AV_WB16(buf + 0x18, avctx->height); // ALPF
    AV_WB16(buf + 0x1a, avctx->width);  // SPL
    AV_WB16(buf + 0x1d, avctx->height); // NAL

    buf[0x21] = 0x38; // FIXME 8 bit per comp
    buf[0x22] = 0x88 + (ctx->frame.interlaced_frame<<2);
    AV_WB32(buf + 0x28, ctx->cid); // CID
    buf[0x2c] = ctx->interlaced ? 0 : 0x80;

    buf[0x5f] = 0x01; // UDL

    buf[0x167] = 0x02; // reserved
    AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4); // MSIPS
    buf[0x16d] = ctx->m.mb_height; // Ns
    buf[0x16f] = 0x10; // reserved

    ctx->msip = buf + 0x170;
    return 0;
}
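
/* DC coefficients are coded as a size-category VLC followed by nbits of the
 * prediction difference; negative differences are stored decremented by one
 * (JPEG-style), which is what the diff-- below implements. */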
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
{
    int nbits;
    if (diff < 0) {
        nbits = av_log2_16bit(-2*diff);
        diff--;
    } else {
        nbits = av_log2_16bit(2*diff);
    }
    put_bits(&ctx->m.pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits]<<nbits) + (diff & ((1 << nbits) - 1)));
}
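
/* Write one quantized 8x8 block: the DC difference against the per-component
 * predictor, then (run, level) pairs for the AC coefficients using the tables
 * built in dnxhd_init_vlc(), terminated by the EOB code. */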
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, DCTELEM *block, int last_index, int n)
{
    int last_non_zero = 0;
    int slevel, i, j;

    dnxhd_encode_dc(ctx, block[0] - ctx->m.last_dc[n]);
    ctx->m.last_dc[n] = block[0];

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        slevel = block[j];
        if (slevel) {
            int run_level = i - last_non_zero - 1;
            int rlevel = (slevel<<1)|!!run_level;
            put_bits(&ctx->m.pb, ctx->vlc_bits[rlevel], ctx->vlc_codes[rlevel]);
            if (run_level)
                put_bits(&ctx->m.pb, ctx->run_bits[run_level], ctx->run_codes[run_level]);
            last_non_zero = i;
        }
    }
    put_bits(&ctx->m.pb, ctx->vlc_bits[0], ctx->vlc_codes[0]); // EOB
}
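
/* Reference dequantization mirroring the decoder; only used to reconstruct
 * blocks when the rate control needs the SSD of the quantization error. */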
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, DCTELEM *block, int n, int qscale, int last_index)
{
    const uint8_t *weight_matrix;
    int level;
    int i;

    weight_matrix = (n&2) ? ctx->cid_table->chroma_weight : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = (1-2*level) * qscale * weight_matrix[i];
                if (weight_matrix[i] != 32)
                    level += 32;
                level >>= 6;
                level = -level;
            } else {
                level = (2*level+1) * qscale * weight_matrix[i];
                if (weight_matrix[i] != 32)
                    level += 32;
                level >>= 6;
            }
            block[j] = level;
        }
    }
}
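
/* Sum of squared differences between the original samples and the
 * reconstructed block, used as the distortion term in RD rate control. */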
static av_always_inline int dnxhd_ssd_block(DCTELEM *qblock, DCTELEM *block)
{
    int score = 0;
    int i;
    for (i = 0; i < 64; i++)
        score += (block[i]-qblock[i])*(block[i]-qblock[i]);
    return score;
}
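
/* Count the bits the AC coefficients of a quantized block would take,
 * without actually writing them (mirrors dnxhd_encode_block()). */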
static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, DCTELEM *block, int last_index)
{
    int last_non_zero = 0;
    int bits = 0;
    int i, j, level;
    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            int run_level = i - last_non_zero - 1;
            bits += ctx->vlc_bits[(level<<1)|!!run_level]+ctx->run_bits[run_level];
            last_non_zero = i;
        }
    }
    return bits;
}
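
/* Read a 4x8 pixel block and mirror the four rows into the lower half of the
 * 8x8 DCT block.  Used for the bottom macroblock row of 1080-line interlaced
 * content, where each field has only four source rows left. */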
static av_always_inline void dnxhd_get_pixels_4x8(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        block[0] = pixels[0];
        block[1] = pixels[1];
        block[2] = pixels[2];
        block[3] = pixels[3];
        block[4] = pixels[4];
        block[5] = pixels[5];
        block[6] = pixels[6];
        block[7] = pixels[7];
        pixels += line_size;
        block += 8;
    }
    memcpy(block   , block- 8, sizeof(*block)*8);
    memcpy(block+ 8, block-16, sizeof(*block)*8);
    memcpy(block+16, block-24, sizeof(*block)*8);
    memcpy(block+24, block-32, sizeof(*block)*8);
}
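
/* Gather the eight 8x8 blocks of a 4:2:2 macroblock (four luma, two Cb, two
 * Cr).  The last macroblock row of 1080-line video is special-cased: the
 * missing bottom lines are mirrored (interlaced) or zeroed (progressive). */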
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
{
    const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize)   + (mb_x << 4);
    const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3);
    DSPContext *dsp = &ctx->m.dsp;

    dsp->get_pixels(ctx->blocks[0], ptr_y    , ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[1], ptr_y + 8, ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[2], ptr_u    , ctx->m.uvlinesize);
    dsp->get_pixels(ctx->blocks[3], ptr_v    , ctx->m.uvlinesize);

    if (mb_y+1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
        if (ctx->interlaced) {
            dnxhd_get_pixels_4x8(ctx->blocks[4], ptr_y + ctx->dct_y_offset    , ctx->m.linesize);
            dnxhd_get_pixels_4x8(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize);
            dnxhd_get_pixels_4x8(ctx->blocks[6], ptr_u + ctx->dct_uv_offset   , ctx->m.uvlinesize);
            dnxhd_get_pixels_4x8(ctx->blocks[7], ptr_v + ctx->dct_uv_offset   , ctx->m.uvlinesize);
        } else
            memset(ctx->blocks[4], 0, 4*64*sizeof(DCTELEM));
    } else {
        dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset    , ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset   , ctx->m.uvlinesize);
        dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset   , ctx->m.uvlinesize);
    }
}

static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
{
    if (i&2) {
        ctx->m.q_intra_matrix16 = ctx->qmatrix_c16;
        ctx->m.q_intra_matrix   = ctx->qmatrix_c;
        return 1 + (i&1);
    } else {
        ctx->m.q_intra_matrix16 = ctx->qmatrix_l16;
        ctx->m.q_intra_matrix   = ctx->qmatrix_l;
        return 0;
    }
}
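
/* First rate-control pass for one thread's rows: quantize every block of each
 * macroblock at the current qscale and store the exact bit cost (DC + AC +
 * 12-bit macroblock header + eight EOB codes) and, in RD mode, the
 * reconstruction SSD in mb_rc[qscale]. */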
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;
    int qscale = ctx->thread[0]->qscale;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        ctx->m.last_dc[0] =
        ctx->m.last_dc[1] =
        ctx->m.last_dc[2] = 1024;

        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int ssd     = 0;
            int ac_bits = 0;
            int dc_bits = 0;
            int i;

            dnxhd_get_blocks(ctx, mb_x, mb_y);

            for (i = 0; i < 8; i++) {
                DECLARE_ALIGNED_16(DCTELEM, block[64]);
                DCTELEM *src_block = ctx->blocks[i];
                int overflow, nbits, diff, last_index;
                int n = dnxhd_switch_matrix(ctx, i);

                memcpy(block, src_block, sizeof(block));
                last_index = ctx->m.dct_quantize((MpegEncContext*)ctx, block, i, qscale, &overflow);
                ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index);

                diff = block[0] - ctx->m.last_dc[n];
                if (diff < 0) nbits = av_log2_16bit(-2*diff);
                else          nbits = av_log2_16bit( 2*diff);
                dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

                ctx->m.last_dc[n] = block[0];

                if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) {
                    dnxhd_unquantize_c(ctx, block, i, qscale, last_index);
                    ctx->m.dsp.idct(block);
                    ssd += dnxhd_ssd_block(block, src_block);
                }
            }
            ctx->mb_rc[qscale][mb].ssd = ssd;
            ctx->mb_rc[qscale][mb].bits = ac_bits+dc_bits+12+8*ctx->vlc_bits[0];
        }
    }
    return 0;
}
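
/* Final bitstream pass for one thread's rows: every macroblock is
 * re-quantized with the qscale chosen by rate control and written out;
 * each row (slice) is padded to a 32-bit boundary. */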
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        ctx->m.last_dc[0] =
        ctx->m.last_dc[1] =
        ctx->m.last_dc[2] = 1024;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int qscale = ctx->mb_qscale[mb];
            int i;

            put_bits(&ctx->m.pb, 12, qscale<<1);

            dnxhd_get_blocks(ctx, mb_x, mb_y);

            for (i = 0; i < 8; i++) {
                DCTELEM *block = ctx->blocks[i];
                int last_index, overflow;
                int n = dnxhd_switch_matrix(ctx, i);
                last_index = ctx->m.dct_quantize((MpegEncContext*)ctx, block, i, qscale, &overflow);
                //START_TIMER;
                dnxhd_encode_block(ctx, block, last_index, n);
                //STOP_TIMER("encode_block");
            }
        }
        if (put_bits_count(&ctx->m.pb)&31)
            put_bits(&ctx->m.pb, 32-(put_bits_count(&ctx->m.pb)&31), 0);
    }
    flush_put_bits(&ctx->m.pb);
    return 0;
}
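
/* Compute each row's slice size in bytes from the per-macroblock bit counts
 * (rounded up to 32 bits) and point every thread's bit writer at its own
 * region of the output buffer, directly after the 640-byte header. */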
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx, uint8_t *buf)
{
    int mb_y, mb_x;
    int i, offset = 0;

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        int thread_size = 0;
        for (mb_y = ctx->thread[i]->m.start_mb_y; mb_y < ctx->thread[i]->m.end_mb_y; mb_y++) {
            ctx->slice_size[mb_y] = 0;
            for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
                unsigned mb = mb_y * ctx->m.mb_width + mb_x;
                ctx->slice_size[mb_y] += ctx->mb_bits[mb];
            }
            ctx->slice_size[mb_y] = (ctx->slice_size[mb_y]+31)&~31;
            ctx->slice_size[mb_y] >>= 3;
            thread_size += ctx->slice_size[mb_y];
        }
        init_put_bits(&ctx->thread[i]->m.pb, buf + 640 + offset, thread_size);
        offset += thread_size;
    }
}
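
/* Estimate the spatial variance of every macroblock in this thread's rows
 * from pix_sum/pix_norm1; the fast rate control uses it to decide which
 * macroblocks to push to qscale+1 first. */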
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb  = mb_y * ctx->m.mb_width + mb_x;
            uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize) + (mb_x<<4);
            int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
            int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)(sum*sum))>>8)+128)>>8;
            ctx->mb_cmp[mb].value = varc;
            ctx->mb_cmp[mb].mb = mb;
        }
    }
    return 0;
}
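
/* Full rate-distortion rate control: evaluate each qscale below qmax for
 * every macroblock, then search for the lambda at which the per-macroblock
 * choice minimizing bits*lambda + ssd just fits the frame bit budget. */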
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;
    int x, y, q;

    for (q = 1; q < avctx->qmax; q++) {
        ctx->qscale = q;
        avctx->execute(avctx, dnxhd_calc_bits_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);
    }
    up_step = down_step = 2<<LAMBDA_FRAC_BITS;
    lambda = ctx->lambda;

    for (;;) {
        int bits = 0;
        int end = 0;
        if (lambda == last_higher) {
            lambda++;
            end = 1; // need to set final qscales/bits
        }
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;
                int qscale = 1;
                int mb = y*ctx->m.mb_width+x;
                for (q = 1; q < avctx->qmax; q++) {
                    unsigned score = ctx->mb_rc[q][mb].bits*lambda+(ctx->mb_rc[q][mb].ssd<<LAMBDA_FRAC_BITS);
                    if (score < min) {
                        min = score;
                        qscale = q;
                    }
                }
                bits += ctx->mb_rc[qscale][mb].bits;
                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb] = ctx->mb_rc[qscale][mb].bits;
            }
            bits = (bits+31)&~31; // padding
            if (bits > ctx->frame_bits)
                break;
        }
        //dprintf(ctx->m.avctx, "lambda %d, up %u, down %u, bits %d, frame %d\n",
        //        lambda, last_higher, last_lower, bits, ctx->frame_bits);
        if (end) {
            if (bits > ctx->frame_bits)
                return -1;
            break;
        }
        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda+last_higher)>>1;
            else
                lambda -= down_step;
            down_step *= 5; // XXX tune ?
            up_step = 1<<LAMBDA_FRAC_BITS;
            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)
                break;
        } else {
            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda+last_lower)>>1;
            else
                lambda += up_step;
            up_step *= 5;
            down_step = 1<<LAMBDA_FRAC_BITS;
        }
    }
    //dprintf(ctx->m.avctx, "out lambda %d\n", lambda);
    ctx->lambda = lambda;
    return 0;
}
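
/* Bisect on a single frame-wide qscale: converge on the largest qscale that
 * still overshoots the frame bit budget (returning 1 if even qscale 1 fits),
 * so that dnxhd_encode_fast() can then make the frame fit by moving
 * individual macroblocks to qscale+1. */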
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
{
    int bits = 0;
    int up_step = 1;
    int down_step = 1;
    int last_higher = 0;
    int last_lower = INT_MAX;
    int qscale;
    int x, y;

    qscale = ctx->qscale;
    for (;;) {
        bits = 0;
        ctx->qscale = qscale;
        // XXX avoid recalculating bits
        ctx->m.avctx->execute(ctx->m.avctx, dnxhd_calc_bits_thread, (void**)&ctx->thread[0], NULL, ctx->m.avctx->thread_count);
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)
                bits += ctx->mb_rc[qscale][y*ctx->m.mb_width+x].bits;
            bits = (bits+31)&~31; // padding
            if (bits > ctx->frame_bits)
                break;
        }
        //dprintf(ctx->m.avctx, "%d, qscale %d, bits %d, frame %d, higher %d, lower %d\n",
        //        ctx->m.avctx->frame_number, qscale, bits, ctx->frame_bits, last_higher, last_lower);
        if (bits < ctx->frame_bits) {
            if (qscale == 1)
                return 1;
            if (last_higher == qscale - 1) {
                qscale = last_higher;
                break;
            }
            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale+last_higher)>>1;
            else
                qscale -= down_step++;
            if (qscale < 1)
                qscale = 1;
            up_step = 1;
        } else {
            if (last_lower == qscale + 1)
                break;
            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale+last_lower)>>1;
            else
                qscale += up_step++;
            down_step = 1;
            if (qscale >= ctx->m.avctx->qmax)
                return -1;
        }
    }
    //dprintf(ctx->m.avctx, "out qscale %d\n", qscale);
    ctx->qscale = qscale;
    return 0;
}
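
/* Fast (non-RD) rate control: all macroblocks start at the qscale found by
 * dnxhd_find_qscale(); the comparison below sorts them by decreasing value
 * (variance, or SSD-per-bit gain when RC_VARIANCE is 0), and macroblocks are
 * then moved to qscale+1 in that order until the frame fits frame_bits. */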
static int dnxhd_rc_cmp(const void *a, const void *b)
{
    return ((const RCCMPEntry *)b)->value - ((const RCCMPEntry *)a)->value;
}

static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int max_bits = 0;
    int ret, x, y;

    if ((ret = dnxhd_find_qscale(ctx)) < 0)
        return -1;
    for (y = 0; y < ctx->m.mb_height; y++) {
        for (x = 0; x < ctx->m.mb_width; x++) {
            int mb = y*ctx->m.mb_width+x;
            int delta_bits;
            ctx->mb_qscale[mb] = ctx->qscale;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale][mb].bits;
            max_bits += ctx->mb_rc[ctx->qscale][mb].bits;
            if (!RC_VARIANCE) {
                delta_bits = ctx->mb_rc[ctx->qscale][mb].bits-ctx->mb_rc[ctx->qscale+1][mb].bits;
                ctx->mb_cmp[mb].mb = mb;
                ctx->mb_cmp[mb].value = delta_bits ?
                    ((ctx->mb_rc[ctx->qscale][mb].ssd-ctx->mb_rc[ctx->qscale+1][mb].ssd)*100)/delta_bits
                    : INT_MIN; //avoid increasing qscale
            }
        }
        max_bits += 31; //worst padding
    }
    if (!ret) {
        if (RC_VARIANCE)
            avctx->execute(avctx, dnxhd_mb_var_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);
        qsort(ctx->mb_cmp, ctx->m.mb_num, sizeof(RCCMPEntry), dnxhd_rc_cmp); // array holds RCCMPEntry, not RCEntry
        for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
            int mb = ctx->mb_cmp[x].mb;
            max_bits -= ctx->mb_rc[ctx->qscale][mb].bits - ctx->mb_rc[ctx->qscale+1][mb].bits;
            ctx->mb_qscale[mb] = ctx->qscale+1;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale+1][mb].bits;
        }
    }
    return 0;
}
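
/* Store the source plane pointers; for interlaced input the line sizes are
 * doubled so that each field is encoded as its own coding unit, and cur_field
 * selects which field comes first based on top_field_first. */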
static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{
    int i;

    for (i = 0; i < 3; i++) {
        ctx->frame.data[i]     = frame->data[i];
        ctx->frame.linesize[i] = frame->linesize[i];
    }

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        ctx->thread[i]->m.linesize    = ctx->frame.linesize[0]<<ctx->interlaced;
        ctx->thread[i]->m.uvlinesize  = ctx->frame.linesize[1]<<ctx->interlaced;
        ctx->thread[i]->dct_y_offset  = ctx->m.linesize  *8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
    }

    ctx->frame.interlaced_frame = frame->interlaced_frame;
    ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}
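
/* Encode one frame: write the header, run rate control, lay out the slices,
 * encode them in parallel and append the 0x600DC0DE end-of-frame marker.
 * Interlaced frames loop back once to encode the second field as another
 * coding unit in the same packet. */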
static int dnxhd_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, const void *data)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int first_field = 1;
    int offset, i, ret;

    if (buf_size < ctx->cid_table->frame_size) {
        av_log(avctx, AV_LOG_ERROR, "output buffer is too small to compress picture\n");
        return -1;
    }

    dnxhd_load_picture(ctx, data);

 encode_coding_unit:
    for (i = 0; i < 3; i++) {
        ctx->src[i] = ctx->frame.data[i];
        if (ctx->interlaced && ctx->cur_field)
            ctx->src[i] += ctx->frame.linesize[i];
    }

    dnxhd_write_header(avctx, buf);

    if (avctx->mb_decision == FF_MB_DECISION_RD)
        ret = dnxhd_encode_rdo(avctx, ctx);
    else
        ret = dnxhd_encode_fast(avctx, ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "picture could not fit ratecontrol constraints\n");
        return -1;
    }

    dnxhd_setup_threads_slices(ctx, buf);

    offset = 0;
    for (i = 0; i < ctx->m.mb_height; i++) {
        AV_WB32(ctx->msip + i * 4, offset);
        offset += ctx->slice_size[i];
        assert(!(ctx->slice_size[i] & 3));
    }

    avctx->execute(avctx, dnxhd_encode_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);

    AV_WB32(buf + ctx->cid_table->coding_unit_size - 4, 0x600DC0DE); // EOF

    if (ctx->interlaced && first_field) {
        first_field     = 0;
        ctx->cur_field ^= 1;
        buf      += ctx->cid_table->coding_unit_size;
        buf_size -= ctx->cid_table->coding_unit_size;
        goto encode_coding_unit;
    }

    ctx->frame.quality = ctx->qscale*FF_QP2LAMBDA;

    return ctx->cid_table->frame_size;
}

static int dnxhd_encode_end(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int max_level = 1<<(ctx->cid_table->bit_depth+2);
    int i;

    av_free(ctx->vlc_codes-max_level*2);
    av_free(ctx->vlc_bits -max_level*2);
    av_freep(&ctx->run_codes);
    av_freep(&ctx->run_bits);

    av_freep(&ctx->mb_bits);
    av_freep(&ctx->mb_qscale);
    av_freep(&ctx->mb_rc);
    av_freep(&ctx->mb_cmp);
    av_freep(&ctx->slice_size);

    av_freep(&ctx->qmatrix_c);
    av_freep(&ctx->qmatrix_l);
    av_freep(&ctx->qmatrix_c16);
    av_freep(&ctx->qmatrix_l16);

    for (i = 1; i < avctx->thread_count; i++)
        av_freep(&ctx->thread[i]);

    return 0;
}

AVCodec dnxhd_encoder = {
    "dnxhd",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DNXHD,
    sizeof(DNXHDEncContext),
    dnxhd_encode_init,
    dnxhd_encode_picture,
    dnxhd_encode_end,
    .pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV422P, PIX_FMT_NONE},
    .long_name = NULL_IF_CONFIG_SMALL("VC3/DNxHD"),
};