/*
 * VC3/DNxHD encoder
 * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
 *
 * VC-3 encoder funded by the British Broadcasting Corporation
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

//#define DEBUG
#define RC_VARIANCE 1 // use variance or ssd for fast rc

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
#include "dnxhddata.h"

typedef struct {
    uint16_t mb;
    int value;
} RCCMPEntry;

typedef struct {
    int ssd;
    int bits;
} RCEntry;

int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);

typedef struct DNXHDEncContext {
    MpegEncContext m; ///< Used for quantization dsp functions

    AVFrame frame;
    int cid;
    const CIDEntry *cid_table;
    uint8_t *msip; ///< Macroblock Scan Indices Payload
    uint32_t *slice_size;

    struct DNXHDEncContext *thread[MAX_THREADS];

    unsigned dct_y_offset;
    unsigned dct_uv_offset;
    int interlaced;
    int cur_field;

    DECLARE_ALIGNED_16(DCTELEM, blocks[8][64]);

    int      (*qmatrix_c)     [64];
    int      (*qmatrix_l)     [64];
    uint16_t (*qmatrix_l16)[2][64];
    uint16_t (*qmatrix_c16)[2][64];

    unsigned frame_bits;
    uint8_t *src[3];

    uint16_t *table_vlc_codes;
    uint8_t  *table_vlc_bits;
    uint16_t *table_run_codes;
    uint8_t  *table_run_bits;

    /** Rate control */
    unsigned slice_bits;
    unsigned qscale;
    unsigned lambda;

    unsigned thread_size;

    uint16_t *mb_bits;
    uint8_t  *mb_qscale;

    RCCMPEntry *mb_cmp;
    RCEntry   (*mb_rc)[8160];
} DNXHDEncContext;

#define LAMBDA_FRAC_BITS 10
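
/**
 * Build direct-lookup VLC tables (codes and bit lengths indexed by level
 * and by run) from the AC tables of the selected CID, so coefficients can
 * be emitted without searching the CID tables per symbol.
 */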
static int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int i;

    CHECKED_ALLOCZ(ctx->table_vlc_codes, 449*2);
    CHECKED_ALLOCZ(ctx->table_vlc_bits,  449);
    CHECKED_ALLOCZ(ctx->table_run_codes, 63*2);
    CHECKED_ALLOCZ(ctx->table_run_bits,  63);

    for (i = 0; i < 257; i++) {
        int level = ctx->cid_table->ac_level[i] +
            (ctx->cid_table->ac_run_flag[i] << 7) + (ctx->cid_table->ac_index_flag[i] << 8);
        assert(level < 449);
        if (ctx->cid_table->ac_level[i] == 64 && ctx->cid_table->ac_index_flag[i])
            level -= 64; // use 0+(1<<8) level
        ctx->table_vlc_codes[level] = ctx->cid_table->ac_codes[i];
        ctx->table_vlc_bits [level] = ctx->cid_table->ac_bits[i];
    }
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        assert(run < 63);
        ctx->table_run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->table_run_bits [run] = ctx->cid_table->run_bits[i];
    }
    return 0;
 fail:
    return -1;
}
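
/**
 * Convert the CID luma/chroma weight tables into the per-qscale
 * quantization matrices expected by the MpegEncContext quantizer.
 */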
static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{
    // init first elem to 1 to avoid div by 0 in convert_matrix
    uint16_t weight_matrix[64] = {1,}; // convert_matrix needs uint16_t*
    int qscale, i;

    CHECKED_ALLOCZ(ctx->qmatrix_l,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int));
    CHECKED_ALLOCZ(ctx->qmatrix_c,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int));
    CHECKED_ALLOCZ(ctx->qmatrix_l16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t));
    CHECKED_ALLOCZ(ctx->qmatrix_c16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t));

    for (i = 1; i < 64; i++) {
        int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
        weight_matrix[j] = ctx->cid_table->luma_weight[i];
    }
    ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix,
                      ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);
    for (i = 1; i < 64; i++) {
        int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
        weight_matrix[j] = ctx->cid_table->chroma_weight[i];
    }
    ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix,
                      ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);

    for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
        for (i = 0; i < 64; i++) {
            ctx->qmatrix_l  [qscale]   [i] <<= 2; ctx->qmatrix_c  [qscale]   [i] <<= 2;
            ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2;
            ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2;
        }
    }
    return 0;
 fail:
    return -1;
}
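
/**
 * Allocate rate-control state: per-qscale bits/SSD records for every
 * macroblock, plus the comparison array used by the fast (non-RD) path.
 * frame_bits is the coding-unit payload left after the 640-byte header
 * area and the 4-byte EOF marker.
 */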
static int dnxhd_init_rc(DNXHDEncContext *ctx)
{
    CHECKED_ALLOCZ(ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry));
    if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
        CHECKED_ALLOCZ(ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry));

    ctx->frame_bits = (ctx->cid_table->coding_unit_size - 640 - 4) * 8;
    ctx->qscale = 1;
    ctx->lambda = 2<<LAMBDA_FRAC_BITS; // qscale 2
    return 0;
 fail:
    return -1;
}
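
/**
 * Pick the compression ID (CID) from frame geometry, the interlacing flag
 * and the requested bit rate, then set up the DSP/quantizer state and one
 * context per thread, each covering a contiguous range of macroblock rows.
 */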
static int dnxhd_encode_init(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int i, index;

    if (avctx->width == 1920 && avctx->height == 1080) {
        if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
            if (avctx->bit_rate == 120000000)
                ctx->cid = 1242;
            else if (avctx->bit_rate == 185000000)
                ctx->cid = 1243;
        } else {
            if (avctx->bit_rate == 120000000)
                ctx->cid = 1237;
            else if (avctx->bit_rate == 185000000)
                ctx->cid = 1238;
            else if (avctx->bit_rate == 36000000)
                ctx->cid = 1253;
        }
    } else if (avctx->width == 1280 && avctx->height == 720 &&
               !(avctx->flags & CODEC_FLAG_INTERLACED_DCT)) {
        if (avctx->bit_rate == 90000000)
            ctx->cid = 1251;
        else if (avctx->bit_rate == 60000000)
            ctx->cid = 1252;
    }
    if (!ctx->cid || avctx->pix_fmt != PIX_FMT_YUV422P) {
        av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD\n");
        return -1;
    }

    index = ff_dnxhd_get_cid_table(ctx->cid);
    ctx->cid_table = &ff_dnxhd_cid_table[index];

    ctx->m.avctx = avctx;
    ctx->m.mb_intra = 1;
    ctx->m.h263_aic = 1;

    dsputil_init(&ctx->m.dsp, avctx);
    ff_dct_common_init(&ctx->m);
    if (!ctx->m.dct_quantize)
        ctx->m.dct_quantize = dct_quantize_c;

    ctx->m.mb_height = (avctx->height + 15) / 16;
    ctx->m.mb_width  = (avctx->width  + 15) / 16;

    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
        ctx->interlaced = 1;
        ctx->m.mb_height /= 2;
    }

    ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width;

    if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
        ctx->m.intra_quant_bias = avctx->intra_quant_bias;
    if (dnxhd_init_qmat(ctx, ctx->m.intra_quant_bias, 0) < 0) // XXX tune lbias/cbias
        return -1;

    if (dnxhd_init_vlc(ctx) < 0)
        return -1;
    if (dnxhd_init_rc(ctx) < 0)
        return -1;

    CHECKED_ALLOCZ(ctx->slice_size, ctx->m.mb_height*sizeof(uint32_t));
    CHECKED_ALLOCZ(ctx->mb_bits,    ctx->m.mb_num   *sizeof(uint16_t));
    CHECKED_ALLOCZ(ctx->mb_qscale,  ctx->m.mb_num   *sizeof(uint8_t));

    ctx->frame.key_frame = 1;
    ctx->frame.pict_type = FF_I_TYPE;
    ctx->m.avctx->coded_frame = &ctx->frame;

    if (avctx->thread_count > MAX_THREADS || (avctx->thread_count > ctx->m.mb_height)) {
        av_log(avctx, AV_LOG_ERROR, "too many threads\n");
        return -1;
    }

    ctx->thread[0] = ctx;
    for (i = 1; i < avctx->thread_count; i++) {
        ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext));
        memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext));
    }

    for (i = 0; i < avctx->thread_count; i++) {
        ctx->thread[i]->m.start_mb_y = (ctx->m.mb_height*(i  ) + avctx->thread_count/2) / avctx->thread_count;
        ctx->thread[i]->m.end_mb_y   = (ctx->m.mb_height*(i+1) + avctx->thread_count/2) / avctx->thread_count;
    }

    return 0;
 fail: //for CHECKED_ALLOCZ
    return -1;
}
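
/**
 * Write the coding-unit header fields and remember where the macroblock
 * scan index payload (one 32-bit slice offset per macroblock row) begins,
 * so it can be filled in once rate control has fixed the slice sizes.
 */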
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };

    memcpy(buf, header_prefix, 5);
    buf[5] = ctx->interlaced ? ctx->cur_field+2 : 0x01;
    buf[6] = 0x80; // crc flag off
    buf[7] = 0xa0; // reserved
    AV_WB16(buf + 0x18, avctx->height); // ALPF
    AV_WB16(buf + 0x1a, avctx->width);  // SPL
    AV_WB16(buf + 0x1d, avctx->height); // NAL
    buf[0x21] = 0x38; // FIXME 8 bit per comp
    buf[0x22] = 0x88 + (ctx->frame.interlaced_frame<<2);
    AV_WB32(buf + 0x28, ctx->cid); // CID
    buf[0x2c] = ctx->interlaced ? 0 : 0x80;
    buf[0x5f] = 0x01; // UDL

    buf[0x167] = 0x02; // reserved
    AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4); // MSIPS
    buf[0x16d] = ctx->m.mb_height; // Ns
    buf[0x16f] = 0x10; // reserved

    ctx->msip = buf + 0x170;
    return 0;
}
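
/**
 * Code a DC coefficient as the difference from the previous block of the
 * same component: a size-category prefix from the CID DC table followed by
 * the amplitude bits.
 */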
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
{
    int nbits;
    if (diff < 0) {
        nbits = av_log2_16bit(-2*diff);
        diff--;
    } else {
        nbits = av_log2_16bit(2*diff);
    }
    put_bits(&ctx->m.pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits]<<nbits) + (diff & ((1 << nbits) - 1)));
}
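
/**
 * Encode one quantized 8x8 block: DC delta first, then each non-zero AC
 * level as (level VLC, sign bit), a 4-bit extension when the level exceeds
 * 64, and a run VLC when zeros were skipped; closed with an EOB code.
 */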
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, DCTELEM *block, int last_index, int n)
{
    int last_non_zero = 0;
    int offset = 0;
    int slevel, i, j;

    dnxhd_encode_dc(ctx, block[0] - ctx->m.last_dc[n]);
    ctx->m.last_dc[n] = block[0];

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        slevel = block[j];
        if (slevel) {
            int run_level = i - last_non_zero - 1;
            int sign;
            MASK_ABS(sign, slevel);
            if (slevel > 64) {
                offset = (slevel-1) >> 6;
                slevel = 256 | (slevel & 63); // level 64 is treated as 0
            }
            if (run_level)
                slevel |= 128;
            put_bits(&ctx->m.pb, ctx->table_vlc_bits[slevel]+1, (ctx->table_vlc_codes[slevel]<<1)|(sign&1));
            if (offset) {
                put_bits(&ctx->m.pb, 4, offset);
                offset = 0;
            }
            if (run_level)
                put_bits(&ctx->m.pb, ctx->table_run_bits[run_level], ctx->table_run_codes[run_level]);
            last_non_zero = i;
        }
    }
    put_bits(&ctx->m.pb, ctx->table_vlc_bits[0], ctx->table_vlc_codes[0]); // EOB
}
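
/**
 * Inverse quantization, used only when rate control needs SSD: rescale
 * each coefficient by qscale and the component's weight matrix so the
 * block can be run through the IDCT and compared with the source.
 */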
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, DCTELEM *block, int n, int qscale, int last_index)
{
    const uint8_t *weight_matrix;
    int level;
    int i;

    weight_matrix = (n&2) ? ctx->cid_table->chroma_weight : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = (1-2*level) * qscale * weight_matrix[i];
                if (weight_matrix[i] != 32)
                    level += 32;
                level >>= 6;
                level = -level;
            } else {
                level = (2*level+1) * qscale * weight_matrix[i];
                if (weight_matrix[i] != 32)
                    level += 32;
                level >>= 6;
            }
            block[j] = level;
        }
    }
}

static av_always_inline int dnxhd_ssd_block(DCTELEM *qblock, DCTELEM *block)
{
    int score = 0;
    int i;
    for (i = 0; i < 64; i++)
        score += (block[i]-qblock[i])*(block[i]-qblock[i]);
    return score;
}

static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, DCTELEM *block, int last_index)
{
    int last_non_zero = 0;
    int bits = 0;
    int i, j, level;
    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            int run_level = i - last_non_zero - 1;
            level = FFABS(level);
            if (level > 64) {
                level = 256 | (level & 63); // level 64 is treated as 0
                bits += 4;
            }
            level |= (!!run_level)<<7;
            bits += ctx->table_vlc_bits[level]+1 + ctx->table_run_bits[run_level];
            last_non_zero = i;
        }
    }
    return bits;
}
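
/**
 * Fetch a 4x8 source area and mirror it into the lower half of the 8x8
 * block. Used for the bottom macroblock row of interlaced 1080-line
 * content, where only four source lines per field remain.
 */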
static av_always_inline void dnxhd_get_pixels_4x8(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        block[0] = pixels[0];
        block[1] = pixels[1];
        block[2] = pixels[2];
        block[3] = pixels[3];
        block[4] = pixels[4];
        block[5] = pixels[5];
        block[6] = pixels[6];
        block[7] = pixels[7];
        pixels += line_size;
        block += 8;
    }
    memcpy(block   , block- 8, sizeof(*block)*8);
    memcpy(block+ 8, block-16, sizeof(*block)*8);
    memcpy(block+16, block-24, sizeof(*block)*8);
    memcpy(block+24, block-32, sizeof(*block)*8);
}

static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
{
    const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize)   + (mb_x << 4);
    const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3);
    DSPContext *dsp = &ctx->m.dsp;

    dsp->get_pixels(ctx->blocks[0], ptr_y    , ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[1], ptr_y + 8, ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[2], ptr_u    , ctx->m.uvlinesize);
    dsp->get_pixels(ctx->blocks[3], ptr_v    , ctx->m.uvlinesize);

    if (mb_y+1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
        if (ctx->interlaced) {
            dnxhd_get_pixels_4x8(ctx->blocks[4], ptr_y + ctx->dct_y_offset    , ctx->m.linesize);
            dnxhd_get_pixels_4x8(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize);
            dnxhd_get_pixels_4x8(ctx->blocks[6], ptr_u + ctx->dct_uv_offset   , ctx->m.uvlinesize);
            dnxhd_get_pixels_4x8(ctx->blocks[7], ptr_v + ctx->dct_uv_offset   , ctx->m.uvlinesize);
        } else
            memset(ctx->blocks[4], 0, 4*64*sizeof(DCTELEM));
    } else {
        dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset    , ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset   , ctx->m.uvlinesize);
        dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset   , ctx->m.uvlinesize);
    }
}

static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
{
    if (i&2) {
        ctx->m.q_intra_matrix16 = ctx->qmatrix_c16;
        ctx->m.q_intra_matrix   = ctx->qmatrix_c;
        return 1 + (i&1);
    } else {
        ctx->m.q_intra_matrix16 = ctx->qmatrix_l16;
        ctx->m.q_intra_matrix   = ctx->qmatrix_l;
        return 0;
    }
}
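
/**
 * Thread worker for rate control: quantize every macroblock in this
 * thread's row range at the current qscale and record the exact bit cost,
 * plus the reconstruction SSD when rate control needs it, without writing
 * anything to the bitstream.
 */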
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;
    int qscale = ctx->thread[0]->qscale;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        ctx->m.last_dc[0] =
        ctx->m.last_dc[1] =
        ctx->m.last_dc[2] = 1024;

        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int ssd     = 0;
            int ac_bits = 0;
            int dc_bits = 0;
            int i;

            dnxhd_get_blocks(ctx, mb_x, mb_y);

            for (i = 0; i < 8; i++) {
                DECLARE_ALIGNED_16(DCTELEM, block[64]);
                DCTELEM *src_block = ctx->blocks[i];
                int overflow, nbits, diff, last_index;
                int n = dnxhd_switch_matrix(ctx, i);

                memcpy(block, src_block, sizeof(block));
                last_index = ctx->m.dct_quantize((MpegEncContext*)ctx, block, i, qscale, &overflow);
                ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index);

                diff = block[0] - ctx->m.last_dc[n];
                if (diff < 0) nbits = av_log2_16bit(-2*diff);
                else          nbits = av_log2_16bit( 2*diff);
                dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

                ctx->m.last_dc[n] = block[0];

                if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) {
                    dnxhd_unquantize_c(ctx, block, i, qscale, last_index);
                    ctx->m.dsp.idct(block);
                    ssd += dnxhd_ssd_block(block, src_block);
                }
            }
            ctx->mb_rc[qscale][mb].ssd = ssd;
            ctx->mb_rc[qscale][mb].bits = ac_bits+dc_bits+12+8*ctx->table_vlc_bits[0];
        }
    }
    return 0;
}
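
/**
 * Thread worker for the final pass: write each macroblock of this thread's
 * rows using the qscale chosen by rate control, padding every macroblock
 * row (slice) to a 32-bit boundary.
 */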
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        ctx->m.last_dc[0] =
        ctx->m.last_dc[1] =
        ctx->m.last_dc[2] = 1024;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int qscale = ctx->mb_qscale[mb];
            int i;

            put_bits(&ctx->m.pb, 12, qscale<<1);

            dnxhd_get_blocks(ctx, mb_x, mb_y);

            for (i = 0; i < 8; i++) {
                DCTELEM *block = ctx->blocks[i];
                int last_index, overflow;
                int n = dnxhd_switch_matrix(ctx, i);
                last_index = ctx->m.dct_quantize((MpegEncContext*)ctx, block, i, qscale, &overflow);
                dnxhd_encode_block(ctx, block, last_index, n);
            }
        }
        if (put_bits_count(&ctx->m.pb)&31)
            put_bits(&ctx->m.pb, 32-(put_bits_count(&ctx->m.pb)&31), 0);
    }
    flush_put_bits(&ctx->m.pb);
    return 0;
}

static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx, uint8_t *buf)
{
    int mb_y, mb_x;
    int i, offset = 0;

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        int thread_size = 0;
        for (mb_y = ctx->thread[i]->m.start_mb_y; mb_y < ctx->thread[i]->m.end_mb_y; mb_y++) {
            ctx->slice_size[mb_y] = 0;
            for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
                unsigned mb = mb_y * ctx->m.mb_width + mb_x;
                ctx->slice_size[mb_y] += ctx->mb_bits[mb];
            }
            ctx->slice_size[mb_y] = (ctx->slice_size[mb_y]+31)&~31;
            ctx->slice_size[mb_y] >>= 3;
            thread_size += ctx->slice_size[mb_y];
        }
        init_put_bits(&ctx->thread[i]->m.pb, buf + 640 + offset, thread_size);
        offset += thread_size;
    }
}

static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb  = mb_y * ctx->m.mb_width + mb_x;
            uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize) + (mb_x<<4);
            int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
            int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)(sum*sum))>>8)+128)>>8;
            ctx->mb_cmp[mb].value = varc;
            ctx->mb_cmp[mb].mb = mb;
        }
    }
    return 0;
}
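
/**
 * RD rate control: collect bits/SSD for every macroblock at each candidate
 * qscale, then search for the Lagrange multiplier lambda (fixed point with
 * LAMBDA_FRAC_BITS fractional bits) whose per-macroblock rate/distortion
 * optimum just fits into frame_bits. The step widens while the budget is
 * unbounded on one side and bisects once both an over- and an
 * under-shooting lambda are known.
 */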
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;
    int x, y, q;

    for (q = 1; q < avctx->qmax; q++) {
        ctx->qscale = q;
        avctx->execute(avctx, dnxhd_calc_bits_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);
    }
    up_step = down_step = 2<<LAMBDA_FRAC_BITS;
    lambda = ctx->lambda;

    for (;;) {
        int bits = 0;
        int end = 0;
        if (lambda == last_higher) {
            lambda++;
            end = 1; // need to set final qscales/bits
        }
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;
                int qscale = 1;
                int mb = y*ctx->m.mb_width+x;
                for (q = 1; q < avctx->qmax; q++) {
                    unsigned score = ctx->mb_rc[q][mb].bits*lambda+(ctx->mb_rc[q][mb].ssd<<LAMBDA_FRAC_BITS);
                    if (score < min) {
                        min = score;
                        qscale = q;
                    }
                }
                bits += ctx->mb_rc[qscale][mb].bits;
                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb] = ctx->mb_rc[qscale][mb].bits;
            }
            bits = (bits+31)&~31; // padding
            if (bits > ctx->frame_bits)
                break;
        }
        //dprintf(ctx->m.avctx, "lambda %d, up %u, down %u, bits %d, frame %d\n",
        //        lambda, last_higher, last_lower, bits, ctx->frame_bits);
        if (end) {
            if (bits > ctx->frame_bits)
                return -1;
            break;
        }
        if (bits < ctx->frame_bits) {
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda+last_higher)>>1;
            else
                lambda -= down_step;
            down_step *= 5; // XXX tune ?
            up_step = 1<<LAMBDA_FRAC_BITS;
            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)
                break;
        } else {
            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda+last_lower)>>1;
            else
                lambda += up_step;
            up_step *= 5;
            down_step = 1<<LAMBDA_FRAC_BITS;
        }
    }
    //dprintf(ctx->m.avctx, "out lambda %d\n", lambda);
    ctx->lambda = lambda;
    return 0;
}
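
/**
 * Fast rate control, step 1: bisect a single frame-wide qscale around the
 * bit budget, starting from the previous frame's value. Returns 1 if
 * qscale 1 already fits, -1 if even qmax cannot fit, and otherwise leaves
 * the bracketing qscale in ctx->qscale for the per-macroblock refinement
 * below.
 */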
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
{
    int bits = 0;
    int up_step = 1;
    int down_step = 1;
    int last_higher = 0;
    int last_lower = INT_MAX;
    int qscale;
    int x, y;

    qscale = ctx->qscale;
    for (;;) {
        bits = 0;
        ctx->qscale = qscale;
        // XXX avoid recalculating bits
        ctx->m.avctx->execute(ctx->m.avctx, dnxhd_calc_bits_thread, (void**)&ctx->thread[0], NULL, ctx->m.avctx->thread_count);
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)
                bits += ctx->mb_rc[qscale][y*ctx->m.mb_width+x].bits;
            bits = (bits+31)&~31; // padding
            if (bits > ctx->frame_bits)
                break;
        }
        //dprintf(ctx->m.avctx, "%d, qscale %d, bits %d, frame %d, higher %d, lower %d\n",
        //        ctx->m.avctx->frame_number, qscale, bits, ctx->frame_bits, last_higher, last_lower);
        if (bits < ctx->frame_bits) {
            if (qscale == 1)
                return 1;
            if (last_higher == qscale - 1) {
                qscale = last_higher;
                break;
            }
            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale+last_higher)>>1;
            else
                qscale -= down_step++;
            if (qscale < 1)
                qscale = 1;
            up_step = 1;
        } else {
            if (last_lower == qscale + 1)
                break;
            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale+last_lower)>>1;
            else
                qscale += up_step++;
            down_step = 1;
            if (qscale >= ctx->m.avctx->qmax)
                return -1;
        }
    }
    //dprintf(ctx->m.avctx, "out qscale %d\n", qscale);
    ctx->qscale = qscale;
    return 0;
}
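
/**
 * Fast rate control, step 2: start every macroblock at the frame-wide
 * qscale found above, then bump the macroblocks with the highest
 * comparison value (spatial variance by default, SSD gain per bit when
 * RC_VARIANCE is 0) to qscale+1 until the frame fits the bit budget.
 */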
static int dnxhd_rc_cmp(const void *a, const void *b)
{
    return ((RCCMPEntry *)b)->value - ((RCCMPEntry *)a)->value;
}

static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int max_bits = 0;
    int ret, x, y;
    if ((ret = dnxhd_find_qscale(ctx)) < 0)
        return -1;
    for (y = 0; y < ctx->m.mb_height; y++) {
        for (x = 0; x < ctx->m.mb_width; x++) {
            int mb = y*ctx->m.mb_width+x;
            int delta_bits;
            ctx->mb_qscale[mb] = ctx->qscale;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale][mb].bits;
            max_bits += ctx->mb_rc[ctx->qscale][mb].bits;
            if (!RC_VARIANCE) {
                delta_bits = ctx->mb_rc[ctx->qscale][mb].bits-ctx->mb_rc[ctx->qscale+1][mb].bits;
                ctx->mb_cmp[mb].mb = mb;
                ctx->mb_cmp[mb].value = delta_bits ?
                    ((ctx->mb_rc[ctx->qscale][mb].ssd-ctx->mb_rc[ctx->qscale+1][mb].ssd)*100)/delta_bits
                    : INT_MIN; //avoid increasing qscale
            }
        }
        max_bits += 31; //worst padding
    }
    if (!ret) {
        if (RC_VARIANCE)
            avctx->execute(avctx, dnxhd_mb_var_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);
        qsort(ctx->mb_cmp, ctx->m.mb_num, sizeof(RCCMPEntry), dnxhd_rc_cmp);
        for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
            int mb = ctx->mb_cmp[x].mb;
            max_bits -= ctx->mb_rc[ctx->qscale][mb].bits - ctx->mb_rc[ctx->qscale+1][mb].bits;
            ctx->mb_qscale[mb] = ctx->qscale+1;
            ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale+1][mb].bits;
        }
    }
    return 0;
}

static void dnxhd_load_picture(DNXHDEncContext *ctx, AVFrame *frame)
{
    int i;

    for (i = 0; i < 3; i++) {
        ctx->frame.data[i]     = frame->data[i];
        ctx->frame.linesize[i] = frame->linesize[i];
    }

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        ctx->thread[i]->m.linesize    = ctx->frame.linesize[0]<<ctx->interlaced;
        ctx->thread[i]->m.uvlinesize  = ctx->frame.linesize[1]<<ctx->interlaced;
        ctx->thread[i]->dct_y_offset  = ctx->m.linesize  *8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
    }

    ctx->frame.interlaced_frame = frame->interlaced_frame;
    ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}
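
/**
 * Top-level encode call: for each coding unit (one per field when
 * interlaced) write the header, run rate control, fill the slice offset
 * table, run the per-thread bitstream writers and append the EOF marker.
 */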
static int dnxhd_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int first_field = 1;
    int offset, i, ret;

    if (buf_size < ctx->cid_table->frame_size) {
        av_log(avctx, AV_LOG_ERROR, "output buffer is too small to compress picture\n");
        return -1;
    }

    dnxhd_load_picture(ctx, data);

 encode_coding_unit:
    for (i = 0; i < 3; i++) {
        ctx->src[i] = ctx->frame.data[i];
        if (ctx->interlaced && ctx->cur_field)
            ctx->src[i] += ctx->frame.linesize[i];
    }

    dnxhd_write_header(avctx, buf);

    if (avctx->mb_decision == FF_MB_DECISION_RD)
        ret = dnxhd_encode_rdo(avctx, ctx);
    else
        ret = dnxhd_encode_fast(avctx, ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "picture could not fit ratecontrol constraints\n");
        return -1;
    }

    dnxhd_setup_threads_slices(ctx, buf);

    offset = 0;
    for (i = 0; i < ctx->m.mb_height; i++) {
        AV_WB32(ctx->msip + i * 4, offset);
        offset += ctx->slice_size[i];
        assert(!(ctx->slice_size[i] & 3));
    }

    avctx->execute(avctx, dnxhd_encode_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);

    AV_WB32(buf + ctx->cid_table->coding_unit_size - 4, 0x600DC0DE); // EOF

    if (ctx->interlaced && first_field) {
        first_field = 0;
        ctx->cur_field ^= 1;
        buf      += ctx->cid_table->coding_unit_size;
        buf_size -= ctx->cid_table->coding_unit_size;
        goto encode_coding_unit;
    }

    return ctx->cid_table->frame_size;
}

static int dnxhd_encode_end(AVCodecContext *avctx)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int i;

    av_freep(&ctx->table_vlc_codes);
    av_freep(&ctx->table_vlc_bits);
    av_freep(&ctx->table_run_codes);
    av_freep(&ctx->table_run_bits);

    av_freep(&ctx->mb_bits);
    av_freep(&ctx->mb_qscale);
    av_freep(&ctx->mb_rc);
    av_freep(&ctx->mb_cmp);
    av_freep(&ctx->slice_size);

    av_freep(&ctx->qmatrix_c);
    av_freep(&ctx->qmatrix_l);
    av_freep(&ctx->qmatrix_c16);
    av_freep(&ctx->qmatrix_l16);

    for (i = 1; i < avctx->thread_count; i++)
        av_freep(&ctx->thread[i]);

    return 0;
}

AVCodec dnxhd_encoder = {
    "dnxhd",
    CODEC_TYPE_VIDEO,
    CODEC_ID_DNXHD,
    sizeof(DNXHDEncContext),
    dnxhd_encode_init,
    dnxhd_encode_picture,
    dnxhd_encode_end,
    .pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV422P, -1},
};