You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

848 lines
28KB

  1. /*
  2. * VC3/DNxHD encoder
  3. * Copyright (c) 2007 Baptiste Coudurier <baptiste dot coudurier at smartjog dot com>
  4. *
  5. * VC-3 encoder funded by the British Broadcasting Corporation
  6. *
  7. * This file is part of FFmpeg.
  8. *
  9. * FFmpeg is free software; you can redistribute it and/or
  10. * modify it under the terms of the GNU Lesser General Public
  11. * License as published by the Free Software Foundation; either
  12. * version 2.1 of the License, or (at your option) any later version.
  13. *
  14. * FFmpeg is distributed in the hope that it will be useful,
  15. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  16. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  17. * Lesser General Public License for more details.
  18. *
  19. * You should have received a copy of the GNU Lesser General Public
  20. * License along with FFmpeg; if not, write to the Free Software
  21. * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  22. */
  23. //#define DEBUG
  24. #define RC_VARIANCE 1 // use variance or ssd for fast rc
  25. #include "avcodec.h"
  26. #include "dsputil.h"
  27. #include "mpegvideo.h"
  28. #include "dnxhddata.h"
/* Per-macroblock entry used by the fast rate-control path; the array of
 * these is sorted by 'value' to decide which macroblocks get bumped to a
 * higher qscale when the frame does not fit. */
typedef struct {
    uint16_t mb;  // macroblock index (mb_y * mb_width + mb_x)
    int value;    // comparison metric (spatial variance, or ssd-per-bit delta)
} RCCMPEntry;
/* Rate-control statistics for one macroblock at one qscale. */
typedef struct {
    int ssd;   // distortion: sum of squared differences after quantize + idct
    int bits;  // bits needed to code the macroblock at this qscale
} RCEntry;

/* Reference C quantizer from mpegvideo; installed as fallback in
 * dnxhd_encode_init when dsputil provides no optimized dct_quantize. */
int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
typedef struct DNXHDEncContext {
    MpegEncContext m; ///< Used for quantization dsp functions

    AVFrame frame;             // coded_frame exposed to the caller
    int cid;                   // DNxHD compression ID
    const CIDEntry *cid_table; // per-CID parameters (sizes, weights, VLC data)
    uint8_t *msip; ///< Macroblock Scan Indices Payload
    uint32_t *slice_size;      // byte size of each macroblock row (slice)

    struct DNXHDEncContext *thread[MAX_THREADS]; // per-thread context copies; [0] is this

    unsigned dct_y_offset;     // offset from MB top-left to its lower luma blocks
    unsigned dct_uv_offset;    // same for chroma
    int interlaced;            // nonzero: encode as two fields (coding units)
    int cur_field;             // field currently being encoded (0/1)

    DECLARE_ALIGNED_16(DCTELEM, blocks[8][64]); // the 8 blocks of one 16x16 MB (4 luma, 2+2 chroma)

    // quant matrices, indexed by qscale (allocated up to avctx->qmax)
    int      (*qmatrix_c)     [64];
    int      (*qmatrix_l)     [64];
    uint16_t (*qmatrix_l16)[2][64];
    uint16_t (*qmatrix_c16)[2][64];

    unsigned frame_bits;       // bit budget for one coding unit (minus header/EOF)
    uint8_t *src[3];           // plane pointers for the field/frame being coded

    // VLC lookup tables built from cid_table by dnxhd_init_vlc
    uint16_t *table_vlc_codes;
    uint8_t  *table_vlc_bits;
    uint16_t *table_run_codes;
    uint8_t  *table_run_bits;

    /** Rate control */
    unsigned slice_bits;
    unsigned qscale;           // current frame-wide qscale (fast RC)
    unsigned lambda;           // current lagrange multiplier (RDO RC)

    unsigned thread_size;

    uint16_t *mb_bits;         // final bit count chosen per macroblock
    uint8_t  *mb_qscale;       // final qscale chosen per macroblock
    RCCMPEntry *mb_cmp;        // sort buffer for fast RC
    RCEntry   (*mb_rc)[8160];  // [qscale][mb] bit/ssd statistics
} DNXHDEncContext;
  71. #define LAMBDA_FRAC_BITS 10
/**
 * Build the encoder-side VLC lookup tables from the CID table.
 *
 * AC entries are indexed by a 9-bit key: bits 0-6 the level, bit 7 the
 * run flag, bit 8 the index (escape) flag — hence the 449-entry tables.
 * Run tables are indexed directly by the run length (< 63).
 *
 * @return 0 on success, -1 on allocation failure (via the 'fail' label
 *         that CHECKED_ALLOCZ jumps to)
 */
static int dnxhd_init_vlc(DNXHDEncContext *ctx)
{
    int i;

    CHECKED_ALLOCZ(ctx->table_vlc_codes, 449*2);
    CHECKED_ALLOCZ(ctx->table_vlc_bits,    449);
    CHECKED_ALLOCZ(ctx->table_run_codes,  63*2);
    CHECKED_ALLOCZ(ctx->table_run_bits,     63);

    for (i = 0; i < 257; i++) {
        // key = level | run_flag<<7 | index_flag<<8
        int level = ctx->cid_table->ac_level[i] +
                    (ctx->cid_table->ac_run_flag[i] << 7) + (ctx->cid_table->ac_index_flag[i] << 8);
        assert(level < 449);
        if (ctx->cid_table->ac_level[i] == 64 && ctx->cid_table->ac_index_flag[i])
            level -= 64; // use 0+(1<<8) level
        ctx->table_vlc_codes[level] = ctx->cid_table->ac_codes[i];
        ctx->table_vlc_bits [level] = ctx->cid_table->ac_bits[i];
    }
    for (i = 0; i < 62; i++) {
        int run = ctx->cid_table->run[i];
        assert(run < 63);
        ctx->table_run_codes[run] = ctx->cid_table->run_codes[i];
        ctx->table_run_bits [run] = ctx->cid_table->run_bits[i];
    }
    return 0;
fail:
    return -1;
}
/**
 * Allocate and fill the luma/chroma quantization matrices for every
 * qscale up to avctx->qmax, from the CID weight tables.
 *
 * @param lbias luma quantizer bias   (currently unused here; bias comes
 *              from ctx->m.intra_quant_bias — see callsite XXX)
 * @param cbias chroma quantizer bias (currently unused here)
 * @return 0 on success, -1 on allocation failure
 */
static int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{
    // init first elem to 1 to avoid div by 0 in convert_matrix
    uint16_t weight_matrix[64] = {1,}; // convert_matrix needs uint16_t*
    int qscale, i;

    CHECKED_ALLOCZ(ctx->qmatrix_l,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int));
    CHECKED_ALLOCZ(ctx->qmatrix_c,   (ctx->m.avctx->qmax+1) * 64 *     sizeof(int));
    CHECKED_ALLOCZ(ctx->qmatrix_l16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t));
    CHECKED_ALLOCZ(ctx->qmatrix_c16, (ctx->m.avctx->qmax+1) * 64 * 2 * sizeof(uint16_t));

    // weights are stored in zigzag order; map them to the idct permutation
    for (i = 1; i < 64; i++) {
        int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
        weight_matrix[j] = ctx->cid_table->luma_weight[i];
    }
    ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_l, ctx->qmatrix_l16, weight_matrix,
                      ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);
    for (i = 1; i < 64; i++) {
        int j = ctx->m.dsp.idct_permutation[ff_zigzag_direct[i]];
        weight_matrix[j] = ctx->cid_table->chroma_weight[i];
    }
    ff_convert_matrix(&ctx->m.dsp, ctx->qmatrix_c, ctx->qmatrix_c16, weight_matrix,
                      ctx->m.intra_quant_bias, 1, ctx->m.avctx->qmax, 1);

    // scale all matrices by 4 (<<2) for the DNxHD quantizer range
    for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) {
        for (i = 0; i < 64; i++) {
            ctx->qmatrix_l  [qscale]   [i] <<= 2; ctx->qmatrix_c  [qscale]   [i] <<= 2;
            ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][1][i] <<= 2;
            ctx->qmatrix_c16[qscale][0][i] <<= 2; ctx->qmatrix_c16[qscale][1][i] <<= 2;
        }
    }
    return 0;
fail:
    return -1;
}
/**
 * Allocate rate-control buffers and set the initial qscale/lambda.
 * @return 0 on success, -1 on allocation failure
 */
static int dnxhd_init_rc(DNXHDEncContext *ctx)
{
    CHECKED_ALLOCZ(ctx->mb_rc, 8160*ctx->m.avctx->qmax*sizeof(RCEntry));
    // mb_cmp is only needed by the fast (non-RD) rate-control path
    if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD)
        CHECKED_ALLOCZ(ctx->mb_cmp, ctx->m.mb_num*sizeof(RCCMPEntry));
    // budget = coding unit minus 640-byte header and 4-byte EOF marker
    ctx->frame_bits = (ctx->cid_table->coding_unit_size - 640 - 4) * 8;
    ctx->qscale = 1;
    ctx->lambda = 2<<LAMBDA_FRAC_BITS; // qscale 2
    return 0;
fail:
    return -1;
}
  142. static int dnxhd_encode_init(AVCodecContext *avctx)
  143. {
  144. DNXHDEncContext *ctx = avctx->priv_data;
  145. int i, index;
  146. ctx->cid = ff_dnxhd_find_cid(avctx);
  147. if (!ctx->cid || avctx->pix_fmt != PIX_FMT_YUV422P) {
  148. av_log(avctx, AV_LOG_ERROR, "video parameters incompatible with DNxHD\n");
  149. return -1;
  150. }
  151. av_log(avctx, AV_LOG_DEBUG, "cid %d\n", ctx->cid);
  152. index = ff_dnxhd_get_cid_table(ctx->cid);
  153. ctx->cid_table = &ff_dnxhd_cid_table[index];
  154. ctx->m.avctx = avctx;
  155. ctx->m.mb_intra = 1;
  156. ctx->m.h263_aic = 1;
  157. dsputil_init(&ctx->m.dsp, avctx);
  158. ff_dct_common_init(&ctx->m);
  159. if (!ctx->m.dct_quantize)
  160. ctx->m.dct_quantize = dct_quantize_c;
  161. ctx->m.mb_height = (avctx->height + 15) / 16;
  162. ctx->m.mb_width = (avctx->width + 15) / 16;
  163. if (avctx->flags & CODEC_FLAG_INTERLACED_DCT) {
  164. ctx->interlaced = 1;
  165. ctx->m.mb_height /= 2;
  166. }
  167. ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width;
  168. if (avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS)
  169. ctx->m.intra_quant_bias = avctx->intra_quant_bias;
  170. if (dnxhd_init_qmat(ctx, ctx->m.intra_quant_bias, 0) < 0) // XXX tune lbias/cbias
  171. return -1;
  172. if (dnxhd_init_vlc(ctx) < 0)
  173. return -1;
  174. if (dnxhd_init_rc(ctx) < 0)
  175. return -1;
  176. CHECKED_ALLOCZ(ctx->slice_size, ctx->m.mb_height*sizeof(uint32_t));
  177. CHECKED_ALLOCZ(ctx->mb_bits, ctx->m.mb_num *sizeof(uint16_t));
  178. CHECKED_ALLOCZ(ctx->mb_qscale, ctx->m.mb_num *sizeof(uint8_t));
  179. ctx->frame.key_frame = 1;
  180. ctx->frame.pict_type = FF_I_TYPE;
  181. ctx->m.avctx->coded_frame = &ctx->frame;
  182. if (avctx->thread_count > MAX_THREADS || (avctx->thread_count > ctx->m.mb_height)) {
  183. av_log(avctx, AV_LOG_ERROR, "too many threads\n");
  184. return -1;
  185. }
  186. ctx->thread[0] = ctx;
  187. for (i = 1; i < avctx->thread_count; i++) {
  188. ctx->thread[i] = av_malloc(sizeof(DNXHDEncContext));
  189. memcpy(ctx->thread[i], ctx, sizeof(DNXHDEncContext));
  190. }
  191. for (i = 0; i < avctx->thread_count; i++) {
  192. ctx->thread[i]->m.start_mb_y = (ctx->m.mb_height*(i ) + avctx->thread_count/2) / avctx->thread_count;
  193. ctx->thread[i]->m.end_mb_y = (ctx->m.mb_height*(i+1) + avctx->thread_count/2) / avctx->thread_count;
  194. }
  195. return 0;
  196. fail: //for CHECKED_ALLOCZ
  197. return -1;
  198. }
/**
 * Write the 640-byte DNxHD coding-unit header into buf and remember where
 * the Macroblock Scan Indices Payload starts (filled later with per-slice
 * offsets by dnxhd_encode_picture).
 *
 * @return 0 (always succeeds)
 */
static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    const uint8_t header_prefix[5] = { 0x00,0x00,0x02,0x80,0x01 };

    memcpy(buf, header_prefix, 5);
    buf[5] = ctx->interlaced ? ctx->cur_field+2 : 0x01; // frame=1, fields=2/3
    buf[6] = 0x80; // crc flag off
    buf[7] = 0xa0; // reserved
    AV_WB16(buf + 0x18, avctx->height); // ALPF
    AV_WB16(buf + 0x1a, avctx->width);  // SPL
    AV_WB16(buf + 0x1d, avctx->height); // NAL

    buf[0x21] = 0x38; // FIXME 8 bit per comp
    buf[0x22] = 0x88 + (ctx->frame.interlaced_frame<<2);
    AV_WB32(buf + 0x28, ctx->cid); // CID
    buf[0x2c] = ctx->interlaced ? 0 : 0x80;

    buf[0x5f] = 0x01; // UDL

    buf[0x167] = 0x02; // reserved
    AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4); // MSIPS
    buf[0x16d] = ctx->m.mb_height; // Ns
    buf[0x16f] = 0x10; // reserved

    ctx->msip = buf + 0x170; // slice offsets are written here later
    return 0;
}
/**
 * Write the VLC-coded DC coefficient difference to the bitstream:
 * a size code (dc_bits[nbits] bits) followed by nbits mantissa bits.
 * For negative diffs the mantissa is (diff-1) & mask, i.e. a
 * one's-complement-style representation of the magnitude.
 */
static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
{
    int nbits;
    if (diff < 0) {
        nbits = av_log2_16bit(-2*diff);
        diff--;
    } else {
        nbits = av_log2_16bit(2*diff);
    }
    put_bits(&ctx->m.pb, ctx->cid_table->dc_bits[nbits] + nbits,
             (ctx->cid_table->dc_codes[nbits]<<nbits) + (diff & ((1 << nbits) - 1)));
}
/**
 * Entropy-code one quantized 8x8 block: DC difference first, then the AC
 * coefficients in scan order as (level VLC + sign bit)[+escape offset]
 * [+run VLC], terminated by the EOB code.
 *
 * The level key matches dnxhd_init_vlc: bit 7 flags a preceding run,
 * bit 8 flags the escape for |level| > 64 (extra 4-bit offset follows).
 *
 * @param n component index, used to select the DC predictor
 */
static av_always_inline void dnxhd_encode_block(DNXHDEncContext *ctx, DCTELEM *block, int last_index, int n)
{
    int last_non_zero = 0;
    int offset = 0;
    int slevel, i, j;

    dnxhd_encode_dc(ctx, block[0] - ctx->m.last_dc[n]);
    ctx->m.last_dc[n] = block[0];

    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        slevel = block[j];
        if (slevel) {
            int run_level = i - last_non_zero - 1;
            int sign;
            MASK_ABS(sign, slevel); // sign = all-ones if negative, slevel = |slevel|
            if (slevel > 64) {
                offset = (slevel-1) >> 6; // escape: coarse magnitude, 4 bits
                slevel = 256 | (slevel & 63); // level 64 is treated as 0
            }
            if (run_level)
                slevel |= 128; // select the "run follows" VLC variant
            put_bits(&ctx->m.pb, ctx->table_vlc_bits[slevel]+1, (ctx->table_vlc_codes[slevel]<<1)|(sign&1));
            if (offset) {
                put_bits(&ctx->m.pb, 4, offset);
                offset = 0;
            }
            if (run_level)
                put_bits(&ctx->m.pb, ctx->table_run_bits[run_level], ctx->table_run_codes[run_level]);
            last_non_zero = i;
        }
    }

    put_bits(&ctx->m.pb, ctx->table_vlc_bits[0], ctx->table_vlc_codes[0]); // EOB
}
/**
 * Dequantize a block in place (inverse of the quantizer), using the
 * CID weight table: level' = (2*|level|+1) * qscale * weight / 64,
 * rounded (+32) unless the weight is exactly 32, sign restored.
 * Used only to measure distortion (ssd) for rate control.
 *
 * @param n block index within the MB; n&2 selects the chroma weights
 */
static av_always_inline void dnxhd_unquantize_c(DNXHDEncContext *ctx, DCTELEM *block, int n, int qscale, int last_index)
{
    const uint8_t *weight_matrix;
    int level;
    int i;

    weight_matrix = (n&2) ? ctx->cid_table->chroma_weight : ctx->cid_table->luma_weight;

    for (i = 1; i <= last_index; i++) {
        int j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            if (level < 0) {
                level = (1-2*level) * qscale * weight_matrix[i];
                if (weight_matrix[i] != 32) // weight 32 needs no rounding bias
                    level += 32;
                level >>= 6;
                level = -level;
            } else {
                level = (2*level+1) * qscale * weight_matrix[i];
                if (weight_matrix[i] != 32)
                    level += 32;
                level >>= 6;
            }
            block[j] = level;
        }
    }
}
  292. static av_always_inline int dnxhd_ssd_block(DCTELEM *qblock, DCTELEM *block)
  293. {
  294. int score = 0;
  295. int i;
  296. for (i = 0; i < 64; i++)
  297. score += (block[i]-qblock[i])*(block[i]-qblock[i]);
  298. return score;
  299. }
/**
 * Count the bits dnxhd_encode_block would emit for the AC coefficients
 * of this block, without writing anything. Mirrors the coding path:
 * level VLC + sign (+4 escape bits for |level| > 64) + run VLC.
 * table_run_bits[0] is 0 (zero-allocated), so a zero run adds nothing.
 *
 * @return AC bit count (EOB not included; added by the caller)
 */
static av_always_inline int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, DCTELEM *block, int last_index)
{
    int last_non_zero = 0;
    int bits = 0;
    int i, j, level;
    for (i = 1; i <= last_index; i++) {
        j = ctx->m.intra_scantable.permutated[i];
        level = block[j];
        if (level) {
            int run_level = i - last_non_zero - 1;
            level = FFABS(level);
            if (level > 64) {
                level = 256 | (level & 63); // level 64 is treated as 0
                bits += 4; // escape offset
            }
            level |= (!!run_level)<<7; // same key as dnxhd_encode_block
            bits += ctx->table_vlc_bits[level]+1 + ctx->table_run_bits[run_level];
            last_non_zero = i;
        }
    }
    return bits;
}
/**
 * Fill an 8x8 DCT block from only 4 source rows: rows 0-3 are read from
 * the picture, then mirrored into rows 4-7 (row4=row3, row5=row2,
 * row6=row1, row7=row0). Used for the bottom half-macroblock of
 * interlaced 1080-line content, where only 4 field lines exist.
 */
static av_always_inline void dnxhd_get_pixels_4x8(DCTELEM *restrict block, const uint8_t *pixels, int line_size)
{
    int i;
    for (i = 0; i < 4; i++) {
        block[0] = pixels[0];
        block[1] = pixels[1];
        block[2] = pixels[2];
        block[3] = pixels[3];
        block[4] = pixels[4];
        block[5] = pixels[5];
        block[6] = pixels[6];
        block[7] = pixels[7];
        pixels += line_size;
        block  += 8;
    }
    // mirror the 4 copied rows into the bottom half (block now at row 4)
    memcpy(block     , block- 8, sizeof(*block)*8);
    memcpy(block+ 8, block-16, sizeof(*block)*8);
    memcpy(block+16, block-24, sizeof(*block)*8);
    memcpy(block+24, block-32, sizeof(*block)*8);
}
/**
 * Load the 8 DCT blocks of macroblock (mb_x, mb_y) from the source
 * planes into ctx->blocks: [0..1] upper luma, [2..3] upper chroma,
 * [4..7] the lower half of the MB.
 *
 * For the last MB row of 1080-line content the bottom 8 lines lie
 * outside the picture: interlaced uses the 4x8 mirror loader,
 * progressive zeroes blocks 4-7 entirely.
 */
static av_always_inline void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
{
    const uint8_t *ptr_y = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize)   + (mb_x << 4);
    const uint8_t *ptr_u = ctx->thread[0]->src[1] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3);
    const uint8_t *ptr_v = ctx->thread[0]->src[2] + ((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << 3);
    DSPContext *dsp = &ctx->m.dsp;

    dsp->get_pixels(ctx->blocks[0], ptr_y    , ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[1], ptr_y + 8, ctx->m.linesize);
    dsp->get_pixels(ctx->blocks[2], ptr_u    , ctx->m.uvlinesize);
    dsp->get_pixels(ctx->blocks[3], ptr_v    , ctx->m.uvlinesize);

    if (mb_y+1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) {
        // bottom MB row of 1080 lines: lower 8 lines are missing
        if (ctx->interlaced) {
            dnxhd_get_pixels_4x8(ctx->blocks[4], ptr_y + ctx->dct_y_offset    , ctx->m.linesize);
            dnxhd_get_pixels_4x8(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize);
            dnxhd_get_pixels_4x8(ctx->blocks[6], ptr_u + ctx->dct_uv_offset   , ctx->m.uvlinesize);
            dnxhd_get_pixels_4x8(ctx->blocks[7], ptr_v + ctx->dct_uv_offset   , ctx->m.uvlinesize);
        } else
            memset(ctx->blocks[4], 0, 4*64*sizeof(DCTELEM)); // zeroes blocks 4..7
    } else {
        dsp->get_pixels(ctx->blocks[4], ptr_y + ctx->dct_y_offset    , ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[5], ptr_y + ctx->dct_y_offset + 8, ctx->m.linesize);
        dsp->get_pixels(ctx->blocks[6], ptr_u + ctx->dct_uv_offset   , ctx->m.uvlinesize);
        dsp->get_pixels(ctx->blocks[7], ptr_v + ctx->dct_uv_offset   , ctx->m.uvlinesize);
    }
}
  367. static av_always_inline int dnxhd_switch_matrix(DNXHDEncContext *ctx, int i)
  368. {
  369. if (i&2) {
  370. ctx->m.q_intra_matrix16 = ctx->qmatrix_c16;
  371. ctx->m.q_intra_matrix = ctx->qmatrix_c;
  372. return 1 + (i&1);
  373. } else {
  374. ctx->m.q_intra_matrix16 = ctx->qmatrix_l16;
  375. ctx->m.q_intra_matrix = ctx->qmatrix_l;
  376. return 0;
  377. }
  378. }
/**
 * Worker for avctx->execute: for every macroblock in this thread's row
 * range, quantize at the current global qscale and record the bit cost
 * (and, in RD mode, the reconstruction ssd) into ctx->mb_rc[qscale][mb].
 * The source blocks are left untouched (quantization runs on a copy).
 *
 * @param arg the per-thread DNXHDEncContext
 * @return 0
 */
static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;
    int qscale = ctx->thread[0]->qscale; // shared current qscale

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        // DC predictors reset at the start of each slice (MB row)
        ctx->m.last_dc[0] =
        ctx->m.last_dc[1] =
        ctx->m.last_dc[2] = 1024;

        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int ssd     = 0;
            int ac_bits = 0;
            int dc_bits = 0;
            int i;

            dnxhd_get_blocks(ctx, mb_x, mb_y);

            for (i = 0; i < 8; i++) {
                DECLARE_ALIGNED_16(DCTELEM, block[64]);
                DCTELEM *src_block = ctx->blocks[i];
                int overflow, nbits, diff, last_index;
                int n = dnxhd_switch_matrix(ctx, i);

                memcpy(block, src_block, sizeof(block)); // keep source intact
                last_index = ctx->m.dct_quantize((MpegEncContext*)ctx, block, i, qscale, &overflow);
                ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index);

                // DC cost, mirroring dnxhd_encode_dc
                diff = block[0] - ctx->m.last_dc[n];
                if (diff < 0) nbits = av_log2_16bit(-2*diff);
                else          nbits = av_log2_16bit( 2*diff);
                dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;

                ctx->m.last_dc[n] = block[0];

                // ssd is only needed for RD decisions (or when RC_VARIANCE is off)
                if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) {
                    dnxhd_unquantize_c(ctx, block, i, qscale, last_index);
                    ctx->m.dsp.idct(block);
                    ssd += dnxhd_ssd_block(block, src_block);
                }
            }
            ctx->mb_rc[qscale][mb].ssd = ssd;
            // +12 for the MB qscale field, + 8 EOB codes
            ctx->mb_rc[qscale][mb].bits = ac_bits+dc_bits+12+8*ctx->table_vlc_bits[0];
        }
    }
    return 0;
}
/**
 * Worker for avctx->execute: actually encode this thread's macroblock
 * rows into its PutBitContext using the per-MB qscales chosen by rate
 * control. Each slice (MB row) is padded to a 32-bit boundary.
 *
 * @param arg the per-thread DNXHDEncContext
 * @return 0
 */
static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        // DC predictors reset per slice
        ctx->m.last_dc[0] =
        ctx->m.last_dc[1] =
        ctx->m.last_dc[2] = 1024;
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb = mb_y * ctx->m.mb_width + mb_x;
            int qscale = ctx->mb_qscale[mb];
            int i;

            put_bits(&ctx->m.pb, 12, qscale<<1); // MB header: qscale field

            dnxhd_get_blocks(ctx, mb_x, mb_y);

            for (i = 0; i < 8; i++) {
                DCTELEM *block = ctx->blocks[i];
                int last_index, overflow;
                int n = dnxhd_switch_matrix(ctx, i);
                last_index = ctx->m.dct_quantize((MpegEncContext*)ctx, block, i, qscale, &overflow);
                dnxhd_encode_block(ctx, block, last_index, n);
            }
        }
        // pad slice to a 32-bit boundary
        if (put_bits_count(&ctx->m.pb)&31)
            put_bits(&ctx->m.pb, 32-(put_bits_count(&ctx->m.pb)&31), 0);
    }
    flush_put_bits(&ctx->m.pb);
    return 0;
}
/**
 * Compute each slice's byte size from the per-MB bit counts (rounded up
 * to 32-bit units) and point each thread's PutBitContext at its region
 * of the output buffer, right after the 640-byte header.
 */
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx, uint8_t *buf)
{
    int mb_y, mb_x;
    int i, offset = 0;

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        int thread_size = 0;
        for (mb_y = ctx->thread[i]->m.start_mb_y; mb_y < ctx->thread[i]->m.end_mb_y; mb_y++) {
            ctx->slice_size[mb_y] = 0;
            for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
                unsigned mb = mb_y * ctx->m.mb_width + mb_x;
                ctx->slice_size[mb_y] += ctx->mb_bits[mb];
            }
            ctx->slice_size[mb_y] = (ctx->slice_size[mb_y]+31)&~31; // 32-bit align
            ctx->slice_size[mb_y] >>= 3; // bits -> bytes
            thread_size += ctx->slice_size[mb_y];
        }
        init_put_bits(&ctx->thread[i]->m.pb, buf + 640 + offset, thread_size);
        offset += thread_size;
    }
}
/**
 * Worker for avctx->execute: compute the luma variance of every
 * macroblock in this thread's row range and store it (with the MB
 * index) into ctx->mb_cmp for the fast rate-control sort.
 *
 * @param arg the per-thread DNXHDEncContext
 * @return 0
 */
static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg)
{
    DNXHDEncContext *ctx = arg;
    int mb_y, mb_x;

    for (mb_y = ctx->m.start_mb_y; mb_y < ctx->m.end_mb_y; mb_y++) {
        for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) {
            unsigned mb  = mb_y * ctx->m.mb_width + mb_x;
            uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y<<4) * ctx->m.linesize) + (mb_x<<4);
            int sum = ctx->m.dsp.pix_sum(pix, ctx->m.linesize);
            // variance = E[x^2] - E[x]^2 over 256 pixels, rounded
            int varc = (ctx->m.dsp.pix_norm1(pix, ctx->m.linesize) - (((unsigned)(sum*sum))>>8)+128)>>8;
            ctx->mb_cmp[mb].value = varc;
            ctx->mb_cmp[mb].mb = mb;
        }
    }
    return 0;
}
/**
 * Rate-distortion optimal rate control: pre-compute bits/ssd for every
 * qscale, then search for a lagrange multiplier lambda such that picking
 * the per-MB qscale minimizing bits*lambda + ssd<<LAMBDA_FRAC_BITS makes
 * the frame fit in ctx->frame_bits. The search widens its step (x5) while
 * overshooting and bisects once the target is bracketed.
 *
 * On success the per-MB qscales/bits are left in ctx->mb_qscale /
 * ctx->mb_bits and the final lambda is kept as the next frame's start.
 *
 * @return 0 on success, -1 if no lambda makes the frame fit
 */
static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{
    int lambda, up_step, down_step;
    int last_lower = INT_MAX, last_higher = 0;
    int x, y, q;

    // fill mb_rc[q][mb] for every usable qscale
    for (q = 1; q < avctx->qmax; q++) {
        ctx->qscale = q;
        avctx->execute(avctx, dnxhd_calc_bits_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);
    }
    up_step = down_step = 2<<LAMBDA_FRAC_BITS;
    lambda = ctx->lambda;

    for (;;) {
        int bits = 0;
        int end = 0;
        if (lambda == last_higher) {
            lambda++;
            end = 1; // need to set final qscales/bits
        }
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++) {
                unsigned min = UINT_MAX;
                int qscale = 1;
                int mb = y*ctx->m.mb_width+x;
                // pick the qscale minimizing the RD cost at this lambda
                for (q = 1; q < avctx->qmax; q++) {
                    unsigned score = ctx->mb_rc[q][mb].bits*lambda+(ctx->mb_rc[q][mb].ssd<<LAMBDA_FRAC_BITS);
                    if (score < min) {
                        min = score;
                        qscale = q;
                    }
                }
                bits += ctx->mb_rc[qscale][mb].bits;
                ctx->mb_qscale[mb] = qscale;
                ctx->mb_bits[mb] = ctx->mb_rc[qscale][mb].bits;
            }
            bits = (bits+31)&~31; // padding
            if (bits > ctx->frame_bits)
                break; // already over budget, no need to finish the frame
        }
        //dprintf(ctx->m.avctx, "lambda %d, up %u, down %u, bits %d, frame %d\n",
        //        lambda, last_higher, last_lower, bits, ctx->frame_bits);
        if (end) {
            if (bits > ctx->frame_bits)
                return -1;
            break;
        }
        if (bits < ctx->frame_bits) {
            // fits: try a smaller lambda (better quality)
            last_lower = FFMIN(lambda, last_lower);
            if (last_higher != 0)
                lambda = (lambda+last_higher)>>1; // bisect
            else
                lambda -= down_step;
            down_step *= 5; // XXX tune ?
            up_step = 1<<LAMBDA_FRAC_BITS;
            lambda = FFMAX(1, lambda);
            if (lambda == last_lower)
                break;
        } else {
            // too big: raise lambda
            last_higher = FFMAX(lambda, last_higher);
            if (last_lower != INT_MAX)
                lambda = (lambda+last_lower)>>1; // bisect
            else
                lambda += up_step;
            up_step *= 5;
            down_step = 1<<LAMBDA_FRAC_BITS;
        }
    }
    //dprintf(ctx->m.avctx, "out lambda %d\n", lambda);
    ctx->lambda = lambda; // warm start for the next frame
    return 0;
}
/**
 * Fast rate control: search for the smallest frame-wide qscale whose
 * total bit count fits in ctx->frame_bits. Bit counts for each probed
 * qscale are (re)computed via dnxhd_calc_bits_thread; the search steps
 * with growing increments until the target is bracketed, then bisects.
 *
 * @return 0 on success (ctx->qscale set), 1 if qscale 1 already fits
 *         (caller can skip the refinement pass), -1 if even qmax-1
 *         does not fit
 */
static int dnxhd_find_qscale(DNXHDEncContext *ctx)
{
    int bits = 0;
    int up_step = 1;
    int down_step = 1;
    int last_higher = 0;
    int last_lower = INT_MAX;
    int qscale;
    int x, y;

    qscale = ctx->qscale; // warm start from the previous frame
    for (;;) {
        bits = 0;
        ctx->qscale = qscale;
        // XXX avoid recalculating bits
        ctx->m.avctx->execute(ctx->m.avctx, dnxhd_calc_bits_thread, (void**)&ctx->thread[0], NULL, ctx->m.avctx->thread_count);
        for (y = 0; y < ctx->m.mb_height; y++) {
            for (x = 0; x < ctx->m.mb_width; x++)
                bits += ctx->mb_rc[qscale][y*ctx->m.mb_width+x].bits;
            bits = (bits+31)&~31; // padding
            if (bits > ctx->frame_bits)
                break; // over budget already, stop summing
        }
        //dprintf(ctx->m.avctx, "%d, qscale %d, bits %d, frame %d, higher %d, lower %d\n",
        //        ctx->m.avctx->frame_number, qscale, bits, ctx->frame_bits, last_higher, last_lower);
        if (bits < ctx->frame_bits) {
            if (qscale == 1)
                return 1; // best possible quality already fits
            if (last_higher == qscale - 1) {
                qscale = last_higher; // converged
                break;
            }
            last_lower = FFMIN(qscale, last_lower);
            if (last_higher != 0)
                qscale = (qscale+last_higher)>>1; // bisect
            else
                qscale -= down_step++;
            if (qscale < 1)
                qscale = 1;
            up_step = 1;
        } else {
            if (last_lower == qscale + 1)
                break; // converged on the fitting neighbour
            last_higher = FFMAX(qscale, last_higher);
            if (last_lower != INT_MAX)
                qscale = (qscale+last_lower)>>1; // bisect
            else
                qscale += up_step++;
            down_step = 1;
            if (qscale >= ctx->m.avctx->qmax)
                return -1; // nothing fits
        }
    }
    //dprintf(ctx->m.avctx, "out qscale %d\n", qscale);
    ctx->qscale = qscale;
    return 0;
}
  610. static int dnxhd_rc_cmp(const void *a, const void *b)
  611. {
  612. return ((const RCCMPEntry *)b)->value - ((const RCCMPEntry *)a)->value;
  613. }
  614. static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
  615. {
  616. int max_bits = 0;
  617. int ret, x, y;
  618. if ((ret = dnxhd_find_qscale(ctx)) < 0)
  619. return -1;
  620. for (y = 0; y < ctx->m.mb_height; y++) {
  621. for (x = 0; x < ctx->m.mb_width; x++) {
  622. int mb = y*ctx->m.mb_width+x;
  623. int delta_bits;
  624. ctx->mb_qscale[mb] = ctx->qscale;
  625. ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale][mb].bits;
  626. max_bits += ctx->mb_rc[ctx->qscale][mb].bits;
  627. if (!RC_VARIANCE) {
  628. delta_bits = ctx->mb_rc[ctx->qscale][mb].bits-ctx->mb_rc[ctx->qscale+1][mb].bits;
  629. ctx->mb_cmp[mb].mb = mb;
  630. ctx->mb_cmp[mb].value = delta_bits ?
  631. ((ctx->mb_rc[ctx->qscale][mb].ssd-ctx->mb_rc[ctx->qscale+1][mb].ssd)*100)/delta_bits
  632. : INT_MIN; //avoid increasing qscale
  633. }
  634. }
  635. max_bits += 31; //worst padding
  636. }
  637. if (!ret) {
  638. if (RC_VARIANCE)
  639. avctx->execute(avctx, dnxhd_mb_var_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);
  640. qsort(ctx->mb_cmp, ctx->m.mb_num, sizeof(RCEntry), dnxhd_rc_cmp);
  641. for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) {
  642. int mb = ctx->mb_cmp[x].mb;
  643. max_bits -= ctx->mb_rc[ctx->qscale][mb].bits - ctx->mb_rc[ctx->qscale+1][mb].bits;
  644. ctx->mb_qscale[mb] = ctx->qscale+1;
  645. ctx->mb_bits[mb] = ctx->mb_rc[ctx->qscale+1][mb].bits;
  646. }
  647. }
  648. return 0;
  649. }
/**
 * Hook the input picture's planes into the encoder state and propagate
 * line sizes / block offsets to every thread context. For interlaced
 * coding the line sizes are doubled so a "row" steps over one field.
 * Also decides which field is coded first from top_field_first.
 */
static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{
    int i;

    for (i = 0; i < 3; i++) {
        ctx->frame.data[i]     = frame->data[i];
        ctx->frame.linesize[i] = frame->linesize[i];
    }

    for (i = 0; i < ctx->m.avctx->thread_count; i++) {
        // <<interlaced: skip every other line when coding fields
        ctx->thread[i]->m.linesize    = ctx->frame.linesize[0]<<ctx->interlaced;
        ctx->thread[i]->m.uvlinesize  = ctx->frame.linesize[1]<<ctx->interlaced;
        ctx->thread[i]->dct_y_offset  = ctx->m.linesize  *8;
        ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
    }

    ctx->frame.interlaced_frame = frame->interlaced_frame;
    ctx->cur_field = frame->interlaced_frame && !frame->top_field_first;
}
/**
 * Encode one picture (or, interlaced, two field coding units back to
 * back): header, rate control, slice layout, threaded entropy coding,
 * slice-offset table (MSIP) and the 0x600DC0DE end marker.
 *
 * @param data the input AVFrame
 * @return total byte size (cid_table->frame_size), or -1 on error
 */
static int dnxhd_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, const void *data)
{
    DNXHDEncContext *ctx = avctx->priv_data;
    int first_field = 1;
    int offset, i, ret;

    if (buf_size < ctx->cid_table->frame_size) {
        av_log(avctx, AV_LOG_ERROR, "output buffer is too small to compress picture\n");
        return -1;
    }

    dnxhd_load_picture(ctx, data);

 encode_coding_unit:
    // select the plane start for the current field
    for (i = 0; i < 3; i++) {
        ctx->src[i] = ctx->frame.data[i];
        if (ctx->interlaced && ctx->cur_field)
            ctx->src[i] += ctx->frame.linesize[i];
    }

    dnxhd_write_header(avctx, buf);

    if (avctx->mb_decision == FF_MB_DECISION_RD)
        ret = dnxhd_encode_rdo(avctx, ctx);
    else
        ret = dnxhd_encode_fast(avctx, ctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "picture could not fit ratecontrol constraints\n");
        return -1;
    }

    dnxhd_setup_threads_slices(ctx, buf);

    // write the slice offset table (MSIP), one 32-bit offset per MB row
    offset = 0;
    for (i = 0; i < ctx->m.mb_height; i++) {
        AV_WB32(ctx->msip + i * 4, offset);
        offset += ctx->slice_size[i];
        assert(!(ctx->slice_size[i] & 3));
    }

    avctx->execute(avctx, dnxhd_encode_thread, (void**)&ctx->thread[0], NULL, avctx->thread_count);

    AV_WB32(buf + ctx->cid_table->coding_unit_size - 4, 0x600DC0DE); // EOF

    if (ctx->interlaced && first_field) {
        // second field goes into the next coding unit
        first_field = 0;
        ctx->cur_field ^= 1;
        buf      += ctx->cid_table->coding_unit_size;
        buf_size -= ctx->cid_table->coding_unit_size;
        goto encode_coding_unit;
    }

    return ctx->cid_table->frame_size;
}
  709. static int dnxhd_encode_end(AVCodecContext *avctx)
  710. {
  711. DNXHDEncContext *ctx = avctx->priv_data;
  712. int i;
  713. av_freep(&ctx->table_vlc_codes);
  714. av_freep(&ctx->table_vlc_bits);
  715. av_freep(&ctx->table_run_codes);
  716. av_freep(&ctx->table_run_bits);
  717. av_freep(&ctx->mb_bits);
  718. av_freep(&ctx->mb_qscale);
  719. av_freep(&ctx->mb_rc);
  720. av_freep(&ctx->mb_cmp);
  721. av_freep(&ctx->slice_size);
  722. av_freep(&ctx->qmatrix_c);
  723. av_freep(&ctx->qmatrix_l);
  724. av_freep(&ctx->qmatrix_c16);
  725. av_freep(&ctx->qmatrix_l16);
  726. for (i = 1; i < avctx->thread_count; i++)
  727. av_freep(&ctx->thread[i]);
  728. return 0;
  729. }
/* Codec registration entry (positional AVCodec fields, old-style). */
AVCodec dnxhd_encoder = {
    "dnxhd",                 // name
    CODEC_TYPE_VIDEO,        // type
    CODEC_ID_DNXHD,          // id
    sizeof(DNXHDEncContext), // priv_data_size
    dnxhd_encode_init,       // init
    dnxhd_encode_picture,    // encode
    dnxhd_encode_end,        // close
    .pix_fmts = (enum PixelFormat[]){PIX_FMT_YUV422P, -1},
};