/*
 * AVS encoding using the xavs library
 * Copyright (C) 2010 Amanda, Y.N. Wu <amanda11192003@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <stdint.h>
#include <float.h>
#include <xavs.h>
#include "avcodec.h"
#include "internal.h"
#include "packet_internal.h"
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"

#define END_OF_STREAM 0x001

#define XAVS_PART_I8X8 0x002 /* Analyze i8x8 (requires 8x8 transform) */
#define XAVS_PART_P8X8 0x010 /* Analyze p16x8, p8x16 and p8x8 */
#define XAVS_PART_B8X8 0x100 /* Analyze b16x8, b8x16 and b8x8 */
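
/* Private encoder state: the libxavs handle and parameter set, the buffered
 * SEI from the header pass, and the AVOption-backed user settings. */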
typedef struct XavsContext {
    AVClass        *class;
    xavs_param_t    params;
    xavs_t         *enc;
    xavs_picture_t  pic;
    uint8_t        *sei;
    int             sei_size;
    int             end_of_stream;
    float           crf;
    int             cqp;
    int             b_bias;
    float           cplxblur;
    int             direct_pred;
    int             aud;
    int             fast_pskip;
    int             motion_est;
    int             mbtree;
    int             mixed_refs;
    int             b_frame_strategy;
    int             chroma_offset;
    int             scenechange_threshold;
    int             noise_reduction;

    int64_t        *pts_buffer;
    int             out_frame_count;
} XavsContext;

static void XAVS_log(void *p, int level, const char *fmt, va_list args)
{
    static const int level_map[] = {
        [XAVS_LOG_ERROR]   = AV_LOG_ERROR,
        [XAVS_LOG_WARNING] = AV_LOG_WARNING,
        [XAVS_LOG_INFO]    = AV_LOG_INFO,
        [XAVS_LOG_DEBUG]   = AV_LOG_DEBUG
    };

    if (level < 0 || level > XAVS_LOG_DEBUG)
        return;

    av_vlog(p, level_map[level], fmt, args);
}
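
/* Copy the buffered SEI (if any) and the encoded NAL units into one AVPacket.
 * Returns 1 when a packet was produced, 0 when there was nothing to emit,
 * or a negative value on error. */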
static int encode_nals(AVCodecContext *ctx, AVPacket *pkt,
                       xavs_nal_t *nals, int nnal)
{
    XavsContext *x4 = ctx->priv_data;
    uint8_t *p;
    int i, s, ret, size = x4->sei_size + AV_INPUT_BUFFER_MIN_SIZE;

    if (!nnal)
        return 0;

    for (i = 0; i < nnal; i++)
        size += nals[i].i_payload;

    if ((ret = ff_alloc_packet2(ctx, pkt, size, 0)) < 0)
        return ret;
    p = pkt->data;

    /* Write the SEI as part of the first frame. */
    if (x4->sei_size > 0 && nnal > 0) {
        memcpy(p, x4->sei, x4->sei_size);
        p += x4->sei_size;
        x4->sei_size = 0;
    }

    for (i = 0; i < nnal; i++) {
        s = xavs_nal_encode(p, &size, 1, nals + i);
        if (s < 0)
            return -1;
        p += s;
    }
    pkt->size = p - pkt->data;

    return 1;
}
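
/* Encode one frame (or flush the delayed frames when frame is NULL) and fill
 * in pts/dts, picture type and keyframe flags on the output packet. */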
static int XAVS_frame(AVCodecContext *avctx, AVPacket *pkt,
                      const AVFrame *frame, int *got_packet)
{
    XavsContext *x4 = avctx->priv_data;
    xavs_nal_t *nal;
    int nnal, i, ret;
    xavs_picture_t pic_out;
    int pict_type;

    x4->pic.img.i_csp   = XAVS_CSP_I420;
    x4->pic.img.i_plane = 3;

    if (frame) {
        for (i = 0; i < 3; i++) {
            x4->pic.img.plane[i]    = frame->data[i];
            x4->pic.img.i_stride[i] = frame->linesize[i];
        }

        x4->pic.i_pts  = frame->pts;
        x4->pic.i_type = XAVS_TYPE_AUTO;
        x4->pts_buffer[avctx->frame_number % (avctx->max_b_frames + 1)] = frame->pts;
    }

    if (xavs_encoder_encode(x4->enc, &nal, &nnal,
                            frame ? &x4->pic : NULL, &pic_out) < 0)
        return -1;

    ret = encode_nals(avctx, pkt, nal, nnal);
    if (ret < 0)
        return -1;

    if (!ret) {
        if (!frame && !(x4->end_of_stream)) {
            if ((ret = ff_alloc_packet2(avctx, pkt, 4, 0)) < 0)
                return ret;

            pkt->data[0] = 0x0;
            pkt->data[1] = 0x0;
            pkt->data[2] = 0x01;
            pkt->data[3] = 0xb1;
            pkt->dts = 2 * x4->pts_buffer[(x4->out_frame_count - 1) % (avctx->max_b_frames + 1)] -
                           x4->pts_buffer[(x4->out_frame_count - 2) % (avctx->max_b_frames + 1)];
            x4->end_of_stream = END_OF_STREAM;
            *got_packet = 1;
        }
        return 0;
    }

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pts = pic_out.i_pts;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
    pkt->pts = pic_out.i_pts;
    if (avctx->has_b_frames) {
        if (!x4->out_frame_count)
            pkt->dts = pkt->pts - (x4->pts_buffer[1] - x4->pts_buffer[0]);
        else
            pkt->dts = x4->pts_buffer[(x4->out_frame_count - 1) % (avctx->max_b_frames + 1)];
    } else
        pkt->dts = pkt->pts;

    switch (pic_out.i_type) {
    case XAVS_TYPE_IDR:
    case XAVS_TYPE_I:
        pict_type = AV_PICTURE_TYPE_I;
        break;
    case XAVS_TYPE_P:
        pict_type = AV_PICTURE_TYPE_P;
        break;
    case XAVS_TYPE_B:
    case XAVS_TYPE_BREF:
        pict_type = AV_PICTURE_TYPE_B;
        break;
    default:
        pict_type = AV_PICTURE_TYPE_NONE;
    }
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->pict_type = pict_type;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    /* There is no IDR frame in AVS JiZhun */
    /* Sequence header is used as a flag */
    if (pic_out.i_type == XAVS_TYPE_I) {
#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
        avctx->coded_frame->key_frame = 1;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
        pkt->flags |= AV_PKT_FLAG_KEY;
    }

#if FF_API_CODED_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    avctx->coded_frame->quality = (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    ff_side_data_set_encoder_stats(pkt, (pic_out.i_qpplus1 - 1) * FF_QP2LAMBDA, NULL, 0, pict_type);

    x4->out_frame_count++;
    *got_packet = ret;
    return 0;
}

static av_cold int XAVS_close(AVCodecContext *avctx)
{
    XavsContext *x4 = avctx->priv_data;

    av_freep(&avctx->extradata);
    av_freep(&x4->sei);
    av_freep(&x4->pts_buffer);

    if (x4->enc)
        xavs_encoder_close(x4->enc);

    return 0;
}
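
/* Map the AVCodecContext fields and private options onto xavs_param_t,
 * open the encoder and allocate the pts reorder buffer. */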
static av_cold int XAVS_init(AVCodecContext *avctx)
{
    XavsContext *x4 = avctx->priv_data;

    x4->sei_size = 0;
    xavs_param_default(&x4->params);

    x4->params.pf_log        = XAVS_log;
    x4->params.p_log_private = avctx;
    x4->params.i_keyint_max  = avctx->gop_size;
    if (avctx->bit_rate) {
        x4->params.rc.i_bitrate   = avctx->bit_rate / 1000;
        x4->params.rc.i_rc_method = XAVS_RC_ABR;
    }
    x4->params.rc.i_vbv_buffer_size = avctx->rc_buffer_size / 1000;
    x4->params.rc.i_vbv_max_bitrate = avctx->rc_max_rate / 1000;
    x4->params.rc.b_stat_write      = avctx->flags & AV_CODEC_FLAG_PASS1;
    if (avctx->flags & AV_CODEC_FLAG_PASS2) {
        x4->params.rc.b_stat_read = 1;
    } else {
        if (x4->crf >= 0) {
            x4->params.rc.i_rc_method   = XAVS_RC_CRF;
            x4->params.rc.f_rf_constant = x4->crf;
        } else if (x4->cqp >= 0) {
            x4->params.rc.i_rc_method   = XAVS_RC_CQP;
            x4->params.rc.i_qp_constant = x4->cqp;
        }
    }

    if (x4->aud >= 0)
        x4->params.b_aud = x4->aud;
    if (x4->mbtree >= 0)
        x4->params.rc.b_mb_tree = x4->mbtree;
    if (x4->direct_pred >= 0)
        x4->params.analyse.i_direct_mv_pred = x4->direct_pred;
    if (x4->fast_pskip >= 0)
        x4->params.analyse.b_fast_pskip = x4->fast_pskip;
    if (x4->motion_est >= 0)
        x4->params.analyse.i_me_method = x4->motion_est;
    if (x4->mixed_refs >= 0)
        x4->params.analyse.b_mixed_references = x4->mixed_refs;
    if (x4->b_bias != INT_MIN)
        x4->params.i_bframe_bias = x4->b_bias;
    if (x4->cplxblur >= 0)
        x4->params.rc.f_complexity_blur = x4->cplxblur;

    x4->params.i_bframe = avctx->max_b_frames;
    /* cabac is not included in AVS JiZhun Profile */
    x4->params.b_cabac  = 0;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->b_frame_strategy)
        x4->b_frame_strategy = avctx->b_frame_strategy;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    x4->params.i_bframe_adaptive = x4->b_frame_strategy;

    avctx->has_b_frames = !!avctx->max_b_frames;

    /* AVS doesn't allow B picture as reference */
    /* The max allowed reference frame number of B is 2 */
    x4->params.i_keyint_min = avctx->keyint_min;
    if (x4->params.i_keyint_min > x4->params.i_keyint_max)
        x4->params.i_keyint_min = x4->params.i_keyint_max;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->scenechange_threshold)
        x4->scenechange_threshold = avctx->scenechange_threshold;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    x4->params.i_scenecut_threshold = x4->scenechange_threshold;

    // x4->params.b_deblocking_filter = avctx->flags & AV_CODEC_FLAG_LOOP_FILTER;

    x4->params.rc.i_qp_min  = avctx->qmin;
    x4->params.rc.i_qp_max  = avctx->qmax;
    x4->params.rc.i_qp_step = avctx->max_qdiff;

    x4->params.rc.f_qcompress = avctx->qcompress; /* 0.0 => cbr, 1.0 => constant qp */
    x4->params.rc.f_qblur     = avctx->qblur;     /* temporally blur quants */

    x4->params.i_frame_reference = avctx->refs;

    x4->params.i_width          = avctx->width;
    x4->params.i_height         = avctx->height;
    x4->params.vui.i_sar_width  = avctx->sample_aspect_ratio.num;
    x4->params.vui.i_sar_height = avctx->sample_aspect_ratio.den;
    /* This is only used for counting the fps */
    x4->params.i_fps_num = avctx->time_base.den;
    x4->params.i_fps_den = avctx->time_base.num;
    x4->params.analyse.inter = XAVS_ANALYSE_I8x8 | XAVS_ANALYSE_PSUB16x16 | XAVS_ANALYSE_BSUB16x16;

    x4->params.analyse.i_me_range      = avctx->me_range;
    x4->params.analyse.i_subpel_refine = avctx->me_subpel_quality;

    x4->params.analyse.b_chroma_me = avctx->me_cmp & FF_CMP_CHROMA;
    /* AVS P2 only enables 8x8 transform */
    x4->params.analyse.b_transform_8x8 = 1; //avctx->flags2 & AV_CODEC_FLAG2_8X8DCT;

    x4->params.analyse.i_trellis = avctx->trellis;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->noise_reduction >= 0)
        x4->noise_reduction = avctx->noise_reduction;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    x4->params.analyse.i_noise_reduction = x4->noise_reduction;

    if (avctx->level > 0)
        x4->params.i_level_idc = avctx->level;

    if (avctx->bit_rate > 0)
        x4->params.rc.f_rate_tolerance =
            (float)avctx->bit_rate_tolerance / avctx->bit_rate;

    if ((avctx->rc_buffer_size) &&
        (avctx->rc_initial_buffer_occupancy <= avctx->rc_buffer_size)) {
        x4->params.rc.f_vbv_buffer_init =
            (float)avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
    } else
        x4->params.rc.f_vbv_buffer_init = 0.9;

    /* TAG: do we have MB tree RC method */
    /* what is the RC method we are now using? Default NO */
    x4->params.rc.f_ip_factor = 1 / fabs(avctx->i_quant_factor);
    x4->params.rc.f_pb_factor = avctx->b_quant_factor;

#if FF_API_PRIVATE_OPT
FF_DISABLE_DEPRECATION_WARNINGS
    if (avctx->chromaoffset)
        x4->chroma_offset = avctx->chromaoffset;
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    x4->params.analyse.i_chroma_qp_offset = x4->chroma_offset;

    x4->params.analyse.b_psnr = avctx->flags & AV_CODEC_FLAG_PSNR;
    x4->params.i_log_level    = XAVS_LOG_DEBUG;
    x4->params.i_threads      = avctx->thread_count;
    x4->params.b_interlaced   = avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT;

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)
        x4->params.b_repeat_headers = 0;

    x4->enc = xavs_encoder_open(&x4->params);
    if (!x4->enc)
        return -1;

    if (!(x4->pts_buffer = av_mallocz_array((avctx->max_b_frames + 1), sizeof(*x4->pts_buffer))))
        return AVERROR(ENOMEM);

    /* TAG: Do we have GLOBAL HEADER in AVS */
    /* We Have PPS and SPS in AVS */
    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER && 0) {
        xavs_nal_t *nal;
        int nnal, s, i, size;
        uint8_t *p;

        s = xavs_encoder_headers(x4->enc, &nal, &nnal);

        avctx->extradata = p = av_malloc(s);
        for (i = 0; i < nnal; i++) {
            /* Don't put the SEI in extradata. */
            if (nal[i].i_type == NAL_SEI) {
                x4->sei = av_malloc(5 + nal[i].i_payload * 4 / 3);
                if (xavs_nal_encode(x4->sei, &x4->sei_size, 1, nal + i) < 0)
                    return -1;

                continue;
            }
            size = xavs_nal_encode(p, &s, 1, nal + i);
            if (size < 0)
                return -1;
            p += size;
        }
        avctx->extradata_size = p - avctx->extradata;
    }

    return 0;
}

#define OFFSET(x) offsetof(XavsContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

static const AVOption options[] = {
    { "crf", "Select the quality for constant quality mode", OFFSET(crf), AV_OPT_TYPE_FLOAT, {.dbl = -1 }, -1, FLT_MAX, VE },
    { "qp", "Constant quantization parameter rate control method", OFFSET(cqp), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, VE },
    { "b-bias", "Influences how often B-frames are used", OFFSET(b_bias), AV_OPT_TYPE_INT, {.i64 = INT_MIN}, INT_MIN, INT_MAX, VE },
    { "cplxblur", "Reduce fluctuations in QP (before curve compression)", OFFSET(cplxblur), AV_OPT_TYPE_FLOAT, {.dbl = -1 }, -1, FLT_MAX, VE },
    { "direct-pred", "Direct MV prediction mode", OFFSET(direct_pred), AV_OPT_TYPE_INT, {.i64 = -1 }, -1, INT_MAX, VE, "direct-pred" },
    { "none",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_DIRECT_PRED_NONE },     0, 0, VE, "direct-pred" },
    { "spatial",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_DIRECT_PRED_SPATIAL },  0, 0, VE, "direct-pred" },
    { "temporal", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_DIRECT_PRED_TEMPORAL }, 0, 0, VE, "direct-pred" },
    { "auto",     NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_DIRECT_PRED_AUTO },     0, 0, VE, "direct-pred" },
    { "aud", "Use access unit delimiters.", OFFSET(aud), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },
    { "mbtree", "Use macroblock tree ratecontrol.", OFFSET(mbtree), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },
    { "mixed-refs", "One reference per partition, as opposed to one reference per macroblock", OFFSET(mixed_refs), AV_OPT_TYPE_BOOL, {.i64 = -1}, -1, 1, VE },
    { "fast-pskip", NULL, OFFSET(fast_pskip), AV_OPT_TYPE_BOOL, {.i64 = -1 }, -1, 1, VE },
    { "motion-est", "Set motion estimation method", OFFSET(motion_est), AV_OPT_TYPE_INT, { .i64 = XAVS_ME_DIA }, -1, XAVS_ME_TESA, VE, "motion-est" },
    { "dia",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_ME_DIA },  INT_MIN, INT_MAX, VE, "motion-est" },
    { "hex",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_ME_HEX },  INT_MIN, INT_MAX, VE, "motion-est" },
    { "umh",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_ME_UMH },  INT_MIN, INT_MAX, VE, "motion-est" },
    { "esa",  NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_ME_ESA },  INT_MIN, INT_MAX, VE, "motion-est" },
    { "tesa", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = XAVS_ME_TESA }, INT_MIN, INT_MAX, VE, "motion-est" },
    { "b_strategy", "Strategy to choose between I/P/B-frames", OFFSET(b_frame_strategy), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, 2, VE },
    { "chromaoffset", "QP difference between chroma and luma", OFFSET(chroma_offset), AV_OPT_TYPE_INT, {.i64 = 0 }, INT_MIN, INT_MAX, VE },
    { "sc_threshold", "Scene change threshold", OFFSET(scenechange_threshold), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, VE },
    { "noise_reduction", "Noise reduction", OFFSET(noise_reduction), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, INT_MAX, VE },
    { NULL },
};

static const AVClass xavs_class = {
    .class_name = "libxavs",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVCodecDefault xavs_defaults[] = {
    { "b", "0" },
    { NULL },
};

AVCodec ff_libxavs_encoder = {
    .name           = "libxavs",
    .long_name      = NULL_IF_CONFIG_SMALL("libxavs Chinese AVS (Audio Video Standard)"),
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_CAVS,
    .priv_data_size = sizeof(XavsContext),
    .init           = XAVS_init,
    .encode2        = XAVS_frame,
    .close          = XAVS_close,
    .capabilities   = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
    .caps_internal  = FF_CODEC_CAP_AUTO_THREADS,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
    .priv_class     = &xavs_class,
    .defaults       = xavs_defaults,
    .wrapper_name   = "libxavs",
};