/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <string.h>

#include <va/va.h>
#include <va/va_dec_vp8.h>

#include "vaapi_decode.h"
#include "vp8.h"
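/* Map a VP8 reference frame to the VAAPI surface backing it, falling back to
 * VA_INVALID_SURFACE when the reference does not exist (e.g. before the first
 * keyframe has been decoded). */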
static VASurfaceID vaapi_vp8_surface_id(VP8Frame *vf)
{
    if (vf)
        return ff_vaapi_get_surface_id(vf->tf.f);
    else
        return VA_INVALID_SURFACE;
}
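
/* Translate the frame header already parsed by the software VP8 decoder into
 * the VAAPI parameter buffers describing this frame: picture parameters,
 * coefficient probabilities and quantization indices. */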
static int vaapi_vp8_start_frame(AVCodecContext          *avctx,
                                 av_unused const uint8_t *buffer,
                                 av_unused uint32_t       size)
{
    const VP8Context *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
    VAPictureParameterBufferVP8 pp;
    VAProbabilityDataBufferVP8 prob;
    VAIQMatrixBufferVP8 quant;
    int err, i, j, k;

    pic->output_surface = vaapi_vp8_surface_id(s->framep[VP56_FRAME_CURRENT]);
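
    /* Picture parameters: frame geometry, the three reference surfaces and
     * the frame header flags.  Note that s->keyframe is inverted here: as in
     * the bitstream frame tag, a key_frame value of 0 marks a keyframe. */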
    pp = (VAPictureParameterBufferVP8) {
        .frame_width       = avctx->width,
        .frame_height      = avctx->height,

        .last_ref_frame    = vaapi_vp8_surface_id(s->framep[VP56_FRAME_PREVIOUS]),
        .golden_ref_frame  = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN]),
        .alt_ref_frame     = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN2]),
        .out_of_loop_frame = VA_INVALID_SURFACE,

        .pic_fields.bits = {
            .key_frame                   = !s->keyframe,
            .version                     = s->profile,
            .segmentation_enabled        = s->segmentation.enabled,
            .update_mb_segmentation_map  = s->segmentation.update_map,
            .update_segment_feature_data = s->segmentation.update_feature_data,
            .filter_type                 = s->filter.simple,
            .sharpness_level             = s->filter.sharpness,
            .loop_filter_adj_enable      = s->lf_delta.enabled,
            .mode_ref_lf_delta_update    = s->lf_delta.update,
            .sign_bias_golden            = s->sign_bias[VP56_FRAME_GOLDEN],
            .sign_bias_alternate         = s->sign_bias[VP56_FRAME_GOLDEN2],
            .mb_no_coeff_skip            = s->mbskip_enabled,
            .loop_filter_disable         = s->filter.level == 0,
        },

        .prob_skip_false = s->prob->mbskip,
        .prob_intra      = s->prob->intra,
        .prob_last       = s->prob->last,
        .prob_gf         = s->prob->golden,
    };
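
    /* Segment tree probabilities and per-segment loop filter levels; when the
     * segment values are relative rather than absolute, the frame-level filter
     * level is added before clipping to the 6-bit range. */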
    for (i = 0; i < 3; i++)
        pp.mb_segment_tree_probs[i] = s->prob->segmentid[i];

    for (i = 0; i < 4; i++) {
        if (s->segmentation.enabled) {
            pp.loop_filter_level[i] = s->segmentation.filter_level[i];
            if (!s->segmentation.absolute_vals)
                pp.loop_filter_level[i] += s->filter.level;
        } else {
            pp.loop_filter_level[i] = s->filter.level;
        }
        pp.loop_filter_level[i] = av_clip_uintp2(pp.loop_filter_level[i], 6);
    }

    for (i = 0; i < 4; i++) {
        pp.loop_filter_deltas_ref_frame[i] = s->lf_delta.ref[i];
        pp.loop_filter_deltas_mode[i]      = s->lf_delta.mode[i + 4];
    }

    if (s->keyframe) {
        static const uint8_t keyframe_y_mode_probs[4] = {
            145, 156, 163, 128
        };
        static const uint8_t keyframe_uv_mode_probs[3] = {
            142, 114, 183
        };
        memcpy(pp.y_mode_probs,  keyframe_y_mode_probs,  4);
        memcpy(pp.uv_mode_probs, keyframe_uv_mode_probs, 3);
    } else {
        for (i = 0; i < 4; i++)
            pp.y_mode_probs[i]  = s->prob->pred16x16[i];
        for (i = 0; i < 3; i++)
            pp.uv_mode_probs[i] = s->prob->pred8x8c[i];
    }

    for (i = 0; i < 2; i++)
        for (j = 0; j < 19; j++)
            pp.mv_probs[i][j] = s->prob->mvc[i][j];

    pp.bool_coder_ctx.range = s->coder_state_at_header_end.range;
    pp.bool_coder_ctx.value = s->coder_state_at_header_end.value;
    pp.bool_coder_ctx.count = s->coder_state_at_header_end.bit_count;
    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pp, sizeof(pp));
    if (err < 0)
        goto fail;
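
    /* The VAAPI probability buffer is indexed by coefficient band, while the
     * decoder keeps its token probabilities indexed by coefficient position;
     * coeff_bands_inverse picks one representative position for each band. */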
    for (i = 0; i < 4; i++) {
        for (j = 0; j < 8; j++) {
            static const int coeff_bands_inverse[8] = {
                0, 1, 2, 3, 5, 6, 4, 15
            };
            int coeff_pos = coeff_bands_inverse[j];

            for (k = 0; k < 3; k++) {
                memcpy(prob.dct_coeff_probs[i][j][k],
                       s->prob->token[i][coeff_pos][k], 11);
            }
        }
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAProbabilityBufferType,
                                            &prob, sizeof(prob));
    if (err < 0)
        goto fail;
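
    /* Quantization indices: the per-segment base index (absolute, or relative
     * to the frame-level yac index) plus the per-plane deltas, each clipped to
     * the 7-bit range. */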
    for (i = 0; i < 4; i++) {
        int base_qi = s->segmentation.base_quant[i];
        if (!s->segmentation.absolute_vals)
            base_qi += s->quant.yac_qi;

        quant.quantization_index[i][0] = av_clip_uintp2(base_qi,                       7);
        quant.quantization_index[i][1] = av_clip_uintp2(base_qi + s->quant.ydc_delta,  7);
        quant.quantization_index[i][2] = av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7);
        quant.quantization_index[i][3] = av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7);
        quant.quantization_index[i][4] = av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7);
        quant.quantization_index[i][5] = av_clip_uintp2(base_qi + s->quant.uvac_delta, 7);
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAIQMatrixBufferType,
                                            &quant, sizeof(quant));
    if (err < 0)
        goto fail;
    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}
static int vaapi_vp8_end_frame(AVCodecContext *avctx)
{
    const VP8Context *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;

    return ff_vaapi_decode_issue(avctx, pic);
}
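
/* A VP8 frame carries a single "slice".  The uncompressed frame tag (3 bytes,
 * plus a further 7 bytes on keyframes) is stripped, and the rest is passed to
 * the driver together with the bit offset at which per-macroblock data starts
 * and the sizes of the individual partitions. */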
static int vaapi_vp8_decode_slice(AVCodecContext *avctx,
                                  const uint8_t  *buffer,
                                  uint32_t        size)
{
    const VP8Context *s = avctx->priv_data;
    VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
    VASliceParameterBufferVP8 sp;
    int err, i;

    unsigned int header_size = 3 + 7 * s->keyframe;
    const uint8_t *data = buffer + header_size;
    unsigned int data_size = size - header_size;

    sp = (VASliceParameterBufferVP8) {
        .slice_data_size   = data_size,
        .slice_data_offset = 0,
        .slice_data_flag   = VA_SLICE_DATA_FLAG_ALL,

        .macroblock_offset = (8 * (s->coder_state_at_header_end.input - data) -
                              s->coder_state_at_header_end.bit_count - 8),
        .num_of_partitions = s->num_coeff_partitions + 1,
    };
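
    /* Partition 0 is what remains of the first (header) partition once the
     * frame header bits already consumed by the bool coder are excluded; the
     * following entries are the DCT coefficient partitions. */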
    sp.partition_size[0] = s->header_partition_size - ((sp.macroblock_offset + 7) / 8);
    for (i = 0; i < 8; i++)
        sp.partition_size[i+1] = s->coeff_partition_size[i];

    err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &sp, sizeof(sp), data, data_size);
    if (err)
        goto fail;

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}
AVHWAccel ff_vp8_vaapi_hwaccel = {
    .name                 = "vp8_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VP8,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = &vaapi_vp8_start_frame,
    .end_frame            = &vaapi_vp8_end_frame,
    .decode_slice         = &vaapi_vp8_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = &ff_vaapi_decode_init,
    .uninit               = &ff_vaapi_decode_uninit,
    .priv_data_size       = sizeof(VAAPIDecodeContext),
};