/*
 * VC-1 HW decode acceleration through VA API
 *
 * Copyright (C) 2008-2009 Splitted-Desktop Systems
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "hwaccel.h"
#include "internal.h"
#include "vaapi_decode.h"
#include "vc1.h"
#include "vc1data.h"

/** Translate FFmpeg MV modes to VA API */
static int get_VAMvModeVC1(enum MVModes mv_mode)
{
    switch (mv_mode) {
    case MV_PMODE_1MV_HPEL_BILIN: return VAMvMode1MvHalfPelBilinear;
    case MV_PMODE_1MV:            return VAMvMode1Mv;
    case MV_PMODE_1MV_HPEL:       return VAMvMode1MvHalfPel;
    case MV_PMODE_MIXED_MV:       return VAMvModeMixedMv;
    case MV_PMODE_INTENSITY_COMP: return VAMvModeIntensityCompensation;
    }
    return 0;
}

/** Check whether the MVTYPEMB bitplane is present */
static inline int vc1_has_MVTYPEMB_bitplane(const VC1Context *v)
{
    if (v->mv_type_is_raw)
        return 0;
    return v->fcm == PROGRESSIVE &&
           (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
           (v->mv_mode == MV_PMODE_MIXED_MV ||
            (v->mv_mode == MV_PMODE_INTENSITY_COMP &&
             v->mv_mode2 == MV_PMODE_MIXED_MV));
}

/** Check whether the SKIPMB bitplane is present */
static inline int vc1_has_SKIPMB_bitplane(const VC1Context *v)
{
    if (v->skip_is_raw)
        return 0;
    return (v->fcm == PROGRESSIVE || v->fcm == ILACE_FRAME) &&
           ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) ||
            (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type));
}

/** Check whether the DIRECTMB bitplane is present */
static inline int vc1_has_DIRECTMB_bitplane(const VC1Context *v)
{
    if (v->dmb_is_raw)
        return 0;
    return (v->fcm == PROGRESSIVE || v->fcm == ILACE_FRAME) &&
           (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type);
}

/** Check whether the ACPRED bitplane is present */
static inline int vc1_has_ACPRED_bitplane(const VC1Context *v)
{
    if (v->acpred_is_raw)
        return 0;
    return v->profile == PROFILE_ADVANCED &&
           (v->s.pict_type == AV_PICTURE_TYPE_I ||
            (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type));
}

/** Check whether the OVERFLAGS bitplane is present */
static inline int vc1_has_OVERFLAGS_bitplane(const VC1Context *v)
{
    if (v->overflg_is_raw)
        return 0;
    return v->profile == PROFILE_ADVANCED &&
           (v->s.pict_type == AV_PICTURE_TYPE_I ||
            (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type)) &&
           (v->overlap && v->pq <= 8) &&
           v->condover == CONDOVER_SELECT;
}

/** Check whether the FIELDTX bitplane is present */
static inline int vc1_has_FIELDTX_bitplane(const VC1Context *v)
{
    if (v->fieldtx_is_raw)
        return 0;
    return v->fcm == ILACE_FRAME &&
           (v->s.pict_type == AV_PICTURE_TYPE_I ||
            (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type));
}

/** Check whether the FORWARDMB bitplane is present */
static inline int vc1_has_FORWARDMB_bitplane(const VC1Context *v)
{
    if (v->fmb_is_raw)
        return 0;
    return v->fcm == ILACE_FIELD &&
           (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type);
}

/** Reconstruct bitstream PTYPE (7.1.1.4, index into Table-35) */
static int vc1_get_PTYPE(const VC1Context *v)
{
    const MpegEncContext *s = &v->s;
    switch (s->pict_type) {
    case AV_PICTURE_TYPE_I: return 0;
    case AV_PICTURE_TYPE_P: return v->p_frame_skipped ? 4 : 1;
    case AV_PICTURE_TYPE_B: return v->bi_type ? 3 : 2;
    }
    return 0;
}

/** Reconstruct bitstream FPTYPE (9.1.1.42, index into Table-105) */
static int vc1_get_FPTYPE(const VC1Context *v)
{
    const MpegEncContext *s = &v->s;
    switch (s->pict_type) {
    case AV_PICTURE_TYPE_I: return 0;
    case AV_PICTURE_TYPE_P: return 3;
    case AV_PICTURE_TYPE_B: return v->bi_type ? 7 : 4;
    }
    return 0;
}

/** Reconstruct bitstream MVMODE (7.1.1.32) */
static inline VAMvModeVC1 vc1_get_MVMODE(const VC1Context *v)
{
    if ((v->fcm == PROGRESSIVE || v->fcm == ILACE_FIELD) &&
        ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) ||
         (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type)))
        return get_VAMvModeVC1(v->mv_mode);
    return 0;
}

/** Reconstruct bitstream MVMODE2 (7.1.1.33) */
static inline VAMvModeVC1 vc1_get_MVMODE2(const VC1Context *v)
{
    if ((v->fcm == PROGRESSIVE || v->fcm == ILACE_FIELD) &&
        (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
        v->mv_mode == MV_PMODE_INTENSITY_COMP)
        return get_VAMvModeVC1(v->mv_mode2);
    return 0;
}
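
/** Reconstruct the INTCOMPFIELD syntax element for interlaced field P pictures */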
av_unused static inline int vc1_get_INTCOMPFIELD(const VC1Context *v)
{
    if ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
        v->fcm == ILACE_FIELD &&
        v->mv_mode == MV_PMODE_INTENSITY_COMP)
        switch (v->intcompfield) {
        case 1: return 1;
        case 2: return 2;
        case 3: return 0;
        }
    return 0;
}
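
/** Reconstruct the LUMSCALE value used for intensity compensation */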
static inline int vc1_get_LUMSCALE(const VC1Context *v)
{
    if (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) {
        if ((v->fcm == PROGRESSIVE && v->mv_mode == MV_PMODE_INTENSITY_COMP) ||
            (v->fcm == ILACE_FRAME && v->intcomp))
            return v->lumscale;
        else if (v->fcm == ILACE_FIELD && v->mv_mode == MV_PMODE_INTENSITY_COMP)
            switch (v->intcompfield) {
            case 1: return v->lumscale;
            case 2: return v->lumscale2;
            case 3: return v->lumscale;
            }
    }
    return 0;
}
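
/** Reconstruct the LUMSHIFT value used for intensity compensation */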
static inline int vc1_get_LUMSHIFT(const VC1Context *v)
{
    if (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) {
        if ((v->fcm == PROGRESSIVE && v->mv_mode == MV_PMODE_INTENSITY_COMP) ||
            (v->fcm == ILACE_FRAME && v->intcomp))
            return v->lumshift;
        else if (v->fcm == ILACE_FIELD && v->mv_mode == MV_PMODE_INTENSITY_COMP)
            switch (v->intcompfield) {
            case 1: return v->lumshift;
            case 2: return v->lumshift2;
            case 3: return v->lumshift;
            }
    }
    return 0;
}
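
/** Reconstruct LUMSCALE2, the second intensity-compensation scale used when both fields of an interlaced field picture are compensated */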
av_unused static inline int vc1_get_LUMSCALE2(const VC1Context *v)
{
    if ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
        v->fcm == ILACE_FIELD &&
        v->mv_mode == MV_PMODE_INTENSITY_COMP &&
        v->intcompfield == 3)
        return v->lumscale2;
    return 0;
}
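
/** Reconstruct LUMSHIFT2, the second intensity-compensation shift used when both fields of an interlaced field picture are compensated */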
av_unused static inline int vc1_get_LUMSHIFT2(const VC1Context *v)
{
    if ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
        v->fcm == ILACE_FIELD &&
        v->mv_mode == MV_PMODE_INTENSITY_COMP &&
        v->intcompfield == 3)
        return v->lumshift2;
    return 0;
}

/** Reconstruct bitstream TTFRM (7.1.1.41, Table-53) */
static inline int vc1_get_TTFRM(const VC1Context *v)
{
    switch (v->ttfrm) {
    case TT_8X8: return 0;
    case TT_8X4: return 1;
    case TT_4X8: return 2;
    case TT_4X4: return 3;
    }
    return 0;
}

/** Pack FFmpeg bitplanes into a VABitPlaneBuffer element */
static inline void vc1_pack_bitplanes(uint8_t *bitplane, int n, const uint8_t *ff_bp[3], int x, int y, int stride)
{
    const int bitplane_index = n / 2;
    const int ff_bp_index = y * stride + x;
    uint8_t v = 0;

    if (ff_bp[0])
        v = ff_bp[0][ff_bp_index];
    if (ff_bp[1])
        v |= ff_bp[1][ff_bp_index] << 1;
    if (ff_bp[2])
        v |= ff_bp[2][ff_bp_index] << 2;
    bitplane[bitplane_index] = (bitplane[bitplane_index] << 4) | v;
}
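
/** Fill the VA picture parameter buffer (and the packed bitplane buffer, if present) for the current picture */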
static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
{
    const VC1Context *v = avctx->priv_data;
    const MpegEncContext *s = &v->s;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    VAPictureParameterBufferVC1 pic_param;
    int err;

    pic->output_surface = ff_vaapi_get_surface_id(s->current_picture_ptr->f);

    pic_param = (VAPictureParameterBufferVC1) {
        .forward_reference_picture       = VA_INVALID_ID,
        .backward_reference_picture      = VA_INVALID_ID,
        .inloop_decoded_picture          = VA_INVALID_ID,
        .sequence_fields.bits = {
            .pulldown                    = v->broadcast,
            .interlace                   = v->interlace,
            .tfcntrflag                  = v->tfcntrflag,
            .finterpflag                 = v->finterpflag,
            .psf                         = v->psf,
            .multires                    = v->multires,
            .overlap                     = v->overlap,
            .syncmarker                  = v->resync_marker,
            .rangered                    = v->rangered,
            .max_b_frames                = s->avctx->max_b_frames,
            .profile                     = v->profile,
        },
        .coded_width                     = s->avctx->coded_width,
        .coded_height                    = s->avctx->coded_height,
        .entrypoint_fields.bits = {
            .broken_link                 = v->broken_link,
            .closed_entry                = v->closed_entry,
            .panscan_flag                = v->panscanflag,
            .loopfilter                  = s->loop_filter,
        },
        .conditional_overlap_flag        = v->condover,
        .fast_uvmc_flag                  = v->fastuvmc,
        .range_mapping_fields.bits = {
            .luma_flag                   = v->range_mapy_flag,
            .luma                        = v->range_mapy,
            .chroma_flag                 = v->range_mapuv_flag,
            .chroma                      = v->range_mapuv,
        },
        .b_picture_fraction              = v->bfraction_lut_index,
        .cbp_table                       = (v->fcm == PROGRESSIVE ? v->cbptab : v->icbptab),
        .mb_mode_table                   = v->mbmodetab,
        .range_reduction_frame           = v->rangeredfrm,
        .rounding_control                = v->rnd,
        .post_processing                 = v->postproc,
        .picture_resolution_index        = v->respic,
        .picture_fields.bits = {
            .picture_type                = (v->fcm == ILACE_FIELD ? vc1_get_FPTYPE(v) : vc1_get_PTYPE(v)),
            .frame_coding_mode           = v->fcm,
            .top_field_first             = v->tff,
            .is_first_field              = !v->second_field,
            .intensity_compensation      = v->intcomp,
        },
        .luma_scale                      = vc1_get_LUMSCALE(v),
        .luma_shift                      = vc1_get_LUMSHIFT(v),
#if VA_CHECK_VERSION(1, 1, 0)
        .luma_scale2                     = vc1_get_LUMSCALE2(v),
        .luma_shift2                     = vc1_get_LUMSHIFT2(v),
        .intensity_compensation_field    = vc1_get_INTCOMPFIELD(v),
#endif
        .raw_coding.flags = {
            .mv_type_mb                  = v->mv_type_is_raw,
            .direct_mb                   = v->dmb_is_raw,
            .skip_mb                     = v->skip_is_raw,
            .field_tx                    = v->fieldtx_is_raw,
            .forward_mb                  = v->fmb_is_raw,
            .ac_pred                     = v->acpred_is_raw,
            .overflags                   = v->overflg_is_raw,
        },
        .bitplane_present.flags = {
            .bp_mv_type_mb               = vc1_has_MVTYPEMB_bitplane(v),
            .bp_direct_mb                = vc1_has_DIRECTMB_bitplane(v),
            .bp_skip_mb                  = vc1_has_SKIPMB_bitplane(v),
            .bp_field_tx                 = vc1_has_FIELDTX_bitplane(v),
            .bp_forward_mb               = vc1_has_FORWARDMB_bitplane(v),
            .bp_ac_pred                  = vc1_has_ACPRED_bitplane(v),
            .bp_overflags                = vc1_has_OVERFLAGS_bitplane(v),
        },
        .reference_fields.bits = {
            .reference_distance_flag     = v->refdist_flag,
            .reference_distance          = v->refdist,
            .num_reference_pictures      = v->numref,
            .reference_field_pic_indicator = v->reffield,
        },
        .mv_fields.bits = {
            .mv_mode                     = vc1_get_MVMODE(v),
            .mv_mode2                    = vc1_get_MVMODE2(v),
            .mv_table                    = (v->fcm == PROGRESSIVE ? s->mv_table_index : v->imvtab),
            .two_mv_block_pattern_table  = v->twomvbptab,
            .four_mv_switch              = v->fourmvswitch,
            .four_mv_block_pattern_table = v->fourmvbptab,
            .extended_mv_flag            = v->extended_mv,
            .extended_mv_range           = v->mvrange,
            .extended_dmv_flag           = v->extended_dmv,
            .extended_dmv_range          = v->dmvrange,
        },
        .pic_quantizer_fields.bits = {
            .dquant                      = v->dquant,
            .quantizer                   = v->quantizer_mode,
            .half_qp                     = v->halfpq,
            .pic_quantizer_scale         = v->pq,
            .pic_quantizer_type          = v->pquantizer,
            .dq_frame                    = v->dquantfrm,
            .dq_profile                  = v->dqprofile,
            .dq_sb_edge                  = v->dqprofile == DQPROFILE_SINGLE_EDGE  ? v->dqsbedge : 0,
            .dq_db_edge                  = v->dqprofile == DQPROFILE_DOUBLE_EDGES ? v->dqsbedge : 0,
            .dq_binary_level             = v->dqbilevel,
            .alt_pic_quantizer           = v->altpq,
        },
        .transform_fields.bits = {
            .variable_sized_transform_flag = v->vstransform,
            .mb_level_transform_type_flag  = v->ttmbf,
            .frame_level_transform_type    = vc1_get_TTFRM(v),
            .transform_ac_codingset_idx1   = v->c_ac_table_index,
            .transform_ac_codingset_idx2   = v->y_ac_table_index,
            .intra_transform_dc_table      = v->s.dc_table_index,
        },
    };

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        pic_param.backward_reference_picture = ff_vaapi_get_surface_id(s->next_picture.f);
        // fall-through
    case AV_PICTURE_TYPE_P:
        pic_param.forward_reference_picture = ff_vaapi_get_surface_id(s->last_picture.f);
        break;
    }

    err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                            VAPictureParameterBufferType,
                                            &pic_param, sizeof(pic_param));
    if (err)
        goto fail;
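
    /* FFmpeg keeps each coded bitplane as one byte per macroblock, while the
     * VABitPlaneBuffer packs up to three bitplanes together, two macroblocks
     * per byte (one nibble each), so repack them whenever any is present. */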
    if (pic_param.bitplane_present.value & 0x7f) {
        uint8_t *bitplane;
        const uint8_t *ff_bp[3];
        int x, y, n;
        size_t size = (s->mb_width * s->mb_height + 1) / 2;

        bitplane = av_mallocz(size);
        if (!bitplane) {
            err = AVERROR(ENOMEM);
            goto fail;
        }

        switch (s->pict_type) {
        case AV_PICTURE_TYPE_P:
            ff_bp[0] = pic_param.bitplane_present.flags.bp_direct_mb  ? v->direct_mb_plane  : NULL;
            ff_bp[1] = pic_param.bitplane_present.flags.bp_skip_mb    ? s->mbskip_table     : NULL;
            ff_bp[2] = pic_param.bitplane_present.flags.bp_mv_type_mb ? v->mv_type_mb_plane : NULL;
            break;
        case AV_PICTURE_TYPE_B:
            if (!v->bi_type) {
                ff_bp[0] = pic_param.bitplane_present.flags.bp_direct_mb  ? v->direct_mb_plane  : NULL;
                ff_bp[1] = pic_param.bitplane_present.flags.bp_skip_mb    ? s->mbskip_table     : NULL;
                ff_bp[2] = pic_param.bitplane_present.flags.bp_forward_mb ? v->forward_mb_plane : NULL;
                break;
            }
            /* fall-through (BI-type) */
        case AV_PICTURE_TYPE_I:
            ff_bp[0] = pic_param.bitplane_present.flags.bp_field_tx  ? v->fieldtx_plane    : NULL;
            ff_bp[1] = pic_param.bitplane_present.flags.bp_ac_pred   ? v->acpred_plane     : NULL;
            ff_bp[2] = pic_param.bitplane_present.flags.bp_overflags ? v->over_flags_plane : NULL;
            break;
        default:
            ff_bp[0] = NULL;
            ff_bp[1] = NULL;
            ff_bp[2] = NULL;
            break;
        }

        n = 0;
        for (y = 0; y < s->mb_height; y++)
            for (x = 0; x < s->mb_width; x++, n++)
                vc1_pack_bitplanes(bitplane, n, ff_bp, x, y, s->mb_stride);
        if (n & 1) /* move last nibble to the high order */
            bitplane[n / 2] <<= 4;

        err = ff_vaapi_decode_make_param_buffer(avctx, pic,
                                                VABitPlaneBufferType,
                                                bitplane, size);
        av_free(bitplane);
        if (err)
            goto fail;
    }

    return 0;

fail:
    ff_vaapi_decode_cancel(avctx, pic);
    return err;
}
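
/** Submit the buffers accumulated for the current picture to the VA-API driver */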
static int vaapi_vc1_end_frame(AVCodecContext *avctx)
{
    VC1Context *v = avctx->priv_data;
    MpegEncContext *s = &v->s;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    int ret;

    ret = ff_vaapi_decode_issue(avctx, pic);
    if (ret < 0)
        goto fail;

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);

fail:
    return ret;
}
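
/** Build a VA slice parameter buffer for one slice and queue it along with the slice data */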
static int vaapi_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
{
    const VC1Context *v = avctx->priv_data;
    const MpegEncContext *s = &v->s;
    VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
    VASliceParameterBufferVC1 slice_param;
    int mb_height;
    int err;

    /* Current bit buffer is beyond any marker for VC-1, so skip it */
    if (avctx->codec_id == AV_CODEC_ID_VC1 && IS_MARKER(AV_RB32(buffer))) {
        buffer += 4;
        size   -= 4;
    }

    if (v->fcm == ILACE_FIELD)
        mb_height = avctx->coded_height + 31 >> 5;
    else
        mb_height = avctx->coded_height + 15 >> 4;

    slice_param = (VASliceParameterBufferVC1) {
        .slice_data_size         = size,
        .slice_data_offset       = 0,
        .slice_data_flag         = VA_SLICE_DATA_FLAG_ALL,
        .macroblock_offset       = get_bits_count(&s->gb),
        .slice_vertical_position = s->mb_y % mb_height,
    };

    err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
                                            &slice_param, sizeof(slice_param),
                                            buffer, size);
    if (err < 0) {
        ff_vaapi_decode_cancel(avctx, pic);
        return err;
    }

    return 0;
}

#if CONFIG_WMV3_VAAPI_HWACCEL
const AVHWAccel ff_wmv3_vaapi_hwaccel = {
    .name                 = "wmv3_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_WMV3,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = &vaapi_vc1_start_frame,
    .end_frame            = &vaapi_vc1_end_frame,
    .decode_slice         = &vaapi_vc1_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = &ff_vaapi_decode_init,
    .uninit               = &ff_vaapi_decode_uninit,
    .frame_params         = &ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIDecodeContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};
#endif

const AVHWAccel ff_vc1_vaapi_hwaccel = {
    .name                 = "vc1_vaapi",
    .type                 = AVMEDIA_TYPE_VIDEO,
    .id                   = AV_CODEC_ID_VC1,
    .pix_fmt              = AV_PIX_FMT_VAAPI,
    .start_frame          = &vaapi_vc1_start_frame,
    .end_frame            = &vaapi_vc1_end_frame,
    .decode_slice         = &vaapi_vc1_decode_slice,
    .frame_priv_data_size = sizeof(VAAPIDecodePicture),
    .init                 = &ff_vaapi_decode_init,
    .uninit               = &ff_vaapi_decode_uninit,
    .frame_params         = &ff_vaapi_common_frame_params,
    .priv_data_size       = sizeof(VAAPIDecodeContext),
    .caps_internal        = HWACCEL_CAP_ASYNC_SAFE,
};