/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/pixdesc.h"

#include "vaapi_encode.h"
#include "avcodec.h"

static const char *picture_type_name[] = { "IDR", "I", "P", "B" };

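// Create the pair of VA buffers (parameter + data) needed to submit a
// packed header of the given type, and record them on the picture so
// they can be destroyed again later.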
static int vaapi_encode_make_packed_header(AVCodecContext *avctx,
                                           VAAPIEncodePicture *pic,
                                           int type, char *data, size_t bit_len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID param_buffer, data_buffer;
    VAEncPackedHeaderParameterBuffer params = {
        .type = type,
        .bit_length = bit_len,
        .has_emulation_bytes = 1,
    };

    av_assert0(pic->nb_param_buffers + 2 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderParameterBufferType,
                         sizeof(params), 1, &params, &param_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = param_buffer;

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderDataBufferType,
                         (bit_len + 7) / 8, 1, data, &data_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create data buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = data_buffer;

    av_log(avctx, AV_LOG_DEBUG, "Packed header buffer (%d) is %#x/%#x "
           "(%zu bits).\n", type, param_buffer, data_buffer, bit_len);
    return 0;
}

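// Wrap an already-filled parameter structure in a VA buffer of the given
// type and attach it to the picture's parameter buffer list.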
static int vaapi_encode_make_param_buffer(AVCodecContext *avctx,
                                          VAAPIEncodePicture *pic,
                                          int type, char *data, size_t len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID buffer;

    av_assert0(pic->nb_param_buffers + 1 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         type, len, 1, data, &buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "(type %d): %d (%s).\n", type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = buffer;

    av_log(avctx, AV_LOG_DEBUG, "Param buffer (%d) is %#x.\n",
           type, buffer);
    return 0;
}

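// Block until the hardware has finished encoding this picture; the input
// frame is released once the encode is known to be complete.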
static int vaapi_encode_wait(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;

    av_assert0(pic->encode_issued);

    if (pic->encode_complete) {
        // Already waited for this picture.
        return 0;
    }

    av_log(avctx, AV_LOG_DEBUG, "Sync to pic %"PRId64"/%"PRId64" "
           "(recon surface %#x).\n", pic->display_order,
           pic->encode_order, pic->recon_surface);

    vas = vaSyncSurface(ctx->hwctx->display, pic->recon_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to sync to picture completion: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }

    // Input is definitely finished with now.
    av_frame_free(&pic->input_image);

    pic->encode_complete = 1;
    return 0;
}

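// Submit one picture to the hardware: allocate the reconstructed surface
// and output buffer, build all parameter and packed-header buffers, then
// render them through vaBeginPicture / vaRenderPicture / vaEndPicture.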
static int vaapi_encode_issue(AVCodecContext *avctx,
                              VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodeSlice *slice;
    VAStatus vas;
    int err, i;
    char data[MAX_PARAM_BUFFER_SIZE];
    size_t bit_len;

    av_log(avctx, AV_LOG_DEBUG, "Issuing encode for pic %"PRId64"/%"PRId64" "
           "as type %s.\n", pic->display_order, pic->encode_order,
           picture_type_name[pic->type]);
    if (pic->nb_refs == 0) {
        av_log(avctx, AV_LOG_DEBUG, "No reference pictures.\n");
    } else {
        av_log(avctx, AV_LOG_DEBUG, "Refers to:");
        for (i = 0; i < pic->nb_refs; i++) {
            av_log(avctx, AV_LOG_DEBUG, " %"PRId64"/%"PRId64,
                   pic->refs[i]->display_order, pic->refs[i]->encode_order);
        }
        av_log(avctx, AV_LOG_DEBUG, ".\n");
    }

    av_assert0(pic->input_available && !pic->encode_issued);
    for (i = 0; i < pic->nb_refs; i++) {
        av_assert0(pic->refs[i]);
        // If we are serialised then the references must have already
        // completed.  If not, they must have been issued but need not
        // have completed yet.
        if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
            av_assert0(pic->refs[i]->encode_complete);
        else
            av_assert0(pic->refs[i]->encode_issued);
    }

    av_log(avctx, AV_LOG_DEBUG, "Input surface is %#x.\n", pic->input_surface);

    pic->recon_image = av_frame_alloc();
    if (!pic->recon_image) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0);
    if (err < 0) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    pic->recon_surface = (VASurfaceID)(uintptr_t)pic->recon_image->data[3];
    av_log(avctx, AV_LOG_DEBUG, "Recon surface is %#x.\n", pic->recon_surface);

    pic->output_buffer_ref = av_buffer_pool_get(ctx->output_buffer_pool);
    if (!pic->output_buffer_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    pic->output_buffer = (VABufferID)(uintptr_t)pic->output_buffer_ref->data;
    av_log(avctx, AV_LOG_DEBUG, "Output buffer is %#x.\n",
           pic->output_buffer);

    if (ctx->codec->picture_params_size > 0) {
        pic->codec_picture_params = av_malloc(ctx->codec->picture_params_size);
        if (!pic->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(pic->codec_picture_params, ctx->codec_picture_params,
               ctx->codec->picture_params_size);
    } else {
        av_assert0(!ctx->codec_picture_params);
    }

    pic->nb_param_buffers = 0;

    if (pic->encode_order == 0) {
        // Global parameter buffers are set on the first picture only.
        for (i = 0; i < ctx->nb_global_params; i++) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncMiscParameterBufferType,
                                                 (char*)ctx->global_params[i],
                                                 ctx->global_params_size[i]);
            if (err < 0)
                goto fail;
        }
    }

    if (pic->type == PICTURE_TYPE_IDR && ctx->codec->init_sequence_params) {
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncSequenceParameterBufferType,
                                             ctx->codec_sequence_params,
                                             ctx->codec->sequence_params_size);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->init_picture_params) {
        err = ctx->codec->init_picture_params(avctx, pic);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to initialise picture "
                   "parameters: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncPictureParameterBufferType,
                                             pic->codec_picture_params,
                                             ctx->codec->picture_params_size);
        if (err < 0)
            goto fail;
    }

    if (pic->type == PICTURE_TYPE_IDR) {
        if (ctx->codec->write_sequence_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_sequence_header(avctx, data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-sequence "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->sequence_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->codec->write_picture_header) {
        bit_len = 8 * sizeof(data);
        err = ctx->codec->write_picture_header(avctx, pic, data, &bit_len);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to write per-picture "
                   "header: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_packed_header(avctx, pic,
                                              ctx->codec->picture_header_type,
                                              data, bit_len);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->write_extra_buffer) {
        for (i = 0;; i++) {
            size_t len = sizeof(data);
            int type;
            err = ctx->codec->write_extra_buffer(avctx, pic, i, &type,
                                                 data, &len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "buffer %d: %d.\n", i, err);
                goto fail;
            }
            err = vaapi_encode_make_param_buffer(avctx, pic, type,
                                                 data, len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->codec->write_extra_header) {
        for (i = 0;; i++) {
            int type;
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_extra_header(avctx, pic, i, &type,
                                                 data, &bit_len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "header %d: %d.\n", i, err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic, type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

    av_assert0(pic->nb_slices <= MAX_PICTURE_SLICES);
    for (i = 0; i < pic->nb_slices; i++) {
        slice = av_mallocz(sizeof(*slice));
        if (!slice) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        pic->slices[i] = slice;

        if (ctx->codec->slice_params_size > 0) {
            slice->codec_slice_params = av_mallocz(ctx->codec->slice_params_size);
            if (!slice->codec_slice_params) {
                err = AVERROR(ENOMEM);
                goto fail;
            }
        }

        if (ctx->codec->init_slice_params) {
            err = ctx->codec->init_slice_params(avctx, pic, slice);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to initialise slice "
                       "parameters: %d.\n", err);
                goto fail;
            }
        }

        if (ctx->codec->write_slice_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_slice_header(avctx, pic, slice,
                                                 data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-slice "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->slice_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }

        if (ctx->codec->init_slice_params) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncSliceParameterBufferType,
                                                 slice->codec_slice_params,
                                                 ctx->codec->slice_params_size);
            if (err < 0)
                goto fail;
        }
    }

    vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
                         pic->input_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to begin picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          pic->param_buffers, pic->nb_param_buffers);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to upload encode parameters: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_at_end;
    }

    pic->encode_issued = 1;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
        return vaapi_encode_wait(avctx, pic);
    else
        return 0;

fail_with_picture:
    vaEndPicture(ctx->hwctx->display, ctx->va_context);
fail:
    for (i = 0; i < pic->nb_param_buffers; i++)
        vaDestroyBuffer(ctx->hwctx->display, pic->param_buffers[i]);
fail_at_end:
    av_freep(&pic->codec_picture_params);
    av_frame_free(&pic->recon_image);
    return err;
}

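// Wait for a picture to finish encoding and copy the coded buffer
// segments into the output packet, setting the keyframe flag and pts.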
static int vaapi_encode_output(AVCodecContext *avctx,
                               VAAPIEncodePicture *pic, AVPacket *pkt)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VACodedBufferSegment *buf_list, *buf;
    VAStatus vas;
    int err;

    err = vaapi_encode_wait(avctx, pic);
    if (err < 0)
        return err;

    buf_list = NULL;
    vas = vaMapBuffer(ctx->hwctx->display, pic->output_buffer,
                      (void**)&buf_list);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to map output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    for (buf = buf_list; buf; buf = buf->next) {
        av_log(avctx, AV_LOG_DEBUG, "Output buffer: %u bytes "
               "(status %08x).\n", buf->size, buf->status);

        err = av_new_packet(pkt, buf->size);
        if (err < 0)
            goto fail_mapped;

        memcpy(pkt->data, buf->buf, buf->size);
    }

    if (pic->type == PICTURE_TYPE_IDR)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pic->pts;

    vas = vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    av_buffer_unref(&pic->output_buffer_ref);
    pic->output_buffer = VA_INVALID_ID;

    av_log(avctx, AV_LOG_DEBUG, "Output read for pic %"PRId64"/%"PRId64".\n",
           pic->display_order, pic->encode_order);
    return 0;

fail_mapped:
    vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
fail:
    av_buffer_unref(&pic->output_buffer_ref);
    pic->output_buffer = VA_INVALID_ID;
    return err;
}

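// Wait for an issued picture and throw away its output buffer instead of
// returning it to the caller.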
static int vaapi_encode_discard(AVCodecContext *avctx,
                                VAAPIEncodePicture *pic)
{
    vaapi_encode_wait(avctx, pic);

    if (pic->output_buffer_ref) {
        av_log(avctx, AV_LOG_DEBUG, "Discard output for pic "
               "%"PRId64"/%"PRId64".\n",
               pic->display_order, pic->encode_order);

        av_buffer_unref(&pic->output_buffer_ref);
        pic->output_buffer = VA_INVALID_ID;
    }

    return 0;
}

static VAAPIEncodePicture *vaapi_encode_alloc(void)
{
    VAAPIEncodePicture *pic;

    pic = av_mallocz(sizeof(*pic));
    if (!pic)
        return NULL;

    pic->input_surface = VA_INVALID_ID;
    pic->recon_surface = VA_INVALID_ID;
    pic->output_buffer = VA_INVALID_ID;

    return pic;
}

static int vaapi_encode_free(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    int i;

    if (pic->encode_issued)
        vaapi_encode_discard(avctx, pic);

    for (i = 0; i < pic->nb_slices; i++) {
        av_freep(&pic->slices[i]->priv_data);
        av_freep(&pic->slices[i]->codec_slice_params);
        av_freep(&pic->slices[i]);
    }
    av_freep(&pic->codec_picture_params);

    av_frame_free(&pic->input_image);
    av_frame_free(&pic->recon_image);

    // Output buffer should already be destroyed.
    av_assert0(pic->output_buffer == VA_INVALID_ID);

    av_freep(&pic->priv_data);
    av_freep(&pic->codec_picture_params);

    av_free(pic);

    return 0;
}

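// Make progress on encoding: in the serialise/minimise-latency modes this
// recursively issues the target picture and its references; in the
// maximise-throughput mode it issues every picture whose dependencies
// have already been issued.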
static int vaapi_encode_step(AVCodecContext *avctx,
                             VAAPIEncodePicture *target)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int i, err;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING ||
        ctx->issue_mode == ISSUE_MODE_MINIMISE_LATENCY) {
        // These two modes are equivalent, except that we wait for
        // immediate completion on each operation if serialised.

        if (!target) {
            // No target, nothing to do yet.
            return 0;
        }

        if (target->encode_complete) {
            // Already done.
            return 0;
        }

        pic = target;
        for (i = 0; i < pic->nb_refs; i++) {
            if (!pic->refs[i]->encode_complete) {
                err = vaapi_encode_step(avctx, pic->refs[i]);
                if (err < 0)
                    return err;
            }
        }

        err = vaapi_encode_issue(avctx, pic);
        if (err < 0)
            return err;

    } else if (ctx->issue_mode == ISSUE_MODE_MAXIMISE_THROUGHPUT) {
        int activity;

        do {
            activity = 0;
            for (pic = ctx->pic_start; pic; pic = pic->next) {
                if (!pic->input_available || pic->encode_issued)
                    continue;
                for (i = 0; i < pic->nb_refs; i++) {
                    if (!pic->refs[i]->encode_issued)
                        break;
                }
                if (i < pic->nb_refs)
                    continue;
                err = vaapi_encode_issue(avctx, pic);
                if (err < 0)
                    return err;
                activity = 1;
            }
        } while (activity);

        if (target) {
            av_assert0(target->encode_issued && "broken dependencies?");
        }

    } else {
        av_assert0(0);
    }

    return 0;
}

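// Find or create the picture corresponding to the next input frame,
// assigning its type (IDR/I/P/B) and display/encode order from the
// configured GOP structure.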
static int vaapi_encode_get_next(AVCodecContext *avctx,
                                 VAAPIEncodePicture **pic_out)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *start, *end, *pic;
    int i;

    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (pic->next)
            av_assert0(pic->display_order + 1 == pic->next->display_order);
        if (pic->display_order == ctx->input_order) {
            *pic_out = pic;
            return 0;
        }
    }

    if (ctx->input_order == 0) {
        // First frame is always an IDR frame.
        av_assert0(!ctx->pic_start && !ctx->pic_end);

        pic = vaapi_encode_alloc();
        if (!pic)
            return AVERROR(ENOMEM);

        pic->type = PICTURE_TYPE_IDR;
        pic->display_order = 0;
        pic->encode_order = 0;

        ctx->pic_start = ctx->pic_end = pic;

        *pic_out = pic;
        return 0;
    }

    pic = vaapi_encode_alloc();
    if (!pic)
        return AVERROR(ENOMEM);

    if (ctx->p_per_i == 0 || ctx->p_counter == ctx->p_per_i) {
        if (ctx->i_per_idr == 0 || ctx->i_counter == ctx->i_per_idr) {
            pic->type = PICTURE_TYPE_IDR;
            ctx->i_counter = 0;
        } else {
            pic->type = PICTURE_TYPE_I;
            ++ctx->i_counter;
        }
        ctx->p_counter = 0;
    } else {
        pic->type = PICTURE_TYPE_P;
        pic->refs[0] = ctx->pic_end;
        pic->nb_refs = 1;
        ++ctx->p_counter;
    }
    start = end = pic;

    if (pic->type != PICTURE_TYPE_IDR) {
        // If that was not an IDR frame, add B-frames display-before and
        // encode-after it.

        for (i = 0; i < ctx->b_per_p; i++) {
            pic = vaapi_encode_alloc();
            if (!pic)
                goto fail;

            pic->type = PICTURE_TYPE_B;
            pic->refs[0] = ctx->pic_end;
            pic->refs[1] = end;
            pic->nb_refs = 2;

            pic->next = start;
            pic->display_order = ctx->input_order + ctx->b_per_p - i - 1;
            pic->encode_order = pic->display_order + 1;
            start = pic;
        }
    }

    for (i = 0, pic = start; pic; i++, pic = pic->next) {
        pic->display_order = ctx->input_order + i;
        if (end->type == PICTURE_TYPE_IDR)
            pic->encode_order = ctx->input_order + i;
        else if (pic == end)
            pic->encode_order = ctx->input_order;
        else
            pic->encode_order = ctx->input_order + i + 1;
    }

    av_assert0(ctx->pic_end);
    ctx->pic_end->next = start;
    ctx->pic_end = end;

    *pic_out = start;

    av_log(avctx, AV_LOG_DEBUG, "Pictures:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;

fail:
    while (start) {
        pic = start->next;
        vaapi_encode_free(avctx, start);
        start = pic;
    }
    return AVERROR(ENOMEM);
}

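// Fix up the picture list at end of stream: the last picture with input
// may need to change from B to P so that nothing references a frame
// which will never arrive, and any trailing pictures are discarded.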
static int vaapi_encode_mangle_end(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *last_pic, *next;

    // Find the last picture we actually have input for.
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (!pic->input_available)
            break;
        last_pic = pic;
    }

    if (pic) {
        av_assert0(last_pic);

        if (last_pic->type == PICTURE_TYPE_B) {
            // Some fixing up is required.  Change the type of this
            // picture to P, then modify preceding B references which
            // point beyond it to point at it instead.

            last_pic->type = PICTURE_TYPE_P;
            last_pic->encode_order = last_pic->refs[1]->encode_order;

            for (pic = ctx->pic_start; pic != last_pic; pic = pic->next) {
                if (pic->type == PICTURE_TYPE_B &&
                    pic->refs[1] == last_pic->refs[1])
                    pic->refs[1] = last_pic;
            }

            last_pic->nb_refs = 1;
            last_pic->refs[1] = NULL;
        } else {
            // We can use the current structure (no references point
            // beyond the end), but there are unused pics to discard.
        }

        // Discard all following pics, they will never be used.
        for (pic = last_pic->next; pic; pic = next) {
            next = pic->next;
            vaapi_encode_free(avctx, pic);
        }

        last_pic->next = NULL;
        ctx->pic_end = last_pic;

    } else {
        // Input is available for all pictures, so we don't need to
        // mangle anything.
    }

    av_log(avctx, AV_LOG_DEBUG, "Pictures at end of stream:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;
}

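// Free pictures at the head of the list once they have been output and
// are no longer referenced by any picture still waiting to encode.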
static int vaapi_encode_clear_old(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *old;
    int i;

    while (ctx->pic_start != ctx->pic_end) {
        old = ctx->pic_start;
        if (old->encode_order > ctx->output_order)
            break;

        for (pic = old->next; pic; pic = pic->next) {
            if (pic->encode_complete)
                continue;
            for (i = 0; i < pic->nb_refs; i++) {
                if (pic->refs[i] == old) {
                    // We still need this picture because it's referred to
                    // directly by a later one, so it and all following
                    // pictures have to stay.
                    return 0;
                }
            }
        }

        pic = ctx->pic_start;
        ctx->pic_start = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    return 0;
}

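// Main encode entry point: queue the new input frame (or mark end of
// stream), issue whatever encodes are possible, and emit the packet whose
// encode order matches the current output order.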
int ff_vaapi_encode2(AVCodecContext *avctx, AVPacket *pkt,
                     const AVFrame *input_image, int *got_packet)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int err;

    if (input_image) {
        av_log(avctx, AV_LOG_DEBUG, "Encode frame: %ux%u (%"PRId64").\n",
               input_image->width, input_image->height, input_image->pts);

        err = vaapi_encode_get_next(avctx, &pic);
        if (err) {
            av_log(avctx, AV_LOG_ERROR, "Input setup failed: %d.\n", err);
            return err;
        }

        pic->input_image = av_frame_alloc();
        if (!pic->input_image) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        err = av_frame_ref(pic->input_image, input_image);
        if (err < 0)
            goto fail;
        pic->input_surface = (VASurfaceID)(uintptr_t)input_image->data[3];
        pic->pts = input_image->pts;

        if (ctx->input_order == 0)
            ctx->first_pts = pic->pts;
        if (ctx->input_order == ctx->decode_delay)
            ctx->dts_pts_diff = pic->pts - ctx->first_pts;
        if (ctx->output_delay > 0)
            ctx->ts_ring[ctx->input_order % (3 * ctx->output_delay)] = pic->pts;

        pic->input_available = 1;

    } else {
        if (!ctx->end_of_stream) {
            err = vaapi_encode_mangle_end(avctx);
            if (err < 0)
                goto fail;
            ctx->end_of_stream = 1;
        }
    }

    ++ctx->input_order;
    ++ctx->output_order;
    av_assert0(ctx->output_order + ctx->output_delay + 1 == ctx->input_order);

    for (pic = ctx->pic_start; pic; pic = pic->next)
        if (pic->encode_order == ctx->output_order)
            break;

    // pic can be null here if we don't have a specific target in this
    // iteration.  We might still issue encodes if things can be overlapped,
    // even though we don't intend to output anything.

    err = vaapi_encode_step(avctx, pic);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
        goto fail;
    }

    if (!pic) {
        *got_packet = 0;
    } else {
        err = vaapi_encode_output(avctx, pic, pkt);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
            goto fail;
        }

        if (ctx->output_delay == 0) {
            pkt->dts = pkt->pts;
        } else if (ctx->output_order < ctx->decode_delay) {
            if (ctx->ts_ring[ctx->output_order] < INT64_MIN + ctx->dts_pts_diff)
                pkt->dts = INT64_MIN;
            else
                pkt->dts = ctx->ts_ring[ctx->output_order] - ctx->dts_pts_diff;
        } else {
            pkt->dts = ctx->ts_ring[(ctx->output_order - ctx->decode_delay) %
                                    (3 * ctx->output_delay)];
        }

        *got_packet = 1;
    }

    err = vaapi_encode_clear_old(avctx);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "List clearing failed: %d.\n", err);
        goto fail;
    }

    return 0;

fail:
    // Unclear what to clean up on failure.  There are probably some things we
    // could do usefully clean up here, but for now just leave them for uninit()
    // to do instead.
    return err;
}

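// Verify that the driver actually supports the chosen profile, entrypoint,
// rate-control mode and reference-frame requirements before creating the
// config.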
static av_cold int vaapi_encode_check_config(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    int i, n, err;
    VAProfile *profiles = NULL;
    VAEntrypoint *entrypoints = NULL;
    VAConfigAttrib attr[] = {
        { VAConfigAttribRateControl },
        { VAConfigAttribEncMaxRefFrames },
    };

    n = vaMaxNumProfiles(ctx->hwctx->display);
    profiles = av_malloc_array(n, sizeof(VAProfile));
    if (!profiles) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigProfiles(ctx->hwctx->display, profiles, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query profiles: %d (%s).\n",
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (profiles[i] == ctx->va_profile)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding profile not found (%d).\n",
               ctx->va_profile);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    n = vaMaxNumEntrypoints(ctx->hwctx->display);
    entrypoints = av_malloc_array(n, sizeof(VAEntrypoint));
    if (!entrypoints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigEntrypoints(ctx->hwctx->display, ctx->va_profile,
                                   entrypoints, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query entrypoints for "
               "profile %u: %d (%s).\n", ctx->va_profile,
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (entrypoints[i] == ctx->va_entrypoint)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding entrypoint not found "
               "(%d / %d).\n", ctx->va_profile, ctx->va_entrypoint);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    vas = vaGetConfigAttributes(ctx->hwctx->display,
                                ctx->va_profile, ctx->va_entrypoint,
                                attr, FF_ARRAY_ELEMS(attr));
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to fetch config "
               "attributes: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(attr); i++) {
        if (attr[i].value == VA_ATTRIB_NOT_SUPPORTED) {
            // Unfortunately we have to treat this as "don't know" and hope
            // for the best, because the Intel MJPEG encoder returns this
            // for all the interesting attributes.
            continue;
        }
        switch (attr[i].type) {
        case VAConfigAttribRateControl:
            if (!(ctx->va_rc_mode & attr[i].value)) {
                av_log(avctx, AV_LOG_ERROR, "Rate control mode is not "
                       "supported: %x\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            break;
        case VAConfigAttribEncMaxRefFrames:
        {
            unsigned int ref_l0 = attr[i].value & 0xffff;
            unsigned int ref_l1 = (attr[i].value >> 16) & 0xffff;

            if (avctx->gop_size > 1 && ref_l0 < 1) {
                av_log(avctx, AV_LOG_ERROR, "P frames are not "
                       "supported (%x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            if (avctx->max_b_frames > 0 && ref_l1 < 1) {
                av_log(avctx, AV_LOG_ERROR, "B frames are not "
                       "supported (%x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
        }
        break;
        }
    }

    err = 0;
fail:
    av_freep(&profiles);
    av_freep(&entrypoints);
    return err;
}

static void vaapi_encode_free_output_buffer(void *opaque,
                                            uint8_t *data)
{
    AVCodecContext *avctx = opaque;
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VABufferID buffer_id;

    buffer_id = (VABufferID)(uintptr_t)data;
    vaDestroyBuffer(ctx->hwctx->display, buffer_id);

    av_log(avctx, AV_LOG_DEBUG, "Freed output buffer %#x\n", buffer_id);
}

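// Buffer-pool allocator for coded output buffers; the VABufferID itself is
// stored as the AVBufferRef data pointer.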
static AVBufferRef *vaapi_encode_alloc_output_buffer(void *opaque,
                                                     int size)
{
    AVCodecContext *avctx = opaque;
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VABufferID buffer_id;
    VAStatus vas;
    AVBufferRef *ref;

    // The output buffer size is fixed, so it needs to be large enough
    // to hold the largest possible compressed frame.  We assume here
    // that the uncompressed frame plus some header data is an upper
    // bound on that.
    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncCodedBufferType,
                         3 * ctx->aligned_width * ctx->aligned_height +
                         (1 << 16), 1, 0, &buffer_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create bitstream "
               "output buffer: %d (%s).\n", vas, vaErrorStr(vas));
        return NULL;
    }

    av_log(avctx, AV_LOG_DEBUG, "Allocated output buffer %#x\n", buffer_id);

    ref = av_buffer_create((uint8_t*)(uintptr_t)buffer_id,
                           sizeof(buffer_id),
                           &vaapi_encode_free_output_buffer,
                           avctx, AV_BUFFER_FLAG_READONLY);
    if (!ref) {
        vaDestroyBuffer(ctx->hwctx->display, buffer_id);
        return NULL;
    }

    return ref;
}

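// Common initialisation for all VAAPI encoders: take the hardware frames
// context from the caller, check and create the VA config/context,
// allocate the reconstructed-frame pool and set up the GOP parameters.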
av_cold int ff_vaapi_encode_init(AVCodecContext *avctx,
                                 const VAAPIEncodeType *type)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    AVVAAPIFramesContext *recon_hwctx = NULL;
    AVVAAPIHWConfig *hwconfig = NULL;
    AVHWFramesConstraints *constraints = NULL;
    enum AVPixelFormat recon_format;
    VAStatus vas;
    int err, i;

    if (!avctx->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the encoding device.\n");
        return AVERROR(EINVAL);
    }

    ctx->codec = type;
    ctx->codec_options = ctx->codec_options_data;

    ctx->va_config = VA_INVALID_ID;
    ctx->va_context = VA_INVALID_ID;

    ctx->priv_data = av_mallocz(type->priv_data_size);
    if (!ctx->priv_data) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
    if (!ctx->input_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;

    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    if (!ctx->device_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
    ctx->hwctx = ctx->device->hwctx;

    err = ctx->codec->init(avctx);
    if (err < 0)
        goto fail;

    err = vaapi_encode_check_config(avctx);
    if (err < 0)
        goto fail;

    vas = vaCreateConfig(ctx->hwctx->display,
                         ctx->va_profile, ctx->va_entrypoint,
                         ctx->config_attributes, ctx->nb_config_attributes,
                         &ctx->va_config);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "configuration: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
    if (!hwconfig) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    hwconfig->config_id = ctx->va_config;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // Probably we can use the input surface format as the surface format
    // of the reconstructed frames.  If not, we just pick the first (only?)
    // format in the valid list and hope that it all works.
    recon_format = AV_PIX_FMT_NONE;
    if (constraints->valid_sw_formats) {
        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
            if (ctx->input_frames->sw_format ==
                constraints->valid_sw_formats[i]) {
                recon_format = ctx->input_frames->sw_format;
                break;
            }
        }
        if (recon_format == AV_PIX_FMT_NONE) {
            // No match.  Just use the first in the supported list and
            // hope for the best.
            recon_format = constraints->valid_sw_formats[0];
        }
    } else {
        // No idea what to use; copy input format.
        recon_format = ctx->input_frames->sw_format;
    }
    av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
           "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));

    if (ctx->aligned_width < constraints->min_width ||
        ctx->aligned_height < constraints->min_height ||
        ctx->aligned_width > constraints->max_width ||
        ctx->aligned_height > constraints->max_height) {
        av_log(avctx, AV_LOG_ERROR, "Hardware does not support encoding at "
               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
               ctx->aligned_width, ctx->aligned_height,
               constraints->min_width, constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);

    ctx->recon_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->recon_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->recon_frames = (AVHWFramesContext*)ctx->recon_frames_ref->data;

    ctx->recon_frames->format = AV_PIX_FMT_VAAPI;
    ctx->recon_frames->sw_format = recon_format;
    ctx->recon_frames->width = ctx->aligned_width;
    ctx->recon_frames->height = ctx->aligned_height;
    ctx->recon_frames->initial_pool_size = ctx->nb_recon_frames;

    err = av_hwframe_ctx_init(ctx->recon_frames_ref);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialise reconstructed "
               "frame context: %d.\n", err);
        goto fail;
    }
    recon_hwctx = ctx->recon_frames->hwctx;

    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          ctx->aligned_width, ctx->aligned_height,
                          VA_PROGRESSIVE,
                          recon_hwctx->surface_ids,
                          recon_hwctx->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    ctx->input_order = 0;
    ctx->output_delay = avctx->max_b_frames;
    ctx->decode_delay = 1;
    ctx->output_order = - ctx->output_delay - 1;

    if (ctx->codec->sequence_params_size > 0) {
        ctx->codec_sequence_params =
            av_mallocz(ctx->codec->sequence_params_size);
        if (!ctx->codec_sequence_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }
    if (ctx->codec->picture_params_size > 0) {
        ctx->codec_picture_params =
            av_mallocz(ctx->codec->picture_params_size);
        if (!ctx->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (ctx->codec->init_sequence_params) {
        err = ctx->codec->init_sequence_params(avctx);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Codec sequence initialisation "
                   "failed: %d.\n", err);
            goto fail;
        }
    }

    ctx->output_buffer_pool =
        av_buffer_pool_init2(sizeof(VABufferID), avctx,
                             &vaapi_encode_alloc_output_buffer, NULL);
    if (!ctx->output_buffer_pool) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // All I are IDR for now.
    ctx->i_per_idr = 0;
    ctx->p_per_i = ((avctx->gop_size + avctx->max_b_frames) /
                    (avctx->max_b_frames + 1));
    ctx->b_per_p = avctx->max_b_frames;

    // This should be configurable somehow.  (Needs testing on a machine
    // where it actually overlaps properly, though.)
    ctx->issue_mode = ISSUE_MODE_MAXIMISE_THROUGHPUT;

    return 0;

fail:
    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    ff_vaapi_encode_close(avctx);
    return err;
}

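// Common teardown: free any remaining pictures and all VA/codec state.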
av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *next;

    for (pic = ctx->pic_start; pic; pic = next) {
        next = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    if (ctx->va_context != VA_INVALID_ID) {
        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        ctx->va_context = VA_INVALID_ID;
    }

    if (ctx->va_config != VA_INVALID_ID) {
        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        ctx->va_config = VA_INVALID_ID;
    }

    if (ctx->codec->close)
        ctx->codec->close(avctx);

    av_buffer_pool_uninit(&ctx->output_buffer_pool);

    av_freep(&ctx->codec_sequence_params);
    av_freep(&ctx->codec_picture_params);

    av_buffer_unref(&ctx->recon_frames_ref);
    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->device_ref);

    av_freep(&ctx->priv_data);

    return 0;
}