/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/pixdesc.h"

#include "vaapi_encode.h"
#include "avcodec.h"

static const char *picture_type_name[] = { "IDR", "I", "P", "B" };

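// Attach a packed (bitstream) header to a picture: create the packed-header
// parameter buffer and its associated data buffer, and record both in the
// picture's parameter-buffer list.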
static int vaapi_encode_make_packed_header(AVCodecContext *avctx,
                                           VAAPIEncodePicture *pic,
                                           int type, char *data, size_t bit_len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID param_buffer, data_buffer;
    VAEncPackedHeaderParameterBuffer params = {
        .type = type,
        .bit_length = bit_len,
        .has_emulation_bytes = 1,
    };

    av_assert0(pic->nb_param_buffers + 2 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderParameterBufferType,
                         sizeof(params), 1, &params, &param_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = param_buffer;

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderDataBufferType,
                         (bit_len + 7) / 8, 1, data, &data_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create data buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = data_buffer;

    av_log(avctx, AV_LOG_DEBUG, "Packed header buffer (%d) is %#x/%#x "
           "(%zu bits).\n", type, param_buffer, data_buffer, bit_len);
    return 0;
}

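// Attach a single VA parameter buffer of the given type to a picture and
// record it in the picture's parameter-buffer list.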
static int vaapi_encode_make_param_buffer(AVCodecContext *avctx,
                                          VAAPIEncodePicture *pic,
                                          int type, char *data, size_t len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID buffer;

    av_assert0(pic->nb_param_buffers + 1 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         type, len, 1, data, &buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "(type %d): %d (%s).\n", type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = buffer;

    av_log(avctx, AV_LOG_DEBUG, "Param buffer (%d) is %#x.\n",
           type, buffer);
    return 0;
}

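// Block until the hardware has finished encoding an already-issued picture,
// then release the input frame, which is no longer needed.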
static int vaapi_encode_wait(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;

    av_assert0(pic->encode_issued);

    if (pic->encode_complete) {
        // Already waited for this picture.
        return 0;
    }

    av_log(avctx, AV_LOG_DEBUG, "Sync to pic %"PRId64"/%"PRId64" "
           "(recon surface %#x).\n", pic->display_order,
           pic->encode_order, pic->recon_surface);

    vas = vaSyncSurface(ctx->hwctx->display, pic->recon_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to sync to picture completion: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }

    // Input is definitely finished with now.
    av_frame_free(&pic->input_image);

    pic->encode_complete = 1;
    return 0;
}

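// Submit one picture to the hardware: allocate the reconstructed surface and
// output buffer, build all parameter buffers and packed headers (sequence,
// picture, slices, plus any codec-specific extras), then render the picture
// with vaBeginPicture()/vaRenderPicture()/vaEndPicture().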
static int vaapi_encode_issue(AVCodecContext *avctx,
                              VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodeSlice *slice;
    VAStatus vas;
    int err, i;
    char data[MAX_PARAM_BUFFER_SIZE];
    size_t bit_len;

    av_log(avctx, AV_LOG_DEBUG, "Issuing encode for pic %"PRId64"/%"PRId64" "
           "as type %s.\n", pic->display_order, pic->encode_order,
           picture_type_name[pic->type]);
    if (pic->nb_refs == 0) {
        av_log(avctx, AV_LOG_DEBUG, "No reference pictures.\n");
    } else {
        av_log(avctx, AV_LOG_DEBUG, "Refers to:");
        for (i = 0; i < pic->nb_refs; i++) {
            av_log(avctx, AV_LOG_DEBUG, " %"PRId64"/%"PRId64,
                   pic->refs[i]->display_order, pic->refs[i]->encode_order);
        }
        av_log(avctx, AV_LOG_DEBUG, ".\n");
    }

    av_assert0(pic->input_available && !pic->encode_issued);
    for (i = 0; i < pic->nb_refs; i++) {
        av_assert0(pic->refs[i]);
        // If we are serialised then the references must have already
        // completed. If not, they must have been issued but need not
        // have completed yet.
        if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
            av_assert0(pic->refs[i]->encode_complete);
        else
            av_assert0(pic->refs[i]->encode_issued);
    }

    av_log(avctx, AV_LOG_DEBUG, "Input surface is %#x.\n", pic->input_surface);

    pic->recon_image = av_frame_alloc();
    if (!pic->recon_image) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0);
    if (err < 0) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    pic->recon_surface = (VASurfaceID)(uintptr_t)pic->recon_image->data[3];
    av_log(avctx, AV_LOG_DEBUG, "Recon surface is %#x.\n", pic->recon_surface);

    pic->output_buffer_ref = av_buffer_pool_get(ctx->output_buffer_pool);
    if (!pic->output_buffer_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    pic->output_buffer = (VABufferID)(uintptr_t)pic->output_buffer_ref->data;
    av_log(avctx, AV_LOG_DEBUG, "Output buffer is %#x.\n",
           pic->output_buffer);

    if (ctx->codec->picture_params_size > 0) {
        pic->codec_picture_params = av_malloc(ctx->codec->picture_params_size);
        if (!pic->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(pic->codec_picture_params, ctx->codec_picture_params,
               ctx->codec->picture_params_size);
    } else {
        av_assert0(!ctx->codec_picture_params);
    }

    pic->nb_param_buffers = 0;

    if (pic->encode_order == 0) {
        // Global parameter buffers are set on the first picture only.
        for (i = 0; i < ctx->nb_global_params; i++) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncMiscParameterBufferType,
                                                 (char*)ctx->global_params[i],
                                                 ctx->global_params_size[i]);
            if (err < 0)
                goto fail;
        }
    }

    if (pic->type == PICTURE_TYPE_IDR && ctx->codec->init_sequence_params) {
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncSequenceParameterBufferType,
                                             ctx->codec_sequence_params,
                                             ctx->codec->sequence_params_size);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->init_picture_params) {
        err = ctx->codec->init_picture_params(avctx, pic);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to initialise picture "
                   "parameters: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncPictureParameterBufferType,
                                             pic->codec_picture_params,
                                             ctx->codec->picture_params_size);
        if (err < 0)
            goto fail;
    }

    if (pic->type == PICTURE_TYPE_IDR) {
        if (ctx->codec->write_sequence_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_sequence_header(avctx, data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-sequence "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->sequence_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->codec->write_picture_header) {
        bit_len = 8 * sizeof(data);
        err = ctx->codec->write_picture_header(avctx, pic, data, &bit_len);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to write per-picture "
                   "header: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_packed_header(avctx, pic,
                                              ctx->codec->picture_header_type,
                                              data, bit_len);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->write_extra_buffer) {
        for (i = 0;; i++) {
            size_t len = sizeof(data);
            int type;
            err = ctx->codec->write_extra_buffer(avctx, pic, i, &type,
                                                 data, &len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "buffer %d: %d.\n", i, err);
                goto fail;
            }

            err = vaapi_encode_make_param_buffer(avctx, pic, type,
                                                 data, len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->codec->write_extra_header) {
        for (i = 0;; i++) {
            int type;
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_extra_header(avctx, pic, i, &type,
                                                 data, &bit_len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "header %d: %d.\n", i, err);
                goto fail;
            }

            err = vaapi_encode_make_packed_header(avctx, pic, type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

    av_assert0(pic->nb_slices <= MAX_PICTURE_SLICES);
    for (i = 0; i < pic->nb_slices; i++) {
        slice = av_mallocz(sizeof(*slice));
        if (!slice) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        pic->slices[i] = slice;

        if (ctx->codec->slice_params_size > 0) {
            slice->codec_slice_params = av_mallocz(ctx->codec->slice_params_size);
            if (!slice->codec_slice_params) {
                err = AVERROR(ENOMEM);
                goto fail;
            }
        }

        if (ctx->codec->init_slice_params) {
            err = ctx->codec->init_slice_params(avctx, pic, slice);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to initialise slice "
                       "parameters: %d.\n", err);
                goto fail;
            }
        }

        if (ctx->codec->write_slice_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_slice_header(avctx, pic, slice,
                                                 data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-slice "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->slice_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }

        if (ctx->codec->init_slice_params) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncSliceParameterBufferType,
                                                 slice->codec_slice_params,
                                                 ctx->codec->slice_params_size);
            if (err < 0)
                goto fail;
        }
    }

    vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
                         pic->input_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to begin picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          pic->param_buffers, pic->nb_param_buffers);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to upload encode parameters: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        // vaRenderPicture() has been called here, so we should not destroy
        // the parameter buffers unless separate destruction is required.
        if (ctx->hwctx->driver_quirks &
            AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
            goto fail;
        else
            goto fail_at_end;
    }

    if (ctx->hwctx->driver_quirks &
        AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) {
        for (i = 0; i < pic->nb_param_buffers; i++) {
            vas = vaDestroyBuffer(ctx->hwctx->display,
                                  pic->param_buffers[i]);
            if (vas != VA_STATUS_SUCCESS) {
                av_log(avctx, AV_LOG_ERROR, "Failed to destroy "
                       "param buffer %#x: %d (%s).\n",
                       pic->param_buffers[i], vas, vaErrorStr(vas));
                // And ignore.
            }
        }
    }

    pic->encode_issued = 1;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
        return vaapi_encode_wait(avctx, pic);
    else
        return 0;

fail_with_picture:
    vaEndPicture(ctx->hwctx->display, ctx->va_context);
fail:
    for (i = 0; i < pic->nb_param_buffers; i++)
        vaDestroyBuffer(ctx->hwctx->display, pic->param_buffers[i]);
fail_at_end:
    av_freep(&pic->codec_picture_params);
    av_frame_free(&pic->recon_image);
    return err;
}

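// Wait for a picture to finish encoding, then copy the coded data from the
// mapped output buffer into the packet, setting the keyframe flag and pts.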
static int vaapi_encode_output(AVCodecContext *avctx,
                               VAAPIEncodePicture *pic, AVPacket *pkt)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VACodedBufferSegment *buf_list, *buf;
    VAStatus vas;
    int err;

    err = vaapi_encode_wait(avctx, pic);
    if (err < 0)
        return err;

    buf_list = NULL;
    vas = vaMapBuffer(ctx->hwctx->display, pic->output_buffer,
                      (void**)&buf_list);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to map output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    for (buf = buf_list; buf; buf = buf->next) {
        av_log(avctx, AV_LOG_DEBUG, "Output buffer: %u bytes "
               "(status %08x).\n", buf->size, buf->status);

        err = av_new_packet(pkt, buf->size);
        if (err < 0)
            goto fail_mapped;

        memcpy(pkt->data, buf->buf, buf->size);
    }

    if (pic->type == PICTURE_TYPE_IDR)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pic->pts;

    vas = vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    av_buffer_unref(&pic->output_buffer_ref);
    pic->output_buffer = VA_INVALID_ID;

    av_log(avctx, AV_LOG_DEBUG, "Output read for pic %"PRId64"/%"PRId64".\n",
           pic->display_order, pic->encode_order);
    return 0;

fail_mapped:
    vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
fail:
    av_buffer_unref(&pic->output_buffer_ref);
    pic->output_buffer = VA_INVALID_ID;
    return err;
}

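// Wait for a picture and drop its output buffer without reading it back.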
static int vaapi_encode_discard(AVCodecContext *avctx,
                                VAAPIEncodePicture *pic)
{
    vaapi_encode_wait(avctx, pic);

    if (pic->output_buffer_ref) {
        av_log(avctx, AV_LOG_DEBUG, "Discard output for pic "
               "%"PRId64"/%"PRId64".\n",
               pic->display_order, pic->encode_order);

        av_buffer_unref(&pic->output_buffer_ref);
        pic->output_buffer = VA_INVALID_ID;
    }

    return 0;
}

static VAAPIEncodePicture *vaapi_encode_alloc(void)
{
    VAAPIEncodePicture *pic;

    pic = av_mallocz(sizeof(*pic));
    if (!pic)
        return NULL;

    pic->input_surface = VA_INVALID_ID;
    pic->recon_surface = VA_INVALID_ID;
    pic->output_buffer = VA_INVALID_ID;

    return pic;
}

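// Free a picture and everything attached to it: slices, per-codec parameters
// and the input/reconstructed frames.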
static int vaapi_encode_free(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    int i;

    if (pic->encode_issued)
        vaapi_encode_discard(avctx, pic);

    for (i = 0; i < pic->nb_slices; i++) {
        av_freep(&pic->slices[i]->priv_data);
        av_freep(&pic->slices[i]->codec_slice_params);
        av_freep(&pic->slices[i]);
    }
    av_freep(&pic->codec_picture_params);

    av_frame_free(&pic->input_image);
    av_frame_free(&pic->recon_image);

    // Output buffer should already be destroyed.
    av_assert0(pic->output_buffer == VA_INVALID_ID);

    av_freep(&pic->priv_data);
    av_freep(&pic->codec_picture_params);

    av_free(pic);

    return 0;
}

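// Issue encodes according to the current issue mode: either recurse through
// the dependencies of the target picture (serialise / minimise-latency), or
// repeatedly issue every picture whose references have already been issued
// (maximise-throughput).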
static int vaapi_encode_step(AVCodecContext *avctx,
                             VAAPIEncodePicture *target)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int i, err;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING ||
        ctx->issue_mode == ISSUE_MODE_MINIMISE_LATENCY) {
        // These two modes are equivalent, except that we wait for
        // immediate completion on each operation if serialised.

        if (!target) {
            // No target, nothing to do yet.
            return 0;
        }

        if (target->encode_complete) {
            // Already done.
            return 0;
        }

        pic = target;
        for (i = 0; i < pic->nb_refs; i++) {
            if (!pic->refs[i]->encode_complete) {
                err = vaapi_encode_step(avctx, pic->refs[i]);
                if (err < 0)
                    return err;
            }
        }

        err = vaapi_encode_issue(avctx, pic);
        if (err < 0)
            return err;

    } else if (ctx->issue_mode == ISSUE_MODE_MAXIMISE_THROUGHPUT) {
        int activity;

        do {
            activity = 0;
            for (pic = ctx->pic_start; pic; pic = pic->next) {
                if (!pic->input_available || pic->encode_issued)
                    continue;
                for (i = 0; i < pic->nb_refs; i++) {
                    if (!pic->refs[i]->encode_issued)
                        break;
                }
                if (i < pic->nb_refs)
                    continue;
                err = vaapi_encode_issue(avctx, pic);
                if (err < 0)
                    return err;
                activity = 1;
            }
        } while (activity);

        if (target) {
            av_assert0(target->encode_issued && "broken dependencies?");
        }

    } else {
        av_assert0(0);
    }

    return 0;
}

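// Find or create the picture corresponding to the next input frame, building
// the surrounding GOP structure (IDR/I/P plus any B pictures that will be
// displayed before it) as required.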
static int vaapi_encode_get_next(AVCodecContext *avctx,
                                 VAAPIEncodePicture **pic_out)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *start, *end, *pic;
    int i;

    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (pic->next)
            av_assert0(pic->display_order + 1 == pic->next->display_order);
        if (pic->display_order == ctx->input_order) {
            *pic_out = pic;
            return 0;
        }
    }

    if (ctx->input_order == 0) {
        // First frame is always an IDR frame.
        av_assert0(!ctx->pic_start && !ctx->pic_end);

        pic = vaapi_encode_alloc();
        if (!pic)
            return AVERROR(ENOMEM);

        pic->type = PICTURE_TYPE_IDR;
        pic->display_order = 0;
        pic->encode_order = 0;

        ctx->pic_start = ctx->pic_end = pic;

        *pic_out = pic;
        return 0;
    }

    pic = vaapi_encode_alloc();
    if (!pic)
        return AVERROR(ENOMEM);

    if (ctx->p_per_i == 0 || ctx->p_counter == ctx->p_per_i) {
        if (ctx->i_per_idr == 0 || ctx->i_counter == ctx->i_per_idr) {
            pic->type = PICTURE_TYPE_IDR;
            ctx->i_counter = 0;
        } else {
            pic->type = PICTURE_TYPE_I;
            ++ctx->i_counter;
        }
        ctx->p_counter = 0;
    } else {
        pic->type = PICTURE_TYPE_P;
        pic->refs[0] = ctx->pic_end;
        pic->nb_refs = 1;
        ++ctx->p_counter;
    }
    start = end = pic;

    if (pic->type != PICTURE_TYPE_IDR) {
        // If that was not an IDR frame, add B-frames display-before and
        // encode-after it.

        for (i = 0; i < ctx->b_per_p; i++) {
            pic = vaapi_encode_alloc();
            if (!pic)
                goto fail;

            pic->type = PICTURE_TYPE_B;
            pic->refs[0] = ctx->pic_end;
            pic->refs[1] = end;
            pic->nb_refs = 2;

            pic->next = start;
            pic->display_order = ctx->input_order + ctx->b_per_p - i - 1;
            pic->encode_order = pic->display_order + 1;
            start = pic;
        }
    }

    for (i = 0, pic = start; pic; i++, pic = pic->next) {
        pic->display_order = ctx->input_order + i;
        if (end->type == PICTURE_TYPE_IDR)
            pic->encode_order = ctx->input_order + i;
        else if (pic == end)
            pic->encode_order = ctx->input_order;
        else
            pic->encode_order = ctx->input_order + i + 1;
    }

    av_assert0(ctx->pic_end);
    ctx->pic_end->next = start;
    ctx->pic_end = end;

    *pic_out = start;

    av_log(avctx, AV_LOG_DEBUG, "Pictures:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;

fail:
    while (start) {
        pic = start->next;
        vaapi_encode_free(avctx, start);
        start = pic;
    }
    return AVERROR(ENOMEM);
}

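// Fix up the picture list at end of stream: if the last picture with input
// is a B picture, convert it to P and repoint the B references around it,
// then discard any trailing pictures which will never receive input.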
static int vaapi_encode_mangle_end(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *last_pic, *next;

    // Find the last picture we actually have input for.
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (!pic->input_available)
            break;
        last_pic = pic;
    }

    if (pic) {
        av_assert0(last_pic);

        if (last_pic->type == PICTURE_TYPE_B) {
            // Some fixing up is required. Change the type of this
            // picture to P, then modify preceding B references which
            // point beyond it to point at it instead.

            last_pic->type = PICTURE_TYPE_P;
            last_pic->encode_order = last_pic->refs[1]->encode_order;

            for (pic = ctx->pic_start; pic != last_pic; pic = pic->next) {
                if (pic->type == PICTURE_TYPE_B &&
                    pic->refs[1] == last_pic->refs[1])
                    pic->refs[1] = last_pic;
            }

            last_pic->nb_refs = 1;
            last_pic->refs[1] = NULL;
        } else {
            // We can use the current structure (no references point
            // beyond the end), but there are unused pics to discard.
        }

        // Discard all following pics, they will never be used.
        for (pic = last_pic->next; pic; pic = next) {
            next = pic->next;
            vaapi_encode_free(avctx, pic);
        }

        last_pic->next = NULL;
        ctx->pic_end = last_pic;

    } else {
        // Input is available for all pictures, so we don't need to
        // mangle anything.
    }

    av_log(avctx, AV_LOG_DEBUG, "Pictures at end of stream:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;
}

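// Remove pictures from the start of the list once they have been output and
// are no longer referenced by any picture still waiting to be encoded.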
static int vaapi_encode_clear_old(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *old;
    int i;

    while (ctx->pic_start != ctx->pic_end) {
        old = ctx->pic_start;
        if (old->encode_order > ctx->output_order)
            break;

        for (pic = old->next; pic; pic = pic->next) {
            if (pic->encode_complete)
                continue;
            for (i = 0; i < pic->nb_refs; i++) {
                if (pic->refs[i] == old) {
                    // We still need this picture because it's referred to
                    // directly by a later one, so it and all following
                    // pictures have to stay.
                    return 0;
                }
            }
        }

        pic = ctx->pic_start;
        ctx->pic_start = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    return 0;
}

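// Encode entry point: queue the new input frame (or mangle the picture list
// at end of stream), issue hardware encodes, and emit the packet whose
// encode order matches the current output order.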
int ff_vaapi_encode2(AVCodecContext *avctx, AVPacket *pkt,
                     const AVFrame *input_image, int *got_packet)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int err;

    if (input_image) {
        av_log(avctx, AV_LOG_DEBUG, "Encode frame: %ux%u (%"PRId64").\n",
               input_image->width, input_image->height, input_image->pts);

        err = vaapi_encode_get_next(avctx, &pic);
        if (err) {
            av_log(avctx, AV_LOG_ERROR, "Input setup failed: %d.\n", err);
            return err;
        }

        pic->input_image = av_frame_alloc();
        if (!pic->input_image) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        err = av_frame_ref(pic->input_image, input_image);
        if (err < 0)
            goto fail;
        pic->input_surface = (VASurfaceID)(uintptr_t)input_image->data[3];
        pic->pts = input_image->pts;

        if (ctx->input_order == 0)
            ctx->first_pts = pic->pts;
        if (ctx->input_order == ctx->decode_delay)
            ctx->dts_pts_diff = pic->pts - ctx->first_pts;
        if (ctx->output_delay > 0)
            ctx->ts_ring[ctx->input_order % (3 * ctx->output_delay)] = pic->pts;

        pic->input_available = 1;

    } else {
        if (!ctx->end_of_stream) {
            err = vaapi_encode_mangle_end(avctx);
            if (err < 0)
                goto fail;
            ctx->end_of_stream = 1;
        }
    }

    ++ctx->input_order;
    ++ctx->output_order;
    av_assert0(ctx->output_order + ctx->output_delay + 1 == ctx->input_order);

    for (pic = ctx->pic_start; pic; pic = pic->next)
        if (pic->encode_order == ctx->output_order)
            break;

    // pic can be null here if we don't have a specific target in this
    // iteration. We might still issue encodes if things can be overlapped,
    // even though we don't intend to output anything.

    err = vaapi_encode_step(avctx, pic);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
        goto fail;
    }

    if (!pic) {
        *got_packet = 0;
    } else {
        err = vaapi_encode_output(avctx, pic, pkt);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
            goto fail;
        }

        if (ctx->output_delay == 0) {
            pkt->dts = pkt->pts;
        } else if (ctx->output_order < ctx->decode_delay) {
            if (ctx->ts_ring[ctx->output_order] < INT64_MIN + ctx->dts_pts_diff)
                pkt->dts = INT64_MIN;
            else
                pkt->dts = ctx->ts_ring[ctx->output_order] - ctx->dts_pts_diff;
        } else {
            pkt->dts = ctx->ts_ring[(ctx->output_order - ctx->decode_delay) %
                                    (3 * ctx->output_delay)];
        }

        *got_packet = 1;
    }

    err = vaapi_encode_clear_old(avctx);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "List clearing failed: %d.\n", err);
        goto fail;
    }

    return 0;

fail:
    // Unclear what to clean up on failure. There are probably some things we
    // could usefully clean up here, but for now just leave them for uninit()
    // to do instead.
    return err;
}

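// Check that the driver supports the chosen profile, entrypoint and rate
// control mode, and that enough reference frames are available for the
// requested GOP structure.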
static av_cold int vaapi_encode_check_config(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    int i, n, err;
    VAProfile *profiles = NULL;
    VAEntrypoint *entrypoints = NULL;
    VAConfigAttrib attr[] = {
        { VAConfigAttribRateControl },
        { VAConfigAttribEncMaxRefFrames },
    };

    n = vaMaxNumProfiles(ctx->hwctx->display);
    profiles = av_malloc_array(n, sizeof(VAProfile));
    if (!profiles) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigProfiles(ctx->hwctx->display, profiles, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query profiles: %d (%s).\n",
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (profiles[i] == ctx->va_profile)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding profile not found (%d).\n",
               ctx->va_profile);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    n = vaMaxNumEntrypoints(ctx->hwctx->display);
    entrypoints = av_malloc_array(n, sizeof(VAEntrypoint));
    if (!entrypoints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigEntrypoints(ctx->hwctx->display, ctx->va_profile,
                                   entrypoints, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query entrypoints for "
               "profile %u: %d (%s).\n", ctx->va_profile,
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (entrypoints[i] == ctx->va_entrypoint)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding entrypoint not found "
               "(%d / %d).\n", ctx->va_profile, ctx->va_entrypoint);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    vas = vaGetConfigAttributes(ctx->hwctx->display,
                                ctx->va_profile, ctx->va_entrypoint,
                                attr, FF_ARRAY_ELEMS(attr));
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to fetch config "
               "attributes: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(attr); i++) {
        if (attr[i].value == VA_ATTRIB_NOT_SUPPORTED) {
            // Unfortunately we have to treat this as "don't know" and hope
            // for the best, because the Intel MJPEG encoder returns this
            // for all the interesting attributes.
            continue;
        }
        switch (attr[i].type) {
        case VAConfigAttribRateControl:
            if (!(ctx->va_rc_mode & attr[i].value)) {
                av_log(avctx, AV_LOG_ERROR, "Rate control mode is not "
                       "supported: %x\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            break;
        case VAConfigAttribEncMaxRefFrames:
        {
            unsigned int ref_l0 = attr[i].value & 0xffff;
            unsigned int ref_l1 = (attr[i].value >> 16) & 0xffff;

            if (avctx->gop_size > 1 && ref_l0 < 1) {
                av_log(avctx, AV_LOG_ERROR, "P frames are not "
                       "supported (%x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            if (avctx->max_b_frames > 0 && ref_l1 < 1) {
                av_log(avctx, AV_LOG_ERROR, "B frames are not "
                       "supported (%x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
        }
            break;
        }
    }

    err = 0;
fail:
    av_freep(&profiles);
    av_freep(&entrypoints);
    return err;
}

static void vaapi_encode_free_output_buffer(void *opaque,
                                            uint8_t *data)
{
    AVCodecContext *avctx = opaque;
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VABufferID buffer_id;

    buffer_id = (VABufferID)(uintptr_t)data;
    vaDestroyBuffer(ctx->hwctx->display, buffer_id);

    av_log(avctx, AV_LOG_DEBUG, "Freed output buffer %#x\n", buffer_id);
}

static AVBufferRef *vaapi_encode_alloc_output_buffer(void *opaque,
                                                     int size)
{
    AVCodecContext *avctx = opaque;
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VABufferID buffer_id;
    VAStatus vas;
    AVBufferRef *ref;

    // The output buffer size is fixed, so it needs to be large enough
    // to hold the largest possible compressed frame. We assume here
    // that the uncompressed frame plus some header data is an upper
    // bound on that.
    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncCodedBufferType,
                         3 * ctx->aligned_width * ctx->aligned_height +
                         (1 << 16), 1, 0, &buffer_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create bitstream "
               "output buffer: %d (%s).\n", vas, vaErrorStr(vas));
        return NULL;
    }

    av_log(avctx, AV_LOG_DEBUG, "Allocated output buffer %#x\n", buffer_id);

    ref = av_buffer_create((uint8_t*)(uintptr_t)buffer_id,
                           sizeof(buffer_id),
                           &vaapi_encode_free_output_buffer,
                           avctx, AV_BUFFER_FLAG_READONLY);
    if (!ref) {
        vaDestroyBuffer(ctx->hwctx->display, buffer_id);
        return NULL;
    }

    return ref;
}

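// Common initialisation for all VAAPI encoders: pick up the hardware frames
// context, create the VA config and context, allocate the reconstructed
// frame pool and output buffer pool, and set up the GOP counters.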
av_cold int ff_vaapi_encode_init(AVCodecContext *avctx,
                                 const VAAPIEncodeType *type)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    AVVAAPIFramesContext *recon_hwctx = NULL;
    AVVAAPIHWConfig *hwconfig = NULL;
    AVHWFramesConstraints *constraints = NULL;
    enum AVPixelFormat recon_format;
    VAStatus vas;
    int err, i;

    if (!avctx->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the encoding device.\n");
        return AVERROR(EINVAL);
    }

    ctx->codec = type;
    ctx->codec_options = ctx->codec_options_data;

    ctx->va_config = VA_INVALID_ID;
    ctx->va_context = VA_INVALID_ID;

    ctx->priv_data = av_mallocz(type->priv_data_size);
    if (!ctx->priv_data) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
    if (!ctx->input_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;

    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    if (!ctx->device_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
    ctx->hwctx = ctx->device->hwctx;

    err = ctx->codec->init(avctx);
    if (err < 0)
        goto fail;

    err = vaapi_encode_check_config(avctx);
    if (err < 0)
        goto fail;

    vas = vaCreateConfig(ctx->hwctx->display,
                         ctx->va_profile, ctx->va_entrypoint,
                         ctx->config_attributes, ctx->nb_config_attributes,
                         &ctx->va_config);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "configuration: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
    if (!hwconfig) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    hwconfig->config_id = ctx->va_config;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // Probably we can use the input surface format as the surface format
    // of the reconstructed frames. If not, we just pick the first (only?)
    // format in the valid list and hope that it all works.
    recon_format = AV_PIX_FMT_NONE;
    if (constraints->valid_sw_formats) {
        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
            if (ctx->input_frames->sw_format ==
                constraints->valid_sw_formats[i]) {
                recon_format = ctx->input_frames->sw_format;
                break;
            }
        }
        if (recon_format == AV_PIX_FMT_NONE) {
            // No match. Just use the first in the supported list and
            // hope for the best.
            recon_format = constraints->valid_sw_formats[0];
        }
    } else {
        // No idea what to use; copy input format.
        recon_format = ctx->input_frames->sw_format;
    }
    av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
           "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));

    if (ctx->aligned_width < constraints->min_width ||
        ctx->aligned_height < constraints->min_height ||
        ctx->aligned_width > constraints->max_width ||
        ctx->aligned_height > constraints->max_height) {
        av_log(avctx, AV_LOG_ERROR, "Hardware does not support encoding at "
               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
               ctx->aligned_width, ctx->aligned_height,
               constraints->min_width, constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);

    ctx->recon_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->recon_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->recon_frames = (AVHWFramesContext*)ctx->recon_frames_ref->data;

    ctx->recon_frames->format = AV_PIX_FMT_VAAPI;
    ctx->recon_frames->sw_format = recon_format;
    ctx->recon_frames->width = ctx->aligned_width;
    ctx->recon_frames->height = ctx->aligned_height;
    ctx->recon_frames->initial_pool_size = ctx->nb_recon_frames;

    err = av_hwframe_ctx_init(ctx->recon_frames_ref);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialise reconstructed "
               "frame context: %d.\n", err);
        goto fail;
    }
    recon_hwctx = ctx->recon_frames->hwctx;

    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          ctx->aligned_width, ctx->aligned_height,
                          VA_PROGRESSIVE,
                          recon_hwctx->surface_ids,
                          recon_hwctx->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    ctx->input_order = 0;
    ctx->output_delay = avctx->max_b_frames;
    ctx->decode_delay = 1;
    ctx->output_order = - ctx->output_delay - 1;

    if (ctx->codec->sequence_params_size > 0) {
        ctx->codec_sequence_params =
            av_mallocz(ctx->codec->sequence_params_size);
        if (!ctx->codec_sequence_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }
    if (ctx->codec->picture_params_size > 0) {
        ctx->codec_picture_params =
            av_mallocz(ctx->codec->picture_params_size);
        if (!ctx->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (ctx->codec->init_sequence_params) {
        err = ctx->codec->init_sequence_params(avctx);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Codec sequence initialisation "
                   "failed: %d.\n", err);
            goto fail;
        }
    }

    ctx->output_buffer_pool =
        av_buffer_pool_init2(sizeof(VABufferID), avctx,
                             &vaapi_encode_alloc_output_buffer, NULL);
    if (!ctx->output_buffer_pool) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // All I are IDR for now.
    ctx->i_per_idr = 0;
    ctx->p_per_i = ((avctx->gop_size + avctx->max_b_frames) /
                    (avctx->max_b_frames + 1));
    ctx->b_per_p = avctx->max_b_frames;

    // This should be configurable somehow. (Needs testing on a machine
    // where it actually overlaps properly, though.)
    ctx->issue_mode = ISSUE_MODE_MAXIMISE_THROUGHPUT;

    return 0;

fail:
    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    ff_vaapi_encode_close(avctx);
    return err;
}

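// Common teardown: free all queued pictures and destroy the VA context,
// config and remaining allocations.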
av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *next;

    for (pic = ctx->pic_start; pic; pic = next) {
        next = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    if (ctx->va_context != VA_INVALID_ID) {
        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        ctx->va_context = VA_INVALID_ID;
    }

    if (ctx->va_config != VA_INVALID_ID) {
        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        ctx->va_config = VA_INVALID_ID;
    }

    if (ctx->codec->close)
        ctx->codec->close(avctx);

    av_buffer_pool_uninit(&ctx->output_buffer_pool);

    av_freep(&ctx->codec_sequence_params);
    av_freep(&ctx->codec_picture_params);

    av_buffer_unref(&ctx->recon_frames_ref);
    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->device_ref);

    av_freep(&ctx->priv_data);

    return 0;
}