/*
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/pixdesc.h"

#include "vaapi_encode.h"
#include "avcodec.h"

static const char *picture_type_name[] = { "IDR", "I", "P", "B" };
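
// A packed header is submitted to libva as a pair of buffers: a
// VAEncPackedHeaderParameterBuffer describing the header and a data buffer
// holding the bitstream itself.  The length is given in bits, and
// has_emulation_bytes indicates that emulation prevention has already been
// applied by the codec-specific header writer.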
static int vaapi_encode_make_packed_header(AVCodecContext *avctx,
                                           VAAPIEncodePicture *pic,
                                           int type, char *data, size_t bit_len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID param_buffer, data_buffer;
    VAEncPackedHeaderParameterBuffer params = {
        .type                = type,
        .bit_length          = bit_len,
        .has_emulation_bytes = 1,
    };

    av_assert0(pic->nb_param_buffers + 2 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderParameterBufferType,
                         sizeof(params), 1, &params, &param_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = param_buffer;

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderDataBufferType,
                         (bit_len + 7) / 8, 1, data, &data_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create data buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = data_buffer;

    av_log(avctx, AV_LOG_DEBUG, "Packed header buffer (%d) is %#x/%#x "
           "(%zu bits).\n", type, param_buffer, data_buffer, bit_len);
    return 0;
}

static int vaapi_encode_make_param_buffer(AVCodecContext *avctx,
                                          VAAPIEncodePicture *pic,
                                          int type, char *data, size_t len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID buffer;

    av_assert0(pic->nb_param_buffers + 1 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         type, len, 1, data, &buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "(type %d): %d (%s).\n", type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = buffer;

    av_log(avctx, AV_LOG_DEBUG, "Param buffer (%d) is %#x.\n",
           type, buffer);
    return 0;
}

static int vaapi_encode_wait(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;

    av_assert0(pic->encode_issued);

    if (pic->encode_complete) {
        // Already waited for this picture.
        return 0;
    }

    av_log(avctx, AV_LOG_DEBUG, "Sync to pic %"PRId64"/%"PRId64" "
           "(input surface %#x).\n", pic->display_order,
           pic->encode_order, pic->input_surface);

    vas = vaSyncSurface(ctx->hwctx->display, pic->input_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to sync to picture completion: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }

    // Input is definitely finished with now.
    av_frame_free(&pic->input_image);

    pic->encode_complete = 1;
    return 0;
}
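
// Issue the encode operation for one picture: allocate its reconstructed
// surface and output buffer, build all parameter and packed-header buffers,
// and submit them via vaBeginPicture()/vaRenderPicture()/vaEndPicture().
// In ISSUE_MODE_SERIALISE_EVERYTHING this also waits for completion before
// returning.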
static int vaapi_encode_issue(AVCodecContext *avctx,
                              VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodeSlice *slice;
    VAStatus vas;
    int err, i;
    char data[MAX_PARAM_BUFFER_SIZE];
    size_t bit_len;

    av_log(avctx, AV_LOG_DEBUG, "Issuing encode for pic %"PRId64"/%"PRId64" "
           "as type %s.\n", pic->display_order, pic->encode_order,
           picture_type_name[pic->type]);
    if (pic->nb_refs == 0) {
        av_log(avctx, AV_LOG_DEBUG, "No reference pictures.\n");
    } else {
        av_log(avctx, AV_LOG_DEBUG, "Refers to:");
        for (i = 0; i < pic->nb_refs; i++) {
            av_log(avctx, AV_LOG_DEBUG, " %"PRId64"/%"PRId64,
                   pic->refs[i]->display_order, pic->refs[i]->encode_order);
        }
        av_log(avctx, AV_LOG_DEBUG, ".\n");
    }

    av_assert0(pic->input_available && !pic->encode_issued);
    for (i = 0; i < pic->nb_refs; i++) {
        av_assert0(pic->refs[i]);
        // If we are serialised then the references must have already
        // completed. If not, they must have been issued but need not
        // have completed yet.
        if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
            av_assert0(pic->refs[i]->encode_complete);
        else
            av_assert0(pic->refs[i]->encode_issued);
    }

    av_log(avctx, AV_LOG_DEBUG, "Input surface is %#x.\n", pic->input_surface);

    pic->recon_image = av_frame_alloc();
    if (!pic->recon_image) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0);
    if (err < 0) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    pic->recon_surface = (VASurfaceID)(uintptr_t)pic->recon_image->data[3];
    av_log(avctx, AV_LOG_DEBUG, "Recon surface is %#x.\n", pic->recon_surface);

    pic->output_buffer_ref = av_buffer_pool_get(ctx->output_buffer_pool);
    if (!pic->output_buffer_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    pic->output_buffer = (VABufferID)(uintptr_t)pic->output_buffer_ref->data;
    av_log(avctx, AV_LOG_DEBUG, "Output buffer is %#x.\n",
           pic->output_buffer);

    if (ctx->codec->picture_params_size > 0) {
        pic->codec_picture_params = av_malloc(ctx->codec->picture_params_size);
        if (!pic->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(pic->codec_picture_params, ctx->codec_picture_params,
               ctx->codec->picture_params_size);
    } else {
        av_assert0(!ctx->codec_picture_params);
    }

    pic->nb_param_buffers = 0;

    if (pic->encode_order == 0) {
        // Global parameter buffers are set on the first picture only.
        for (i = 0; i < ctx->nb_global_params; i++) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncMiscParameterBufferType,
                                                 (char*)ctx->global_params[i],
                                                 ctx->global_params_size[i]);
            if (err < 0)
                goto fail;
        }
    }

    if (pic->type == PICTURE_TYPE_IDR && ctx->codec->init_sequence_params) {
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncSequenceParameterBufferType,
                                             ctx->codec_sequence_params,
                                             ctx->codec->sequence_params_size);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->init_picture_params) {
        err = ctx->codec->init_picture_params(avctx, pic);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to initialise picture "
                   "parameters: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncPictureParameterBufferType,
                                             pic->codec_picture_params,
                                             ctx->codec->picture_params_size);
        if (err < 0)
            goto fail;
    }

    if (pic->type == PICTURE_TYPE_IDR) {
        if (ctx->va_packed_headers & VA_ENC_PACKED_HEADER_SEQUENCE &&
            ctx->codec->write_sequence_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_sequence_header(avctx, data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-sequence "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->sequence_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->va_packed_headers & VA_ENC_PACKED_HEADER_PICTURE &&
        ctx->codec->write_picture_header) {
        bit_len = 8 * sizeof(data);
        err = ctx->codec->write_picture_header(avctx, pic, data, &bit_len);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to write per-picture "
                   "header: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_packed_header(avctx, pic,
                                              ctx->codec->picture_header_type,
                                              data, bit_len);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->write_extra_buffer) {
        for (i = 0;; i++) {
            size_t len = sizeof(data);
            int type;
            err = ctx->codec->write_extra_buffer(avctx, pic, i, &type,
                                                 data, &len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "buffer %d: %d.\n", i, err);
                goto fail;
            }
            err = vaapi_encode_make_param_buffer(avctx, pic, type,
                                                 data, len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->va_packed_headers & VA_ENC_PACKED_HEADER_MISC &&
        ctx->codec->write_extra_header) {
        for (i = 0;; i++) {
            int type;
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_extra_header(avctx, pic, i, &type,
                                                 data, &bit_len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "header %d: %d.\n", i, err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic, type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

    av_assert0(pic->nb_slices <= MAX_PICTURE_SLICES);
    for (i = 0; i < pic->nb_slices; i++) {
        slice = av_mallocz(sizeof(*slice));
        if (!slice) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        slice->index = i;
        pic->slices[i] = slice;

        if (ctx->codec->slice_params_size > 0) {
            slice->codec_slice_params = av_mallocz(ctx->codec->slice_params_size);
            if (!slice->codec_slice_params) {
                err = AVERROR(ENOMEM);
                goto fail;
            }
        }

        if (ctx->codec->init_slice_params) {
            err = ctx->codec->init_slice_params(avctx, pic, slice);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to initialise slice "
                       "parameters: %d.\n", err);
                goto fail;
            }
        }

        if (ctx->va_packed_headers & VA_ENC_PACKED_HEADER_SLICE &&
            ctx->codec->write_slice_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_slice_header(avctx, pic, slice,
                                                 data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-slice "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->slice_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }

        if (ctx->codec->init_slice_params) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncSliceParameterBufferType,
                                                 slice->codec_slice_params,
                                                 ctx->codec->slice_params_size);
            if (err < 0)
                goto fail;
        }
    }

    vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
                         pic->input_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to begin picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          pic->param_buffers, pic->nb_param_buffers);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to upload encode parameters: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        // vaRenderPicture() has been called here, so we should not destroy
        // the parameter buffers unless separate destruction is required.
        if (ctx->hwctx->driver_quirks &
            AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
            goto fail;
        else
            goto fail_at_end;
    }

    if (ctx->hwctx->driver_quirks &
        AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS) {
        for (i = 0; i < pic->nb_param_buffers; i++) {
            vas = vaDestroyBuffer(ctx->hwctx->display,
                                  pic->param_buffers[i]);
            if (vas != VA_STATUS_SUCCESS) {
                av_log(avctx, AV_LOG_ERROR, "Failed to destroy "
                       "param buffer %#x: %d (%s).\n",
                       pic->param_buffers[i], vas, vaErrorStr(vas));
                // And ignore.
            }
        }
    }

    pic->encode_issued = 1;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
        return vaapi_encode_wait(avctx, pic);
    else
        return 0;

fail_with_picture:
    vaEndPicture(ctx->hwctx->display, ctx->va_context);
fail:
    for (i = 0; i < pic->nb_param_buffers; i++)
        vaDestroyBuffer(ctx->hwctx->display, pic->param_buffers[i]);
fail_at_end:
    av_freep(&pic->codec_picture_params);
    av_frame_free(&pic->recon_image);
    return err;
}
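
// Fetch the coded bitstream for a picture: wait for the encode to complete,
// map the coded buffer, copy its contents into the packet, then release the
// output buffer back to the pool.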
static int vaapi_encode_output(AVCodecContext *avctx,
                               VAAPIEncodePicture *pic, AVPacket *pkt)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VACodedBufferSegment *buf_list, *buf;
    VAStatus vas;
    int err;

    err = vaapi_encode_wait(avctx, pic);
    if (err < 0)
        return err;

    buf_list = NULL;
    vas = vaMapBuffer(ctx->hwctx->display, pic->output_buffer,
                      (void**)&buf_list);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to map output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    for (buf = buf_list; buf; buf = buf->next) {
        av_log(avctx, AV_LOG_DEBUG, "Output buffer: %u bytes "
               "(status %08x).\n", buf->size, buf->status);

        err = av_new_packet(pkt, buf->size);
        if (err < 0)
            goto fail_mapped;

        memcpy(pkt->data, buf->buf, buf->size);
    }

    if (pic->type == PICTURE_TYPE_IDR)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pic->pts;

    vas = vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    av_buffer_unref(&pic->output_buffer_ref);
    pic->output_buffer = VA_INVALID_ID;

    av_log(avctx, AV_LOG_DEBUG, "Output read for pic %"PRId64"/%"PRId64".\n",
           pic->display_order, pic->encode_order);
    return 0;

fail_mapped:
    vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
fail:
    av_buffer_unref(&pic->output_buffer_ref);
    pic->output_buffer = VA_INVALID_ID;
    return err;
}

static int vaapi_encode_discard(AVCodecContext *avctx,
                                VAAPIEncodePicture *pic)
{
    vaapi_encode_wait(avctx, pic);

    if (pic->output_buffer_ref) {
        av_log(avctx, AV_LOG_DEBUG, "Discard output for pic "
               "%"PRId64"/%"PRId64".\n",
               pic->display_order, pic->encode_order);

        av_buffer_unref(&pic->output_buffer_ref);
        pic->output_buffer = VA_INVALID_ID;
    }

    return 0;
}

static VAAPIEncodePicture *vaapi_encode_alloc(void)
{
    VAAPIEncodePicture *pic;

    pic = av_mallocz(sizeof(*pic));
    if (!pic)
        return NULL;

    pic->input_surface = VA_INVALID_ID;
    pic->recon_surface = VA_INVALID_ID;
    pic->output_buffer = VA_INVALID_ID;

    return pic;
}

static int vaapi_encode_free(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    int i;

    if (pic->encode_issued)
        vaapi_encode_discard(avctx, pic);

    for (i = 0; i < pic->nb_slices; i++) {
        av_freep(&pic->slices[i]->priv_data);
        av_freep(&pic->slices[i]->codec_slice_params);
        av_freep(&pic->slices[i]);
    }
    av_freep(&pic->codec_picture_params);

    av_frame_free(&pic->input_image);
    av_frame_free(&pic->recon_image);

    // Output buffer should already be destroyed.
    av_assert0(pic->output_buffer == VA_INVALID_ID);

    av_freep(&pic->priv_data);
    av_freep(&pic->codec_picture_params);

    av_free(pic);

    return 0;
}
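
// Advance encoding towards the given target picture.  In the serialised and
// minimise-latency modes the target's references are issued (recursively)
// first; in maximise-throughput mode every picture whose dependencies have
// already been issued is submitted, so that hardware encodes can overlap.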
static int vaapi_encode_step(AVCodecContext *avctx,
                             VAAPIEncodePicture *target)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int i, err;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING ||
        ctx->issue_mode == ISSUE_MODE_MINIMISE_LATENCY) {
        // These two modes are equivalent, except that we wait for
        // immediate completion on each operation if serialised.

        if (!target) {
            // No target, nothing to do yet.
            return 0;
        }

        if (target->encode_complete) {
            // Already done.
            return 0;
        }

        pic = target;
        for (i = 0; i < pic->nb_refs; i++) {
            if (!pic->refs[i]->encode_complete) {
                err = vaapi_encode_step(avctx, pic->refs[i]);
                if (err < 0)
                    return err;
            }
        }

        err = vaapi_encode_issue(avctx, pic);
        if (err < 0)
            return err;

    } else if (ctx->issue_mode == ISSUE_MODE_MAXIMISE_THROUGHPUT) {
        int activity;

        // Run through the list of all available pictures repeatedly
        // and issue the first one found which has all dependencies
        // available (including previously-issued but not necessarily
        // completed pictures).
        do {
            activity = 0;
            for (pic = ctx->pic_start; pic; pic = pic->next) {
                if (!pic->input_available || pic->encode_issued)
                    continue;
                for (i = 0; i < pic->nb_refs; i++) {
                    if (!pic->refs[i]->encode_issued)
                        break;
                }
                if (i < pic->nb_refs)
                    continue;
                err = vaapi_encode_issue(avctx, pic);
                if (err < 0)
                    return err;
                activity = 1;
                // Start again from the beginning of the list,
                // because issuing this picture may have satisfied
                // forward dependencies of earlier ones.
                break;
            }
        } while (activity);

        // If we had a defined target for this step then it will
        // always have been issued by now.
        if (target) {
            av_assert0(target->encode_issued && "broken dependencies?");
        }

    } else {
        av_assert0(0);
    }

    return 0;
}
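
// Return the picture which will hold the next frame in input order,
// creating it (and any associated B-frames) if necessary.  Picture types
// follow the GOP state: an IDR frame starts each GOP, later frames are P
// (or I once p_per_i P-frames have been emitted), and each non-IDR frame
// gets up to b_per_p B-frames which display before it but encode after it.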
static int vaapi_encode_get_next(AVCodecContext *avctx,
                                 VAAPIEncodePicture **pic_out)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *start, *end, *pic;
    int i;

    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (pic->next)
            av_assert0(pic->display_order + 1 == pic->next->display_order);
        if (pic->display_order == ctx->input_order) {
            *pic_out = pic;
            return 0;
        }
    }

    pic = vaapi_encode_alloc();
    if (!pic)
        return AVERROR(ENOMEM);

    if (ctx->input_order == 0 || ctx->force_idr ||
        ctx->gop_counter >= avctx->gop_size) {
        pic->type = PICTURE_TYPE_IDR;
        ctx->force_idr = 0;
        ctx->gop_counter = 1;
        ctx->p_counter = 0;
    } else if (ctx->p_counter >= ctx->p_per_i) {
        pic->type = PICTURE_TYPE_I;
        ++ctx->gop_counter;
        ctx->p_counter = 0;
    } else {
        pic->type = PICTURE_TYPE_P;
        pic->refs[0] = ctx->pic_end;
        pic->nb_refs = 1;
        ++ctx->gop_counter;
        ++ctx->p_counter;
    }
    start = end = pic;

    if (pic->type != PICTURE_TYPE_IDR) {
        // If that was not an IDR frame, add B-frames display-before and
        // encode-after it, but not exceeding the GOP size.

        for (i = 0; i < ctx->b_per_p &&
             ctx->gop_counter < avctx->gop_size; i++) {
            pic = vaapi_encode_alloc();
            if (!pic)
                goto fail;

            pic->type = PICTURE_TYPE_B;
            pic->refs[0] = ctx->pic_end;
            pic->refs[1] = end;
            pic->nb_refs = 2;

            pic->next = start;
            pic->display_order = ctx->input_order + ctx->b_per_p - i - 1;
            pic->encode_order = pic->display_order + 1;
            start = pic;

            ++ctx->gop_counter;
        }
    }

    if (ctx->input_order == 0) {
        pic->display_order = 0;
        pic->encode_order = 0;

        ctx->pic_start = ctx->pic_end = pic;

    } else {
        for (i = 0, pic = start; pic; i++, pic = pic->next) {
            pic->display_order = ctx->input_order + i;
            if (end->type == PICTURE_TYPE_IDR)
                pic->encode_order = ctx->input_order + i;
            else if (pic == end)
                pic->encode_order = ctx->input_order;
            else
                pic->encode_order = ctx->input_order + i + 1;
        }

        av_assert0(ctx->pic_end);
        ctx->pic_end->next = start;
        ctx->pic_end = end;
    }
    *pic_out = start;

    av_log(avctx, AV_LOG_DEBUG, "Pictures:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;

fail:
    while (start) {
        pic = start->next;
        vaapi_encode_free(avctx, start);
        start = pic;
    }
    return AVERROR(ENOMEM);
}
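
// Fix up the end of the GOP when the stream ends (or an IDR is forced)
// before the planned structure has been filled: the last picture with input
// becomes a P frame if it was a B frame, references pointing beyond it are
// redirected to it, and the pictures without input are discarded.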
static int vaapi_encode_truncate_gop(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *last_pic, *next;

    // Find the last picture we actually have input for.
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (!pic->input_available)
            break;
        last_pic = pic;
    }

    if (pic) {
        av_assert0(last_pic);

        if (last_pic->type == PICTURE_TYPE_B) {
            // Some fixing up is required. Change the type of this
            // picture to P, then modify preceding B references which
            // point beyond it to point at it instead.

            last_pic->type = PICTURE_TYPE_P;
            last_pic->encode_order = last_pic->refs[1]->encode_order;

            for (pic = ctx->pic_start; pic != last_pic; pic = pic->next) {
                if (pic->type == PICTURE_TYPE_B &&
                    pic->refs[1] == last_pic->refs[1])
                    pic->refs[1] = last_pic;
            }

            last_pic->nb_refs = 1;
            last_pic->refs[1] = NULL;
        } else {
            // We can use the current structure (no references point
            // beyond the end), but there are unused pics to discard.
        }

        // Discard all following pics, they will never be used.
        for (pic = last_pic->next; pic; pic = next) {
            next = pic->next;
            vaapi_encode_free(avctx, pic);
        }

        last_pic->next = NULL;
        ctx->pic_end = last_pic;

    } else {
        // Input is available for all pictures, so we don't need to
        // mangle anything.
    }

    av_log(avctx, AV_LOG_DEBUG, "Pictures ending truncated GOP:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;
}
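
// Drop pictures from the head of the list once they have been output and
// are no longer referenced by any picture which has not yet completed.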
static int vaapi_encode_clear_old(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *old;
    int i;

    while (ctx->pic_start != ctx->pic_end) {
        old = ctx->pic_start;
        if (old->encode_order > ctx->output_order)
            break;

        for (pic = old->next; pic; pic = pic->next) {
            if (pic->encode_complete)
                continue;
            for (i = 0; i < pic->nb_refs; i++) {
                if (pic->refs[i] == old) {
                    // We still need this picture because it's referred to
                    // directly by a later one, so it and all following
                    // pictures have to stay.
                    return 0;
                }
            }
        }

        pic = ctx->pic_start;
        ctx->pic_start = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    return 0;
}
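
// Encode entry point, used by the per-codec wrappers as their encode2()
// callback.  Each call queues at most one new input frame, issues whatever
// encodes have become possible, and returns the packet whose encode order
// matches the current output order, if any.  Input pts values are kept in
// ts_ring so that dts can be produced for delayed packets; for the first
// decode_delay packets the dts is shifted back by dts_pts_diff so that it
// does not run ahead of pts.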
int ff_vaapi_encode2(AVCodecContext *avctx, AVPacket *pkt,
                     const AVFrame *input_image, int *got_packet)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int err;

    if (input_image) {
        av_log(avctx, AV_LOG_DEBUG, "Encode frame: %ux%u (%"PRId64").\n",
               input_image->width, input_image->height, input_image->pts);

        if (input_image->pict_type == AV_PICTURE_TYPE_I) {
            err = vaapi_encode_truncate_gop(avctx);
            if (err < 0)
                goto fail;
            ctx->force_idr = 1;
        }

        err = vaapi_encode_get_next(avctx, &pic);
        if (err) {
            av_log(avctx, AV_LOG_ERROR, "Input setup failed: %d.\n", err);
            return err;
        }

        pic->input_image = av_frame_alloc();
        if (!pic->input_image) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        err = av_frame_ref(pic->input_image, input_image);
        if (err < 0)
            goto fail;
        pic->input_surface = (VASurfaceID)(uintptr_t)input_image->data[3];
        pic->pts = input_image->pts;

        if (ctx->input_order == 0)
            ctx->first_pts = pic->pts;
        if (ctx->input_order == ctx->decode_delay)
            ctx->dts_pts_diff = pic->pts - ctx->first_pts;
        if (ctx->output_delay > 0)
            ctx->ts_ring[ctx->input_order % (3 * ctx->output_delay)] = pic->pts;

        pic->input_available = 1;

    } else {
        if (!ctx->end_of_stream) {
            err = vaapi_encode_truncate_gop(avctx);
            if (err < 0)
                goto fail;
            ctx->end_of_stream = 1;
        }
    }

    ++ctx->input_order;
    ++ctx->output_order;
    av_assert0(ctx->output_order + ctx->output_delay + 1 == ctx->input_order);

    for (pic = ctx->pic_start; pic; pic = pic->next)
        if (pic->encode_order == ctx->output_order)
            break;

    // pic can be null here if we don't have a specific target in this
    // iteration. We might still issue encodes if things can be overlapped,
    // even though we don't intend to output anything.

    err = vaapi_encode_step(avctx, pic);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
        goto fail;
    }

    if (!pic) {
        *got_packet = 0;
    } else {
        err = vaapi_encode_output(avctx, pic, pkt);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
            goto fail;
        }

        if (ctx->output_delay == 0) {
            pkt->dts = pkt->pts;
        } else if (ctx->output_order < ctx->decode_delay) {
            if (ctx->ts_ring[ctx->output_order] < INT64_MIN + ctx->dts_pts_diff)
                pkt->dts = INT64_MIN;
            else
                pkt->dts = ctx->ts_ring[ctx->output_order] - ctx->dts_pts_diff;
        } else {
            pkt->dts = ctx->ts_ring[(ctx->output_order - ctx->decode_delay) %
                                    (3 * ctx->output_delay)];
        }

        *got_packet = 1;
    }

    err = vaapi_encode_clear_old(avctx);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "List clearing failed: %d.\n", err);
        goto fail;
    }

    return 0;

fail:
    // Unclear what to clean up on failure. There are probably some things
    // we could usefully clean up here, but for now just leave them for
    // uninit() to do instead.
    return err;
}
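
// Check that the driver exposes the requested profile and entrypoint, then
// query the configuration attributes (RT format, rate control modes,
// reference frame limits, packed header support), validate them against
// what the encoder wants and collect them for vaCreateConfig().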
static av_cold int vaapi_encode_config_attributes(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    int i, n, err;
    VAProfile *profiles = NULL;
    VAEntrypoint *entrypoints = NULL;
    VAConfigAttrib attr[] = {
        { VAConfigAttribRTFormat         },
        { VAConfigAttribRateControl      },
        { VAConfigAttribEncMaxRefFrames  },
        { VAConfigAttribEncPackedHeaders },
    };

    n = vaMaxNumProfiles(ctx->hwctx->display);
    profiles = av_malloc_array(n, sizeof(VAProfile));
    if (!profiles) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigProfiles(ctx->hwctx->display, profiles, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query profiles: %d (%s).\n",
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (profiles[i] == ctx->va_profile)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding profile not found (%d).\n",
               ctx->va_profile);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    n = vaMaxNumEntrypoints(ctx->hwctx->display);
    entrypoints = av_malloc_array(n, sizeof(VAEntrypoint));
    if (!entrypoints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigEntrypoints(ctx->hwctx->display, ctx->va_profile,
                                   entrypoints, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query entrypoints for "
               "profile %u: %d (%s).\n", ctx->va_profile,
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (entrypoints[i] == ctx->va_entrypoint)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding entrypoint not found "
               "(%d / %d).\n", ctx->va_profile, ctx->va_entrypoint);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    vas = vaGetConfigAttributes(ctx->hwctx->display,
                                ctx->va_profile, ctx->va_entrypoint,
                                attr, FF_ARRAY_ELEMS(attr));
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to fetch config "
               "attributes: %d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EINVAL);
    }

    for (i = 0; i < FF_ARRAY_ELEMS(attr); i++) {
        if (attr[i].value == VA_ATTRIB_NOT_SUPPORTED) {
            // Unfortunately we have to treat this as "don't know" and hope
            // for the best, because the Intel MJPEG encoder returns this
            // for all the interesting attributes.
            continue;
        }
        switch (attr[i].type) {
        case VAConfigAttribRTFormat:
            if (!(ctx->va_rt_format & attr[i].value)) {
                av_log(avctx, AV_LOG_ERROR, "Surface RT format %#x "
                       "is not supported (mask %#x).\n",
                       ctx->va_rt_format, attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            ctx->config_attributes[ctx->nb_config_attributes++] =
                (VAConfigAttrib) {
                    .type  = VAConfigAttribRTFormat,
                    .value = ctx->va_rt_format,
                };
            break;
        case VAConfigAttribRateControl:
            if (!(ctx->va_rc_mode & attr[i].value)) {
                av_log(avctx, AV_LOG_ERROR, "Rate control mode %#x "
                       "is not supported (mask: %#x).\n",
                       ctx->va_rc_mode, attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            ctx->config_attributes[ctx->nb_config_attributes++] =
                (VAConfigAttrib) {
                    .type  = VAConfigAttribRateControl,
                    .value = ctx->va_rc_mode,
                };
            break;
        case VAConfigAttribEncMaxRefFrames:
        {
            unsigned int ref_l0 = attr[i].value & 0xffff;
            unsigned int ref_l1 = (attr[i].value >> 16) & 0xffff;

            if (avctx->gop_size > 1 && ref_l0 < 1) {
                av_log(avctx, AV_LOG_ERROR, "P frames are not "
                       "supported (%#x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            if (avctx->max_b_frames > 0 && ref_l1 < 1) {
                av_log(avctx, AV_LOG_ERROR, "B frames are not "
                       "supported (%#x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
        }
        break;
        case VAConfigAttribEncPackedHeaders:
            if (ctx->va_packed_headers & ~attr[i].value) {
                // This isn't fatal, but packed headers are always
                // preferable because they are under our control.
                // When absent, the driver is generating them and some
                // features may not work (e.g. VUI or SEI in H.264).
                av_log(avctx, AV_LOG_WARNING, "Warning: some packed "
                       "headers are not supported (want %#x, got %#x).\n",
                       ctx->va_packed_headers, attr[i].value);
                ctx->va_packed_headers &= attr[i].value;
            }
            ctx->config_attributes[ctx->nb_config_attributes++] =
                (VAConfigAttrib) {
                    .type  = VAConfigAttribEncPackedHeaders,
                    .value = ctx->va_packed_headers,
                };
            break;
        default:
            av_assert0(0 && "Unexpected config attribute.");
        }
    }

    err = 0;
fail:
    av_freep(&profiles);
    av_freep(&entrypoints);
    return err;
}
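
// Set up the global rate-control and HRD parameter buffers.  If the user
// does not set rc_buffer_size / rc_initial_buffer_occupancy, the HRD buffer
// defaults to one second at the target bitrate, three quarters full.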
static av_cold int vaapi_encode_init_rate_control(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    int hrd_buffer_size;
    int hrd_initial_buffer_fullness;

    if (avctx->rc_buffer_size)
        hrd_buffer_size = avctx->rc_buffer_size;
    else
        hrd_buffer_size = avctx->bit_rate;
    if (avctx->rc_initial_buffer_occupancy)
        hrd_initial_buffer_fullness = avctx->rc_initial_buffer_occupancy;
    else
        hrd_initial_buffer_fullness = hrd_buffer_size * 3 / 4;

    ctx->rc_params.misc.type = VAEncMiscParameterTypeRateControl;
    ctx->rc_params.rc = (VAEncMiscParameterRateControl) {
        .bits_per_second   = avctx->bit_rate,
        .target_percentage = 66,
        .window_size       = 1000,
        .initial_qp        = (avctx->qmax >= 0 ? avctx->qmax : 40),
        .min_qp            = (avctx->qmin >= 0 ? avctx->qmin : 18),
        .basic_unit_size   = 0,
    };
    ctx->global_params[ctx->nb_global_params] =
        &ctx->rc_params.misc;
    ctx->global_params_size[ctx->nb_global_params++] =
        sizeof(ctx->rc_params);

    ctx->hrd_params.misc.type = VAEncMiscParameterTypeHRD;
    ctx->hrd_params.hrd = (VAEncMiscParameterHRD) {
        .initial_buffer_fullness = hrd_initial_buffer_fullness,
        .buffer_size             = hrd_buffer_size,
    };
    ctx->global_params[ctx->nb_global_params] =
        &ctx->hrd_params.misc;
    ctx->global_params_size[ctx->nb_global_params++] =
        sizeof(ctx->hrd_params);

    return 0;
}
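
// Coded output buffers are pooled.  Each AVBufferRef in the pool carries a
// VABufferID cast into its data pointer rather than pointing at mapped
// memory; the VA buffer is destroyed when the last reference is released.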
static void vaapi_encode_free_output_buffer(void *opaque,
                                            uint8_t *data)
{
    AVCodecContext *avctx = opaque;
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VABufferID buffer_id;

    buffer_id = (VABufferID)(uintptr_t)data;

    vaDestroyBuffer(ctx->hwctx->display, buffer_id);

    av_log(avctx, AV_LOG_DEBUG, "Freed output buffer %#x\n", buffer_id);
}

static AVBufferRef *vaapi_encode_alloc_output_buffer(void *opaque,
                                                     int size)
{
    AVCodecContext *avctx = opaque;
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VABufferID buffer_id;
    VAStatus vas;
    AVBufferRef *ref;

    // The output buffer size is fixed, so it needs to be large enough
    // to hold the largest possible compressed frame. We assume here
    // that the uncompressed frame plus some header data is an upper
    // bound on that.
    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncCodedBufferType,
                         3 * ctx->surface_width * ctx->surface_height +
                         (1 << 16), 1, 0, &buffer_id);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create bitstream "
               "output buffer: %d (%s).\n", vas, vaErrorStr(vas));
        return NULL;
    }

    av_log(avctx, AV_LOG_DEBUG, "Allocated output buffer %#x\n", buffer_id);

    ref = av_buffer_create((uint8_t*)(uintptr_t)buffer_id,
                           sizeof(buffer_id),
                           &vaapi_encode_free_output_buffer,
                           avctx, AV_BUFFER_FLAG_READONLY);
    if (!ref) {
        vaDestroyBuffer(ctx->hwctx->display, buffer_id);
        return NULL;
    }

    return ref;
}

static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    AVVAAPIHWConfig *hwconfig = NULL;
    AVHWFramesConstraints *constraints = NULL;
    enum AVPixelFormat recon_format;
    int err, i;

    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
    if (!hwconfig) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    hwconfig->config_id = ctx->va_config;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // Probably we can use the input surface format as the surface format
    // of the reconstructed frames. If not, we just pick the first (only?)
    // format in the valid list and hope that it all works.
    recon_format = AV_PIX_FMT_NONE;
    if (constraints->valid_sw_formats) {
        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
            if (ctx->input_frames->sw_format ==
                constraints->valid_sw_formats[i]) {
                recon_format = ctx->input_frames->sw_format;
                break;
            }
        }
        if (recon_format == AV_PIX_FMT_NONE) {
            // No match. Just use the first in the supported list and
            // hope for the best.
            recon_format = constraints->valid_sw_formats[0];
        }
    } else {
        // No idea what to use; copy input format.
        recon_format = ctx->input_frames->sw_format;
    }
    av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
           "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));

    if (ctx->surface_width  < constraints->min_width  ||
        ctx->surface_height < constraints->min_height ||
        ctx->surface_width  > constraints->max_width  ||
        ctx->surface_height > constraints->max_height) {
        av_log(avctx, AV_LOG_ERROR, "Hardware does not support encoding at "
               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
               ctx->surface_width, ctx->surface_height,
               constraints->min_width,  constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);

    ctx->recon_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->recon_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->recon_frames = (AVHWFramesContext*)ctx->recon_frames_ref->data;

    ctx->recon_frames->format    = AV_PIX_FMT_VAAPI;
    ctx->recon_frames->sw_format = recon_format;
    ctx->recon_frames->width     = ctx->surface_width;
    ctx->recon_frames->height    = ctx->surface_height;
    // At most three IDR/I/P frames and two runs of B frames can be in
    // flight at any one time.
    ctx->recon_frames->initial_pool_size = 3 + 2 * avctx->max_b_frames;

    err = av_hwframe_ctx_init(ctx->recon_frames_ref);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialise reconstructed "
               "frame context: %d.\n", err);
        goto fail;
    }

    err = 0;
fail:
    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    return err;
}
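
// Common initialisation shared by all VAAPI encoders: adopt the caller's
// hardware frames context, validate configuration attributes, create the VA
// config and context, set up the output buffer pool, rate control and
// codec-specific parameters, and (if packed sequence headers are available)
// write the sequence header into extradata.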
av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    AVVAAPIFramesContext *recon_hwctx = NULL;
    VAStatus vas;
    int err;

    if (!avctx->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the encoding device.\n");
        return AVERROR(EINVAL);
    }

    ctx->codec_options = ctx->codec_options_data;

    ctx->va_config  = VA_INVALID_ID;
    ctx->va_context = VA_INVALID_ID;

    ctx->priv_data = av_mallocz(ctx->codec->priv_data_size);
    if (!ctx->priv_data) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
    if (!ctx->input_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;

    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    if (!ctx->device_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
    ctx->hwctx = ctx->device->hwctx;

    err = vaapi_encode_config_attributes(avctx);
    if (err < 0)
        goto fail;

    vas = vaCreateConfig(ctx->hwctx->display,
                         ctx->va_profile, ctx->va_entrypoint,
                         ctx->config_attributes, ctx->nb_config_attributes,
                         &ctx->va_config);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "configuration: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    err = vaapi_encode_create_recon_frames(avctx);
    if (err < 0)
        goto fail;

    recon_hwctx = ctx->recon_frames->hwctx;
    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          ctx->surface_width, ctx->surface_height,
                          VA_PROGRESSIVE,
                          recon_hwctx->surface_ids,
                          recon_hwctx->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    ctx->output_buffer_pool =
        av_buffer_pool_init2(sizeof(VABufferID), avctx,
                             &vaapi_encode_alloc_output_buffer, NULL);
    if (!ctx->output_buffer_pool) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    if (ctx->va_rc_mode & ~VA_RC_CQP) {
        err = vaapi_encode_init_rate_control(avctx);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->configure) {
        err = ctx->codec->configure(avctx);
        if (err < 0)
            goto fail;
    }

    ctx->input_order  = 0;
    ctx->output_delay = avctx->max_b_frames;
    ctx->decode_delay = 1;
    ctx->output_order = - ctx->output_delay - 1;

    // Currently we never generate I frames, only IDR.
    ctx->p_per_i = ((avctx->gop_size + avctx->max_b_frames) /
                    (avctx->max_b_frames + 1));
    ctx->b_per_p = avctx->max_b_frames;

    if (ctx->codec->sequence_params_size > 0) {
        ctx->codec_sequence_params =
            av_mallocz(ctx->codec->sequence_params_size);
        if (!ctx->codec_sequence_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }
    if (ctx->codec->picture_params_size > 0) {
        ctx->codec_picture_params =
            av_mallocz(ctx->codec->picture_params_size);
        if (!ctx->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (ctx->codec->init_sequence_params) {
        err = ctx->codec->init_sequence_params(avctx);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Codec sequence initialisation "
                   "failed: %d.\n", err);
            goto fail;
        }
    }

    // This should be configurable somehow. (Needs testing on a machine
    // where it actually overlaps properly, though.)
    ctx->issue_mode = ISSUE_MODE_MAXIMISE_THROUGHPUT;

    if (ctx->va_packed_headers & VA_ENC_PACKED_HEADER_SEQUENCE &&
        ctx->codec->write_sequence_header) {
        char data[MAX_PARAM_BUFFER_SIZE];
        size_t bit_len = 8 * sizeof(data);

        err = ctx->codec->write_sequence_header(avctx, data, &bit_len);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to write sequence header "
                   "for extradata: %d.\n", err);
            goto fail;
        } else {
            avctx->extradata_size = (bit_len + 7) / 8;
            avctx->extradata = av_mallocz(avctx->extradata_size +
                                          AV_INPUT_BUFFER_PADDING_SIZE);
            if (!avctx->extradata) {
                err = AVERROR(ENOMEM);
                goto fail;
            }
            memcpy(avctx->extradata, data, avctx->extradata_size);
        }
    }

    return 0;

fail:
    ff_vaapi_encode_close(avctx);
    return err;
}

av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *next;

    for (pic = ctx->pic_start; pic; pic = next) {
        next = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    if (ctx->va_context != VA_INVALID_ID) {
        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        ctx->va_context = VA_INVALID_ID;
    }

    if (ctx->va_config != VA_INVALID_ID) {
        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        ctx->va_config = VA_INVALID_ID;
    }

    av_buffer_pool_uninit(&ctx->output_buffer_pool);

    av_freep(&ctx->codec_sequence_params);
    av_freep(&ctx->codec_picture_params);

    av_buffer_unref(&ctx->recon_frames_ref);
    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->device_ref);

    av_freep(&ctx->priv_data);

    return 0;
}