/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <inttypes.h>
#include <string.h>

#include "libavutil/avassert.h"
#include "libavutil/common.h"
#include "libavutil/log.h"
#include "libavutil/pixdesc.h"

#include "vaapi_encode.h"
#include "avcodec.h"

static const char *picture_type_name[] = { "IDR", "I", "P", "B" };

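// Wrap a bitstream header that has already been written into "data" in a
// packed-header parameter/data buffer pair and record both buffers on the
// picture so they are submitted with the rest of its parameter buffers.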
static int vaapi_encode_make_packed_header(AVCodecContext *avctx,
                                           VAAPIEncodePicture *pic,
                                           int type, char *data, size_t bit_len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID param_buffer, data_buffer;
    VAEncPackedHeaderParameterBuffer params = {
        .type = type,
        .bit_length = bit_len,
        .has_emulation_bytes = 1,
    };

    av_assert0(pic->nb_param_buffers + 2 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderParameterBufferType,
                         sizeof(params), 1, &params, &param_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = param_buffer;

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncPackedHeaderDataBufferType,
                         (bit_len + 7) / 8, 1, data, &data_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create data buffer "
               "for packed header (type %d): %d (%s).\n",
               type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = data_buffer;

    av_log(avctx, AV_LOG_DEBUG, "Packed header buffer (%d) is %#x/%#x "
           "(%zu bits).\n", type, param_buffer, data_buffer, bit_len);
    return 0;
}

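// Create a single VA parameter buffer of the given type from "data" and
// record it on the picture for later submission with vaRenderPicture().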
static int vaapi_encode_make_param_buffer(AVCodecContext *avctx,
                                          VAAPIEncodePicture *pic,
                                          int type, char *data, size_t len)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    VABufferID buffer;

    av_assert0(pic->nb_param_buffers + 1 <= MAX_PARAM_BUFFERS);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         type, len, 1, data, &buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter buffer "
               "(type %d): %d (%s).\n", type, vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }
    pic->param_buffers[pic->nb_param_buffers++] = buffer;

    av_log(avctx, AV_LOG_DEBUG, "Param buffer (%d) is %#x.\n",
           type, buffer);
    return 0;
}

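// Block until the hardware has finished encoding this picture (sync on the
// reconstructed surface), then release the input frame, which is no longer
// needed.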
static int vaapi_encode_wait(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;

    av_assert0(pic->encode_issued);

    if (pic->encode_complete) {
        // Already waited for this picture.
        return 0;
    }

    av_log(avctx, AV_LOG_DEBUG, "Sync to pic %"PRId64"/%"PRId64" "
           "(recon surface %#x).\n", pic->display_order,
           pic->encode_order, pic->recon_surface);

    vas = vaSyncSurface(ctx->hwctx->display, pic->recon_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to sync to picture completion: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        return AVERROR(EIO);
    }

    // Input is definitely finished with now.
    av_frame_free(&pic->input_image);

    pic->encode_complete = 1;
    return 0;
}

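// Submit one picture to the hardware encoder: allocate the reconstructed
// surface and coded output buffer, build all parameter buffers and packed
// headers (sequence, picture, slices), then render them through the VA
// context.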
static int vaapi_encode_issue(AVCodecContext *avctx,
                              VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodeSlice *slice;
    VAStatus vas;
    int err, i;
    char data[MAX_PARAM_BUFFER_SIZE];
    size_t bit_len;

    av_log(avctx, AV_LOG_DEBUG, "Issuing encode for pic %"PRId64"/%"PRId64" "
           "as type %s.\n", pic->display_order, pic->encode_order,
           picture_type_name[pic->type]);
    if (pic->nb_refs == 0) {
        av_log(avctx, AV_LOG_DEBUG, "No reference pictures.\n");
    } else {
        av_log(avctx, AV_LOG_DEBUG, "Refers to:");
        for (i = 0; i < pic->nb_refs; i++) {
            av_log(avctx, AV_LOG_DEBUG, " %"PRId64"/%"PRId64,
                   pic->refs[i]->display_order, pic->refs[i]->encode_order);
        }
        av_log(avctx, AV_LOG_DEBUG, ".\n");
    }

    av_assert0(pic->input_available && !pic->encode_issued);
    for (i = 0; i < pic->nb_refs; i++) {
        av_assert0(pic->refs[i]);
        // If we are serialised then the references must have already
        // completed.  If not, they must have been issued but need not
        // have completed yet.
        if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
            av_assert0(pic->refs[i]->encode_complete);
        else
            av_assert0(pic->refs[i]->encode_issued);
    }

    av_log(avctx, AV_LOG_DEBUG, "Input surface is %#x.\n", pic->input_surface);

    pic->recon_image = av_frame_alloc();
    if (!pic->recon_image) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0);
    if (err < 0) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    pic->recon_surface = (VASurfaceID)(uintptr_t)pic->recon_image->data[3];
    av_log(avctx, AV_LOG_DEBUG, "Recon surface is %#x.\n", pic->recon_surface);

    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                         VAEncCodedBufferType,
                         MAX_OUTPUT_BUFFER_SIZE, 1, 0,
                         &pic->output_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create bitstream "
               "output buffer: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(ENOMEM);
        goto fail;
    }
    av_log(avctx, AV_LOG_DEBUG, "Output buffer is %#x.\n",
           pic->output_buffer);

    if (ctx->codec->picture_params_size > 0) {
        pic->codec_picture_params = av_malloc(ctx->codec->picture_params_size);
        if (!pic->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        memcpy(pic->codec_picture_params, ctx->codec_picture_params,
               ctx->codec->picture_params_size);
    } else {
        av_assert0(!ctx->codec_picture_params);
    }

    pic->nb_param_buffers = 0;

    if (pic->encode_order == 0) {
        // Global parameter buffers are set on the first picture only.
        for (i = 0; i < ctx->nb_global_params; i++) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncMiscParameterBufferType,
                                                 (char*)ctx->global_params[i],
                                                 ctx->global_params_size[i]);
            if (err < 0)
                goto fail;
        }
    }

    if (pic->type == PICTURE_TYPE_IDR && ctx->codec->init_sequence_params) {
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncSequenceParameterBufferType,
                                             ctx->codec_sequence_params,
                                             ctx->codec->sequence_params_size);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->init_picture_params) {
        err = ctx->codec->init_picture_params(avctx, pic);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to initialise picture "
                   "parameters: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_param_buffer(avctx, pic,
                                             VAEncPictureParameterBufferType,
                                             pic->codec_picture_params,
                                             ctx->codec->picture_params_size);
        if (err < 0)
            goto fail;
    }

    if (pic->type == PICTURE_TYPE_IDR) {
        if (ctx->codec->write_sequence_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_sequence_header(avctx, data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-sequence "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->sequence_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->codec->write_picture_header) {
        bit_len = 8 * sizeof(data);
        err = ctx->codec->write_picture_header(avctx, pic, data, &bit_len);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Failed to write per-picture "
                   "header: %d.\n", err);
            goto fail;
        }
        err = vaapi_encode_make_packed_header(avctx, pic,
                                              ctx->codec->picture_header_type,
                                              data, bit_len);
        if (err < 0)
            goto fail;
    }

    if (ctx->codec->write_extra_buffer) {
        for (i = 0;; i++) {
            size_t len = sizeof(data);
            int type;
            err = ctx->codec->write_extra_buffer(avctx, pic, i, &type,
                                                 data, &len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "buffer %d: %d.\n", i, err);
                goto fail;
            }
            err = vaapi_encode_make_param_buffer(avctx, pic, type,
                                                 data, len);
            if (err < 0)
                goto fail;
        }
    }

    if (ctx->codec->write_extra_header) {
        for (i = 0;; i++) {
            int type;
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_extra_header(avctx, pic, i, &type,
                                                 data, &bit_len);
            if (err == AVERROR_EOF)
                break;
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write extra "
                       "header %d: %d.\n", i, err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic, type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }
    }

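    // Each slice gets its own parameter buffer and, where the codec
    // provides one, a packed slice header written ahead of the slice data.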
    av_assert0(pic->nb_slices <= MAX_PICTURE_SLICES);
    for (i = 0; i < pic->nb_slices; i++) {
        slice = av_mallocz(sizeof(*slice));
        if (!slice) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        pic->slices[i] = slice;

        if (ctx->codec->slice_params_size > 0) {
            slice->codec_slice_params = av_mallocz(ctx->codec->slice_params_size);
            if (!slice->codec_slice_params) {
                err = AVERROR(ENOMEM);
                goto fail;
            }
        }

        if (ctx->codec->init_slice_params) {
            err = ctx->codec->init_slice_params(avctx, pic, slice);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to initialise slice "
                       "parameters: %d.\n", err);
                goto fail;
            }
        }

        if (ctx->codec->write_slice_header) {
            bit_len = 8 * sizeof(data);
            err = ctx->codec->write_slice_header(avctx, pic, slice,
                                                 data, &bit_len);
            if (err < 0) {
                av_log(avctx, AV_LOG_ERROR, "Failed to write per-slice "
                       "header: %d.\n", err);
                goto fail;
            }
            err = vaapi_encode_make_packed_header(avctx, pic,
                                                  ctx->codec->slice_header_type,
                                                  data, bit_len);
            if (err < 0)
                goto fail;
        }

        if (ctx->codec->init_slice_params) {
            err = vaapi_encode_make_param_buffer(avctx, pic,
                                                 VAEncSliceParameterBufferType,
                                                 slice->codec_slice_params,
                                                 ctx->codec->slice_params_size);
            if (err < 0)
                goto fail;
        }
    }

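    // Everything is prepared: hand the whole set of buffers to the driver
    // in one begin/render/end sequence.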
    vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
                         pic->input_surface);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to begin picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
                          pic->param_buffers, pic->nb_param_buffers);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to upload encode parameters: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_with_picture;
    }

    vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to end picture encode issue: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail_at_end;
    }

    pic->encode_issued = 1;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING)
        return vaapi_encode_wait(avctx, pic);
    else
        return 0;

fail_with_picture:
    vaEndPicture(ctx->hwctx->display, ctx->va_context);
fail:
    for (i = 0; i < pic->nb_param_buffers; i++)
        vaDestroyBuffer(ctx->hwctx->display, pic->param_buffers[i]);
fail_at_end:
    av_freep(&pic->codec_picture_params);
    av_frame_free(&pic->recon_image);
    return err;
}

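// Wait for an issued picture to finish, then copy the coded data out of the
// VA coded buffer into the output packet and set its key flag and pts.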
static int vaapi_encode_output(AVCodecContext *avctx,
                               VAAPIEncodePicture *pic, AVPacket *pkt)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VACodedBufferSegment *buf_list, *buf;
    VAStatus vas;
    int err;

    err = vaapi_encode_wait(avctx, pic);
    if (err < 0)
        return err;

    buf_list = NULL;
    vas = vaMapBuffer(ctx->hwctx->display, pic->output_buffer,
                      (void**)&buf_list);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to map output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    for (buf = buf_list; buf; buf = buf->next) {
        av_log(avctx, AV_LOG_DEBUG, "Output buffer: %u bytes "
               "(status %08x).\n", buf->size, buf->status);

        err = av_new_packet(pkt, buf->size);
        if (err < 0)
            goto fail;

        memcpy(pkt->data, buf->buf, buf->size);
    }

    if (pic->type == PICTURE_TYPE_IDR)
        pkt->flags |= AV_PKT_FLAG_KEY;

    pkt->pts = pic->pts;

    vas = vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: "
               "%d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    vaDestroyBuffer(ctx->hwctx->display, pic->output_buffer);
    pic->output_buffer = VA_INVALID_ID;

    av_log(avctx, AV_LOG_DEBUG, "Output read for pic %"PRId64"/%"PRId64".\n",
           pic->display_order, pic->encode_order);
    return 0;

fail:
    if (pic->output_buffer != VA_INVALID_ID) {
        vaUnmapBuffer(ctx->hwctx->display, pic->output_buffer);
        vaDestroyBuffer(ctx->hwctx->display, pic->output_buffer);
        pic->output_buffer = VA_INVALID_ID;
    }
    return err;
}

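// Wait for an issued picture and destroy its coded output buffer without
// reading it; used when freeing pictures whose output is no longer wanted.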
static int vaapi_encode_discard(AVCodecContext *avctx,
                                VAAPIEncodePicture *pic)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;

    vaapi_encode_wait(avctx, pic);

    if (pic->output_buffer != VA_INVALID_ID) {
        av_log(avctx, AV_LOG_DEBUG, "Discard output for pic "
               "%"PRId64"/%"PRId64".\n",
               pic->display_order, pic->encode_order);

        vaDestroyBuffer(ctx->hwctx->display, pic->output_buffer);
        pic->output_buffer = VA_INVALID_ID;
    }

    return 0;
}

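// Allocate a zeroed picture structure with all VA handles marked invalid.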
static VAAPIEncodePicture *vaapi_encode_alloc(void)
{
    VAAPIEncodePicture *pic;

    pic = av_mallocz(sizeof(*pic));
    if (!pic)
        return NULL;

    pic->input_surface = VA_INVALID_ID;
    pic->recon_surface = VA_INVALID_ID;
    pic->output_buffer = VA_INVALID_ID;

    return pic;
}

static int vaapi_encode_free(AVCodecContext *avctx,
                             VAAPIEncodePicture *pic)
{
    int i;

    if (pic->encode_issued)
        vaapi_encode_discard(avctx, pic);

    for (i = 0; i < pic->nb_slices; i++) {
        av_freep(&pic->slices[i]->priv_data);
        av_freep(&pic->slices[i]->codec_slice_params);
        av_freep(&pic->slices[i]);
    }
    av_freep(&pic->codec_picture_params);

    av_frame_free(&pic->input_image);
    av_frame_free(&pic->recon_image);

    // Output buffer should already be destroyed.
    av_assert0(pic->output_buffer == VA_INVALID_ID);

    av_freep(&pic->priv_data);
    av_freep(&pic->codec_picture_params);

    av_free(pic);

    return 0;
}

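// Drive encoding towards producing output for "target".  In the serialised
// and minimise-latency modes this recursively issues the target's references
// and then the target itself; in maximise-throughput mode it issues every
// picture whose dependencies have already been issued.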
static int vaapi_encode_step(AVCodecContext *avctx,
                             VAAPIEncodePicture *target)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int i, err;

    if (ctx->issue_mode == ISSUE_MODE_SERIALISE_EVERYTHING ||
        ctx->issue_mode == ISSUE_MODE_MINIMISE_LATENCY) {
        // These two modes are equivalent, except that we wait for
        // immediate completion on each operation if serialised.

        if (!target) {
            // No target, nothing to do yet.
            return 0;
        }

        if (target->encode_complete) {
            // Already done.
            return 0;
        }

        pic = target;
        for (i = 0; i < pic->nb_refs; i++) {
            if (!pic->refs[i]->encode_complete) {
                err = vaapi_encode_step(avctx, pic->refs[i]);
                if (err < 0)
                    return err;
            }
        }

        err = vaapi_encode_issue(avctx, pic);
        if (err < 0)
            return err;

    } else if (ctx->issue_mode == ISSUE_MODE_MAXIMISE_THROUGHPUT) {
        int activity;

        do {
            activity = 0;
            for (pic = ctx->pic_start; pic; pic = pic->next) {
                if (!pic->input_available || pic->encode_issued)
                    continue;
                for (i = 0; i < pic->nb_refs; i++) {
                    if (!pic->refs[i]->encode_issued)
                        break;
                }
                if (i < pic->nb_refs)
                    continue;
                err = vaapi_encode_issue(avctx, pic);
                if (err < 0)
                    return err;
                activity = 1;
            }
        } while (activity);

        if (target) {
            av_assert0(target->encode_issued && "broken dependencies?");
        }

    } else {
        av_assert0(0);
    }

    return 0;
}

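// Find or create the picture whose display order matches the next input
// frame.  New pictures are appended to the internal list following the
// IDR/I/P/B pattern controlled by i_per_idr, p_per_i and b_per_p.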
static int vaapi_encode_get_next(AVCodecContext *avctx,
                                 VAAPIEncodePicture **pic_out)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *start, *end, *pic;
    int i;

    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (pic->next)
            av_assert0(pic->display_order + 1 == pic->next->display_order);
        if (pic->display_order == ctx->input_order) {
            *pic_out = pic;
            return 0;
        }
    }

    if (ctx->input_order == 0) {
        // First frame is always an IDR frame.
        av_assert0(!ctx->pic_start && !ctx->pic_end);

        pic = vaapi_encode_alloc();
        if (!pic)
            return AVERROR(ENOMEM);

        pic->type = PICTURE_TYPE_IDR;
        pic->display_order = 0;
        pic->encode_order  = 0;

        ctx->pic_start = ctx->pic_end = pic;

        *pic_out = pic;
        return 0;
    }

    pic = vaapi_encode_alloc();
    if (!pic)
        return AVERROR(ENOMEM);

    if (ctx->p_per_i == 0 || ctx->p_counter == ctx->p_per_i) {
        if (ctx->i_per_idr == 0 || ctx->i_counter == ctx->i_per_idr) {
            pic->type = PICTURE_TYPE_IDR;
            ctx->i_counter = 0;
        } else {
            pic->type = PICTURE_TYPE_I;
            ++ctx->i_counter;
        }
        ctx->p_counter = 0;
    } else {
        pic->type = PICTURE_TYPE_P;
        pic->refs[0] = ctx->pic_end;
        pic->nb_refs = 1;
        ++ctx->p_counter;
    }
    start = end = pic;

    if (pic->type != PICTURE_TYPE_IDR) {
        // If that was not an IDR frame, add B-frames display-before and
        // encode-after it.

        for (i = 0; i < ctx->b_per_p; i++) {
            pic = vaapi_encode_alloc();
            if (!pic)
                goto fail;

            pic->type = PICTURE_TYPE_B;
            pic->refs[0] = ctx->pic_end;
            pic->refs[1] = end;
            pic->nb_refs = 2;

            pic->next = start;
            pic->display_order = ctx->input_order + ctx->b_per_p - i - 1;
            pic->encode_order  = pic->display_order + 1;
            start = pic;
        }
    }

    for (i = 0, pic = start; pic; i++, pic = pic->next) {
        pic->display_order = ctx->input_order + i;
        if (end->type == PICTURE_TYPE_IDR)
            pic->encode_order = ctx->input_order + i;
        else if (pic == end)
            pic->encode_order = ctx->input_order;
        else
            pic->encode_order = ctx->input_order + i + 1;
    }

    av_assert0(ctx->pic_end);
    ctx->pic_end->next = start;
    ctx->pic_end = end;

    *pic_out = start;

    av_log(avctx, AV_LOG_DEBUG, "Pictures:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;

fail:
    while (start) {
        pic = start->next;
        vaapi_encode_free(avctx, start);
        start = pic;
    }
    return AVERROR(ENOMEM);
}

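// Called when the end of the input stream is reached: rewrite the tail of
// the picture list so that no picture references a frame we will never
// receive, and discard pictures that can no longer be encoded.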
static int vaapi_encode_mangle_end(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *last_pic = NULL, *next;

    // Find the last picture we actually have input for.
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        if (!pic->input_available)
            break;
        last_pic = pic;
    }

    if (pic) {
        av_assert0(last_pic);

        if (last_pic->type == PICTURE_TYPE_B) {
            // Some fixing up is required.  Change the type of this
            // picture to P, then modify preceding B references which
            // point beyond it to point at it instead.

            last_pic->type = PICTURE_TYPE_P;
            last_pic->encode_order = last_pic->refs[1]->encode_order;

            for (pic = ctx->pic_start; pic != last_pic; pic = pic->next) {
                if (pic->type == PICTURE_TYPE_B &&
                    pic->refs[1] == last_pic->refs[1])
                    pic->refs[1] = last_pic;
            }

            last_pic->nb_refs = 1;
            last_pic->refs[1] = NULL;
        } else {
            // We can use the current structure (no references point
            // beyond the end), but there are unused pics to discard.
        }

        // Discard all following pics, they will never be used.
        for (pic = last_pic->next; pic; pic = next) {
            next = pic->next;
            vaapi_encode_free(avctx, pic);
        }

        last_pic->next = NULL;
        ctx->pic_end = last_pic;

    } else {
        // Input is available for all pictures, so we don't need to
        // mangle anything.
    }

    av_log(avctx, AV_LOG_DEBUG, "Pictures at end of stream:");
    for (pic = ctx->pic_start; pic; pic = pic->next) {
        av_log(avctx, AV_LOG_DEBUG, " %s (%"PRId64"/%"PRId64")",
               picture_type_name[pic->type],
               pic->display_order, pic->encode_order);
    }
    av_log(avctx, AV_LOG_DEBUG, "\n");

    return 0;
}

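// Remove pictures from the start of the list once they have been output and
// are no longer needed as references by any picture still to be encoded.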
static int vaapi_encode_clear_old(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *old;
    int i;

    while (ctx->pic_start != ctx->pic_end) {
        old = ctx->pic_start;
        if (old->encode_order > ctx->output_order)
            break;

        for (pic = old->next; pic; pic = pic->next) {
            if (pic->encode_complete)
                continue;
            for (i = 0; i < pic->nb_refs; i++) {
                if (pic->refs[i] == old) {
                    // We still need this picture because it's referred to
                    // directly by a later one, so it and all following
                    // pictures have to stay.
                    return 0;
                }
            }
        }

        pic = ctx->pic_start;
        ctx->pic_start = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    return 0;
}

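// Main encode entry point: queue the new input frame (or mark end of
// stream), issue whatever encodes are possible, and emit the packet whose
// encode order matches the current output position.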
int ff_vaapi_encode2(AVCodecContext *avctx, AVPacket *pkt,
                     const AVFrame *input_image, int *got_packet)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic;
    int err;

    if (input_image) {
        av_log(avctx, AV_LOG_DEBUG, "Encode frame: %ux%u (%"PRId64").\n",
               input_image->width, input_image->height, input_image->pts);

        err = vaapi_encode_get_next(avctx, &pic);
        if (err) {
            av_log(avctx, AV_LOG_ERROR, "Input setup failed: %d.\n", err);
            return err;
        }

        pic->input_image = av_frame_alloc();
        if (!pic->input_image) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
        err = av_frame_ref(pic->input_image, input_image);
        if (err < 0)
            goto fail;
        pic->input_surface = (VASurfaceID)(uintptr_t)input_image->data[3];
        pic->pts = input_image->pts;

        if (ctx->input_order == 0)
            ctx->first_pts = pic->pts;
        if (ctx->input_order == ctx->decode_delay)
            ctx->dts_pts_diff = pic->pts - ctx->first_pts;
        if (ctx->output_delay > 0)
            ctx->ts_ring[ctx->input_order % (3 * ctx->output_delay)] = pic->pts;

        pic->input_available = 1;

    } else {
        if (!ctx->end_of_stream) {
            err = vaapi_encode_mangle_end(avctx);
            if (err < 0)
                goto fail;
            ctx->end_of_stream = 1;
        }
    }

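    // Advance the bookkeeping: input_order counts frames received and
    // output_order is the encode order we try to emit on this call; the
    // ts_ring above holds input timestamps so a DTS can be derived for
    // reordered output once the B-frame delay has elapsed.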
    ++ctx->input_order;
    ++ctx->output_order;
    av_assert0(ctx->output_order + ctx->output_delay + 1 == ctx->input_order);

    for (pic = ctx->pic_start; pic; pic = pic->next)
        if (pic->encode_order == ctx->output_order)
            break;

    // pic can be null here if we don't have a specific target in this
    // iteration.  We might still issue encodes if things can be overlapped,
    // even though we don't intend to output anything.

    err = vaapi_encode_step(avctx, pic);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
        goto fail;
    }

    if (!pic) {
        *got_packet = 0;
    } else {
        err = vaapi_encode_output(avctx, pic, pkt);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
            goto fail;
        }

        if (ctx->output_delay == 0) {
            pkt->dts = pkt->pts;
        } else if (ctx->output_order < ctx->decode_delay) {
            if (ctx->ts_ring[ctx->output_order] < INT64_MIN + ctx->dts_pts_diff)
                pkt->dts = INT64_MIN;
            else
                pkt->dts = ctx->ts_ring[ctx->output_order] - ctx->dts_pts_diff;
        } else {
            pkt->dts = ctx->ts_ring[(ctx->output_order - ctx->decode_delay) %
                                    (3 * ctx->output_delay)];
        }

        *got_packet = 1;
    }

    err = vaapi_encode_clear_old(avctx);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "List clearing failed: %d.\n", err);
        goto fail;
    }

    return 0;

fail:
    // Unclear what to clean up on failure.  There are probably some things
    // we could usefully clean up here, but for now just leave them for
    // uninit() to do instead.
    return err;
}

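// Verify that the driver exposes the requested profile and entrypoint and
// that its configuration attributes are compatible with the rate control
// mode and reference structure (P/B frames) we intend to use.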
static av_cold int vaapi_encode_check_config(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAStatus vas;
    int i, n, err;
    VAProfile    *profiles    = NULL;
    VAEntrypoint *entrypoints = NULL;
    VAConfigAttrib attr[] = {
        { VAConfigAttribRateControl     },
        { VAConfigAttribEncMaxRefFrames },
    };

    n = vaMaxNumProfiles(ctx->hwctx->display);
    profiles = av_malloc_array(n, sizeof(VAProfile));
    if (!profiles) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigProfiles(ctx->hwctx->display, profiles, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query profiles: %d (%s).\n",
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (profiles[i] == ctx->va_profile)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding profile not found (%d).\n",
               ctx->va_profile);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    n = vaMaxNumEntrypoints(ctx->hwctx->display);
    entrypoints = av_malloc_array(n, sizeof(VAEntrypoint));
    if (!entrypoints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    vas = vaQueryConfigEntrypoints(ctx->hwctx->display, ctx->va_profile,
                                   entrypoints, &n);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(ctx, AV_LOG_ERROR, "Failed to query entrypoints for "
               "profile %u: %d (%s).\n", ctx->va_profile,
               vas, vaErrorStr(vas));
        err = AVERROR(ENOSYS);
        goto fail;
    }
    for (i = 0; i < n; i++) {
        if (entrypoints[i] == ctx->va_entrypoint)
            break;
    }
    if (i >= n) {
        av_log(ctx, AV_LOG_ERROR, "Encoding entrypoint not found "
               "(%d / %d).\n", ctx->va_profile, ctx->va_entrypoint);
        err = AVERROR(ENOSYS);
        goto fail;
    }

    vas = vaGetConfigAttributes(ctx->hwctx->display,
                                ctx->va_profile, ctx->va_entrypoint,
                                attr, FF_ARRAY_ELEMS(attr));
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to fetch config "
               "attributes: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < FF_ARRAY_ELEMS(attr); i++) {
        if (attr[i].value == VA_ATTRIB_NOT_SUPPORTED) {
            // Unfortunately we have to treat this as "don't know" and hope
            // for the best, because the Intel MJPEG encoder returns this
            // for all the interesting attributes.
            continue;
        }
        switch (attr[i].type) {
        case VAConfigAttribRateControl:
            if (!(ctx->va_rc_mode & attr[i].value)) {
                av_log(avctx, AV_LOG_ERROR, "Rate control mode is not "
                       "supported: %x\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            break;
        case VAConfigAttribEncMaxRefFrames:
        {
            unsigned int ref_l0 = attr[i].value & 0xffff;
            unsigned int ref_l1 = (attr[i].value >> 16) & 0xffff;

            if (avctx->gop_size > 1 && ref_l0 < 1) {
                av_log(avctx, AV_LOG_ERROR, "P frames are not "
                       "supported (%x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
            if (avctx->max_b_frames > 0 && ref_l1 < 1) {
                av_log(avctx, AV_LOG_ERROR, "B frames are not "
                       "supported (%x).\n", attr[i].value);
                err = AVERROR(EINVAL);
                goto fail;
            }
        }
            break;
        }
    }

    err = 0;
fail:
    av_freep(&profiles);
    av_freep(&entrypoints);
    return err;
}

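// Common initialisation for all VAAPI encoders: take references to the input
// hardware frames and device, validate the configuration against the driver,
// create the VA config and context, allocate the reconstructed-frame pool,
// and set up the GOP structure and timestamp bookkeeping.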
av_cold int ff_vaapi_encode_init(AVCodecContext *avctx,
                                 const VAAPIEncodeType *type)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    AVVAAPIFramesContext *recon_hwctx = NULL;
    AVVAAPIHWConfig *hwconfig = NULL;
    AVHWFramesConstraints *constraints = NULL;
    enum AVPixelFormat recon_format;
    VAStatus vas;
    int err, i;

    if (!avctx->hw_frames_ctx) {
        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the encoding device.\n");
        return AVERROR(EINVAL);
    }

    ctx->codec = type;
    ctx->codec_options = ctx->codec_options_data;

    ctx->va_config  = VA_INVALID_ID;
    ctx->va_context = VA_INVALID_ID;

    ctx->priv_data = av_mallocz(type->priv_data_size);
    if (!ctx->priv_data) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
    if (!ctx->input_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;

    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
    if (!ctx->device_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
    ctx->hwctx = ctx->device->hwctx;

    err = ctx->codec->init(avctx);
    if (err < 0)
        goto fail;

    err = vaapi_encode_check_config(avctx);
    if (err < 0)
        goto fail;

    vas = vaCreateConfig(ctx->hwctx->display,
                         ctx->va_profile, ctx->va_entrypoint,
                         ctx->config_attributes, ctx->nb_config_attributes,
                         &ctx->va_config);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "configuration: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
    if (!hwconfig) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    hwconfig->config_id = ctx->va_config;

    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
                                                      hwconfig);
    if (!constraints) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    // Probably we can use the input surface format as the surface format
    // of the reconstructed frames.  If not, we just pick the first (only?)
    // format in the valid list and hope that it all works.
    recon_format = AV_PIX_FMT_NONE;
    if (constraints->valid_sw_formats) {
        for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
            if (ctx->input_frames->sw_format ==
                constraints->valid_sw_formats[i]) {
                recon_format = ctx->input_frames->sw_format;
                break;
            }
        }
        if (recon_format == AV_PIX_FMT_NONE) {
            // No match.  Just use the first in the supported list and
            // hope for the best.
            recon_format = constraints->valid_sw_formats[0];
        }
    } else {
        // No idea what to use; copy input format.
        recon_format = ctx->input_frames->sw_format;
    }
    av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
           "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));

    if (ctx->aligned_width  < constraints->min_width  ||
        ctx->aligned_height < constraints->min_height ||
        ctx->aligned_width  > constraints->max_width  ||
        ctx->aligned_height > constraints->max_height) {
        av_log(avctx, AV_LOG_ERROR, "Hardware does not support encoding at "
               "size %dx%d (constraints: width %d-%d height %d-%d).\n",
               ctx->aligned_width, ctx->aligned_height,
               constraints->min_width,  constraints->max_width,
               constraints->min_height, constraints->max_height);
        err = AVERROR(EINVAL);
        goto fail;
    }

    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);

    ctx->recon_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->recon_frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->recon_frames = (AVHWFramesContext*)ctx->recon_frames_ref->data;

    ctx->recon_frames->format            = AV_PIX_FMT_VAAPI;
    ctx->recon_frames->sw_format         = recon_format;
    ctx->recon_frames->width             = ctx->aligned_width;
    ctx->recon_frames->height            = ctx->aligned_height;
    ctx->recon_frames->initial_pool_size = ctx->nb_recon_frames;

    err = av_hwframe_ctx_init(ctx->recon_frames_ref);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialise reconstructed "
               "frame context: %d.\n", err);
        goto fail;
    }
    recon_hwctx = ctx->recon_frames->hwctx;

    vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
                          ctx->aligned_width, ctx->aligned_height,
                          VA_PROGRESSIVE,
                          recon_hwctx->surface_ids,
                          recon_hwctx->nb_surfaces,
                          &ctx->va_context);
    if (vas != VA_STATUS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create encode pipeline "
               "context: %d (%s).\n", vas, vaErrorStr(vas));
        err = AVERROR(EIO);
        goto fail;
    }

    ctx->input_order  = 0;
    ctx->output_delay = avctx->max_b_frames;
    ctx->decode_delay = 1;
    ctx->output_order = - ctx->output_delay - 1;

    if (ctx->codec->sequence_params_size > 0) {
        ctx->codec_sequence_params =
            av_mallocz(ctx->codec->sequence_params_size);
        if (!ctx->codec_sequence_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }
    if (ctx->codec->picture_params_size > 0) {
        ctx->codec_picture_params =
            av_mallocz(ctx->codec->picture_params_size);
        if (!ctx->codec_picture_params) {
            err = AVERROR(ENOMEM);
            goto fail;
        }
    }

    if (ctx->codec->init_sequence_params) {
        err = ctx->codec->init_sequence_params(avctx);
        if (err < 0) {
            av_log(avctx, AV_LOG_ERROR, "Codec sequence initialisation "
                   "failed: %d.\n", err);
            goto fail;
        }
    }

    // All I are IDR for now.
    ctx->i_per_idr = 0;
    ctx->p_per_i = ((avctx->gop_size + avctx->max_b_frames) /
                    (avctx->max_b_frames + 1));
    ctx->b_per_p = avctx->max_b_frames;

    // This should be configurable somehow.  (Needs testing on a machine
    // where it actually overlaps properly, though.)
    ctx->issue_mode = ISSUE_MODE_MAXIMISE_THROUGHPUT;

    return 0;

fail:
    av_freep(&hwconfig);
    av_hwframe_constraints_free(&constraints);
    ff_vaapi_encode_close(avctx);
    return err;
}

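// Free all pictures still in the list, destroy the VA context and config,
// and release codec-specific state plus the frame and device references.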
av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
{
    VAAPIEncodeContext *ctx = avctx->priv_data;
    VAAPIEncodePicture *pic, *next;

    for (pic = ctx->pic_start; pic; pic = next) {
        next = pic->next;
        vaapi_encode_free(avctx, pic);
    }

    if (ctx->va_context != VA_INVALID_ID) {
        vaDestroyContext(ctx->hwctx->display, ctx->va_context);
        ctx->va_context = VA_INVALID_ID;
    }

    if (ctx->va_config != VA_INVALID_ID) {
        vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
        ctx->va_config = VA_INVALID_ID;
    }

    if (ctx->codec->close)
        ctx->codec->close(avctx);

    av_freep(&ctx->codec_sequence_params);
    av_freep(&ctx->codec_picture_params);

    av_buffer_unref(&ctx->recon_frames_ref);
    av_buffer_unref(&ctx->input_frames_ref);
    av_buffer_unref(&ctx->device_ref);

    av_freep(&ctx->priv_data);

    return 0;
}