/*
 * AMD AMF support
 * Copyright (C) 2017 Luca Barbato
 * Copyright (C) 2017 Mikhail Mironov <mikhail.mironov@amd.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/hwcontext.h"
#include "internal.h"
#if CONFIG_D3D11VA
#include "libavutil/hwcontext_d3d11va.h"
#endif
#include "libavutil/mem.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "amfenc.h"

#if CONFIG_D3D11VA
#include <d3d11.h>
#endif

#if HAVE_WINDOWS_H
#include <windows.h>
#define dlopen(filename, flags) LoadLibrary((filename))
#define dlsym(handle, symbol)   GetProcAddress(handle, symbol)
#define dlclose(handle)         FreeLibrary(handle)
#else
#include <dlfcn.h>
#endif

#define PTS_PROP L"PtsProp"

const enum AVPixelFormat ff_amf_pix_fmts[] = {
    AV_PIX_FMT_NV12,
    AV_PIX_FMT_YUV420P,
#if CONFIG_D3D11VA
    AV_PIX_FMT_D3D11,
#endif
    AV_PIX_FMT_NONE
};

typedef struct FormatMap {
    enum AVPixelFormat      av_format;
    enum AMF_SURFACE_FORMAT amf_format;
} FormatMap;

static const FormatMap format_map[] =
{
    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
//  { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
//  { AV_PIX_FMT_RGB0,       AMF_SURFACE_RGBA },
    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
};

static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    return desc->flags & AV_PIX_FMT_FLAG_HWACCEL;
}

static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt)
{
    int i;
    for (i = 0; i < amf_countof(format_map); i++) {
        if (format_map[i].av_format == fmt) {
            return format_map[i].amf_format;
        }
    }
    return AMF_SURFACE_UNKNOWN;
}
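
// Trace writer that bridges AMF's internal tracing into av_log(): AMF calls
// Write() for every trace line (the message already ends with '\n').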
static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
    const wchar_t *scope, const wchar_t *message)
{
    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message); // \n is provided from AMF
}

static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter *pThis)
{
}

static AMFTraceWriterVtbl tracer_vtbl =
{
    .Write = AMFTraceWriter_Write,
    .Flush = AMFTraceWriter_Flush,
};
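
// Load the AMF runtime library at run time, resolve the AMFInit/AMFQueryVersion
// entry points and fetch the factory, trace and debug interfaces. Also resets
// the per-encoder state (delayed frame, timestamp FIFO) used by the
// send/receive path below.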
static int amf_load_library(AVCodecContext *avctx)
{
    AmfContext        *ctx = avctx->priv_data;
    AMFInit_Fn         init_fun = NULL;
    AMFQueryVersion_Fn version_fun = NULL;
    AMF_RESULT         res = AMF_OK;

    ctx->eof = 0;
    ctx->delayed_drain = 0;
    ctx->hw_frames_ctx = NULL;
    ctx->hw_device_ctx = NULL;
    ctx->delayed_surface = NULL;
    ctx->delayed_frame = av_frame_alloc();
    if (!ctx->delayed_frame) {
        return AVERROR(ENOMEM);
    }
    // hardcoded to current HW queue size - will realloc in timestamp_queue_enqueue() if too small
    ctx->timestamp_list = av_fifo_alloc((avctx->max_b_frames + 16) * sizeof(int64_t));
    if (!ctx->timestamp_list) {
        return AVERROR(ENOMEM);
    }
    ctx->dts_delay = 0;

    ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
    AMF_RETURN_IF_FALSE(ctx, ctx->library != NULL,
        AVERROR_UNKNOWN, "DLL %s failed to open\n", AMF_DLL_NAMEA);

    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);

    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
    AMF_RETURN_IF_FALSE(ctx, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s\n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);

    res = version_fun(&ctx->version);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d\n", AMF_INIT_FUNCTION_NAME, res);
    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d\n", res);
    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d\n", res);
    return 0;
}
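
// Route AMF tracing to av_log, create the AMF context and initialize it on a
// DirectX device: an existing D3D11VA device from avctx->hw_frames_ctx /
// hw_device_ctx is reused when possible, otherwise AMF opens its own DX11
// (or, failing that, DX9) device.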
static int amf_init_context(AVCodecContext *avctx)
{
    AmfContext *ctx = avctx->priv_data;
    AMF_RESULT  res = AMF_OK;

    // configure AMF logger
    // the return of these functions indicates the old state and does not affect behaviour
    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, ctx->log_to_dbg != 0);
    if (ctx->log_to_dbg)
        ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);

    // connect AMF logger to av_log
    ctx->tracer.vtbl = &tracer_vtbl;
    ctx->tracer.avctx = avctx;
    ctx->trace->pVtbl->RegisterWriter(ctx->trace, ctx->writer_id, (AMFTraceWriter*)&ctx->tracer, 1);
    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, ctx->writer_id, AMF_TRACE_TRACE);

    res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d\n", res);

    // try to reuse existing DX device
#if CONFIG_D3D11VA
    if (avctx->hw_frames_ctx) {
        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
            if (amf_av_to_amf_format(device_ctx->sw_format) != AMF_SURFACE_UNKNOWN) {
                if (device_ctx->device_ctx->hwctx) {
                    AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
                    res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
                    if (res == AMF_OK) {
                        ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
                        if (!ctx->hw_frames_ctx) {
                            return AVERROR(ENOMEM);
                        }
                    } else {
                        if (res == AMF_NOT_SUPPORTED)
                            av_log(avctx, AV_LOG_INFO, "avctx->hw_frames_ctx has D3D11 device which doesn't have D3D11VA interface, switching to default\n");
                        else
                            av_log(avctx, AV_LOG_INFO, "avctx->hw_frames_ctx has non-AMD device, switching to default\n");
                    }
                }
            } else {
                av_log(avctx, AV_LOG_INFO, "avctx->hw_frames_ctx has format not supported by AMF, switching to default\n");
            }
        }
    } else if (avctx->hw_device_ctx) {
        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx->hw_device_ctx->data);
        if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
            if (device_ctx->hwctx) {
                AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->hwctx;
                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
                if (res == AMF_OK) {
                    ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
                    if (!ctx->hw_device_ctx) {
                        return AVERROR(ENOMEM);
                    }
                } else {
                    if (res == AMF_NOT_SUPPORTED)
                        av_log(avctx, AV_LOG_INFO, "avctx->hw_device_ctx has D3D11 device which doesn't have D3D11VA interface, switching to default\n");
                    else
                        av_log(avctx, AV_LOG_INFO, "avctx->hw_device_ctx has non-AMD device, switching to default\n");
                }
            }
        }
    }
#endif
    if (!ctx->hw_frames_ctx && !ctx->hw_device_ctx) {
        res = ctx->context->pVtbl->InitDX11(ctx->context, NULL, AMF_DX11_1);
        if (res != AMF_OK) {
            res = ctx->context->pVtbl->InitDX9(ctx->context, NULL);
            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "InitDX9() failed with error %d\n", res);
        }
    }
    return 0;
}
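
// Create the AVC or HEVC encoder component for the negotiated surface format.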
static int amf_init_encoder(AVCodecContext *avctx)
{
    AmfContext    *ctx = avctx->priv_data;
    const wchar_t *codec_id = NULL;
    AMF_RESULT     res = AMF_OK;

    switch (avctx->codec->id) {
        case AV_CODEC_ID_H264:
            codec_id = AMFVideoEncoderVCE_AVC;
            break;
        case AV_CODEC_ID_HEVC:
            codec_id = AMFVideoEncoder_HEVC;
            break;
        default:
            break;
    }
    AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is not supported\n", avctx->codec->id);

    ctx->format = amf_av_to_amf_format(avctx->pix_fmt);
    AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL), "Format %d is not supported\n", avctx->pix_fmt);

    res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d\n", codec_id, res);

    return 0;
}
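
// Release any pending delayed surface, tear down the encoder and context,
// drop device/frames references and unload the AMF runtime.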
int av_cold ff_amf_encode_close(AVCodecContext *avctx)
{
    AmfContext *ctx = avctx->priv_data;

    if (ctx->delayed_surface) {
        ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
        ctx->delayed_surface = NULL;
    }

    if (ctx->encoder) {
        ctx->encoder->pVtbl->Terminate(ctx->encoder);
        ctx->encoder->pVtbl->Release(ctx->encoder);
        ctx->encoder = NULL;
    }

    if (ctx->context) {
        ctx->context->pVtbl->Terminate(ctx->context);
        ctx->context->pVtbl->Release(ctx->context);
        ctx->context = NULL;
    }
    av_buffer_unref(&ctx->hw_device_ctx);
    av_buffer_unref(&ctx->hw_frames_ctx);

    if (ctx->trace) {
        ctx->trace->pVtbl->UnregisterWriter(ctx->trace, ctx->writer_id);
    }
    if (ctx->library) {
        dlclose(ctx->library);
        ctx->library = NULL;
    }
    ctx->trace = NULL;
    ctx->debug = NULL;
    ctx->factory = NULL;
    ctx->version = 0;
    ctx->delayed_drain = 0;
    av_frame_free(&ctx->delayed_frame);
    av_fifo_free(ctx->timestamp_list);
    ctx->timestamp_list = NULL;
    ctx->timestamp_last = 0;

    return 0;
}
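
// Copy a software frame (downloading it first if it is a hardware frame) into
// a host-memory AMF surface, plane by plane, honouring the surface pitch.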
static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
    AMFSurface* surface)
{
    AVFrame  *sw_frame = NULL;
    AMFPlane *plane = NULL;
    uint8_t  *dst_data[4];
    int       dst_linesize[4];
    int       ret = 0;
    int       planes;
    int       i;

    if (frame->hw_frames_ctx && is_hwaccel_pix_fmt(frame->format)) {
        if (!(sw_frame = av_frame_alloc())) {
            av_log(avctx, AV_LOG_ERROR, "Can not alloc frame\n");
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
            av_log(avctx, AV_LOG_ERROR, "Error transferring the data to system memory\n");
            goto fail;
        }
        frame = sw_frame;
    }
    planes = (int)surface->pVtbl->GetPlanesCount(surface);
    if (planes > amf_countof(dst_data)) {
        av_log(avctx, AV_LOG_ERROR, "Invalid number of planes %d in surface\n", planes);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < planes; i++) {
        plane = surface->pVtbl->GetPlaneAt(surface, i);
        dst_data[i] = plane->pVtbl->GetNative(plane);
        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
    }
    av_image_copy(dst_data, dst_linesize,
        (const uint8_t**)frame->data, frame->linesize, frame->format,
        avctx->width, avctx->height);

fail:
    if (sw_frame) {
        av_frame_free(&sw_frame);
    }
    return ret;
}
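
// Track the pts of every submitted frame in the timestamp FIFO;
// amf_copy_buffer() pops the oldest entry and uses it (shifted by dts_delay)
// as the packet dts. The FIFO grows on demand.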
static inline int timestamp_queue_enqueue(AVCodecContext *avctx, int64_t timestamp)
{
    AmfContext *ctx = avctx->priv_data;

    if (av_fifo_space(ctx->timestamp_list) < sizeof(timestamp)) {
        int size = av_fifo_size(ctx->timestamp_list);
        if (INT_MAX / 2 - size < sizeof(timestamp))
            return AVERROR(EINVAL);
        av_fifo_realloc2(ctx->timestamp_list, (size + sizeof(timestamp)) * 2);
    }
    av_fifo_generic_write(ctx->timestamp_list, &timestamp, sizeof(timestamp), NULL);
    ctx->timestamp_last = timestamp;
    return 0;
}
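
// Convert an AMF output buffer into an AVPacket: copy the bitstream, mark IDR
// frames as keyframes, restore the original pts from PTS_PROP and derive dts
// from the timestamp FIFO (shifted by dts_delay when B-frames are enabled).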
static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buffer)
{
    AmfContext      *ctx = avctx->priv_data;
    int              ret;
    AMFVariantStruct var = {0};
    int64_t          timestamp = AV_NOPTS_VALUE;
    int64_t          size = buffer->pVtbl->GetSize(buffer);

    if ((ret = ff_alloc_packet(pkt, size)) < 0) {
        return ret;
    }
    memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);

    switch (avctx->codec->id) {
        case AV_CODEC_ID_H264:
            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);
            if (var.int64Value == AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR) {
                pkt->flags = AV_PKT_FLAG_KEY;
            }
            break;
        case AV_CODEC_ID_HEVC:
            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE, &var);
            if (var.int64Value == AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR) {
                pkt->flags = AV_PKT_FLAG_KEY;
            }
            break;
        default:
            break;
    }

    buffer->pVtbl->GetProperty(buffer, PTS_PROP, &var);
    pkt->pts = var.int64Value; // original pts

    AMF_RETURN_IF_FALSE(ctx, av_fifo_size(ctx->timestamp_list) > 0, AVERROR_UNKNOWN, "timestamp_list is empty\n");
    av_fifo_generic_read(ctx->timestamp_list, &timestamp, sizeof(timestamp), NULL);

    // calc dts shift if max_b_frames > 0
    if (avctx->max_b_frames > 0 && ctx->dts_delay == 0) {
        AMF_RETURN_IF_FALSE(ctx, av_fifo_size(ctx->timestamp_list) > 0, AVERROR_UNKNOWN,
            "timestamp_list is empty while max_b_frames = %d\n", avctx->max_b_frames);
        if (timestamp < 0 || ctx->timestamp_last < AV_NOPTS_VALUE) {
            return AVERROR(ERANGE);
        }
        ctx->dts_delay = ctx->timestamp_last - timestamp;
    }
    pkt->dts = timestamp - ctx->dts_delay;
    return 0;
}

// amfenc API implementation
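
// Common init for the AMF-based encoders: load the runtime, set up the AMF
// context, then create the encoder component; on any failure the partially
// initialized state is torn down via ff_amf_encode_close().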
int ff_amf_encode_init(AVCodecContext *avctx)
{
    AmfContext *ctx = avctx->priv_data;
    int         ret;

    ctx->factory = NULL;
    ctx->debug = NULL;
    ctx->trace = NULL;
    ctx->context = NULL;
    ctx->encoder = NULL;
    ctx->library = NULL;
    ctx->version = 0;
    ctx->eof = 0;
    ctx->format = 0;
    ctx->tracer.vtbl = NULL;
    ctx->tracer.avctx = NULL;

    if ((ret = amf_load_library(avctx)) == 0) {
        if ((ret = amf_init_context(avctx)) == 0) {
            if ((ret = amf_init_encoder(avctx)) == 0) {
                return 0;
            }
        }
    }
    ff_amf_encode_close(avctx);
    return ret;
}
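
// send_frame half of the encode API: a NULL frame starts draining, otherwise
// the frame is wrapped (D3D11) or copied (host memory) into an AMF surface and
// submitted. If the encoder input queue is full, the surface or the drain
// request is stored and resubmitted from ff_amf_receive_packet().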
int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    AMF_RESULT  res = AMF_OK;
    AmfContext *ctx = avctx->priv_data;
    AMFSurface *surface = NULL;
    int         ret;

    if (!ctx->encoder)
        return AVERROR(EINVAL);

    if (!frame) { // submit drain
        if (!ctx->eof) { // submit drain one time only
            if (ctx->delayed_surface != NULL) {
                ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet
            } else if (!ctx->delayed_drain) {
                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
                if (res == AMF_INPUT_FULL) {
                    ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in ff_amf_receive_packet
                } else {
                    if (res == AMF_OK) {
                        ctx->eof = 1; // drain started
                    }
                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Drain() failed with error %d\n", res);
                }
            }
        } else {
            return AVERROR_EOF;
        }
    } else { // submit frame
        if (ctx->delayed_surface != NULL) {
            return AVERROR(EAGAIN); // should not happen when called from ffmpeg, other clients may resubmit
        }
        // prepare surface from frame
        if (frame->hw_frames_ctx && ( // HW frame detected
            // check if the same hw_frames_ctx as used in initialization
            (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data) ||
            // check if the same hw_device_ctx as used in initialization
            (ctx->hw_device_ctx && ((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
            (AVHWDeviceContext*)ctx->hw_device_ctx->data)
        )) {
#if CONFIG_D3D11VA
            static const GUID AMFTextureArrayIndexGUID = { 0x28115527, 0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };
            ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
            int index = (int)(size_t)frame->data[1]; // index is the slice in the texture array - tells AMF which slice to use

            texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);

            res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX11Native() failed with error %d\n", res);

            // input HW surfaces can be vertically aligned by 16; tell AMF the real size
            surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height);
#endif
        } else {
            res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "AllocSurface() failed with error %d\n", res);
            amf_copy_surface(avctx, frame, surface);
        }
        surface->pVtbl->SetPts(surface, frame->pts);
        AMF_ASSIGN_PROPERTY_INT64(res, surface, PTS_PROP, frame->pts);

        switch (avctx->codec->id) {
            case AV_CODEC_ID_H264:
                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_INSERT_AUD, !!ctx->aud);
                break;
            case AV_CODEC_ID_HEVC:
                AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_VIDEO_ENCODER_HEVC_INSERT_AUD, !!ctx->aud);
                break;
            default:
                break;
        }

        // submit surface
        res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
        if (res == AMF_INPUT_FULL) { // handle full queue
            // store surface for later submission
            ctx->delayed_surface = surface;
            if (surface->pVtbl->GetMemoryType(surface) == AMF_MEMORY_DX11) {
                av_frame_ref(ctx->delayed_frame, frame);
            }
        } else {
            surface->pVtbl->Release(surface);
            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res);

            if ((ret = timestamp_queue_enqueue(avctx, frame->pts)) < 0) {
                return ret;
            }
        }
    }
    return 0;
}
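
// receive_packet half of the encode API: poll the encoder for output, convert
// it to an AVPacket, and retry any delayed surface or drain submission. While
// draining, or while a resubmission is pending, it blocks and polls every
// millisecond until AMF reports EOF.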
int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    int         ret;
    AMF_RESULT  res;
    AMF_RESULT  res_query;
    AmfContext *ctx = avctx->priv_data;
    AMFData    *data = NULL;
    int         block_and_wait;

    if (!ctx->encoder)
        return AVERROR(EINVAL);

    do {
        block_and_wait = 0;
        // poll data
        res_query = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
        if (data) {
            // copy data to packet
            AMFBuffer* buffer;
            AMFGuid guid = IID_AMFBuffer();
            data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
            ret = amf_copy_buffer(avctx, avpkt, buffer);

            buffer->pVtbl->Release(buffer);
            data->pVtbl->Release(data);

            AMF_RETURN_IF_FALSE(ctx, ret >= 0, ret, "amf_copy_buffer() failed with error %d\n", ret);

            if (ctx->delayed_surface != NULL) { // try to resubmit frame
                res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)ctx->delayed_surface);
                if (res != AMF_INPUT_FULL) {
                    int64_t pts = ctx->delayed_surface->pVtbl->GetPts(ctx->delayed_surface);
                    ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
                    ctx->delayed_surface = NULL;
                    av_frame_unref(ctx->delayed_frame);
                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Repeated SubmitInput() failed with error %d\n", res);

                    if ((ret = timestamp_queue_enqueue(avctx, pts)) < 0) {
                        return ret;
                    }
                } else {
                    av_log(avctx, AV_LOG_WARNING, "Data acquired but delayed frame submission got AMF_INPUT_FULL - should not happen\n");
                }
            } else if (ctx->delayed_drain) { // try to resubmit drain
                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
                if (res != AMF_INPUT_FULL) {
                    ctx->delayed_drain = 0;
                    ctx->eof = 1; // drain started
                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Repeated Drain() failed with error %d\n", res);
                } else {
                    av_log(avctx, AV_LOG_WARNING, "Data acquired but delayed drain submission got AMF_INPUT_FULL - should not happen\n");
                }
            }
        } else if (ctx->delayed_surface != NULL || ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF)) {
            block_and_wait = 1;
            av_usleep(1000); // wait and poll again
        }
    } while (block_and_wait);

    if (res_query == AMF_EOF) {
        ret = AVERROR_EOF;
    } else if (data == NULL) {
        ret = AVERROR(EAGAIN);
    } else {
        ret = 0;
    }
    return ret;
}