Browse Source

nvenc: make gpu indices independent of supported capabilities

Do not allocate a CUDA context for every available gpu.

Signed-off-by: Luca Barbato <lu_zero@gentoo.org>
tags/n3.4
Timo Rothenpieler Luca Barbato 9 years ago
parent
commit
a52976c0fe
1 changed file with 4 additions and 1 deletion
  1. +4
    -1
      libavcodec/nvenc.c

+ 4
- 1
libavcodec/nvenc.c View File

@@ -359,6 +359,9 @@ static int nvenc_check_device(AVCodecContext *avctx, int idx)
if (((major << 4) | minor) < NVENC_CAP)
goto fail;

if (ctx->device != idx && ctx->device != ANY_DEVICE)
return -1;

ret = nvel->cu_ctx_create(&ctx->cu_context_internal, 0, cu_device);
if (ret != CUDA_SUCCESS)
goto fail;
@@ -377,7 +380,7 @@ static int nvenc_check_device(AVCodecContext *avctx, int idx)

av_log(avctx, loglevel, "supports NVENC\n");

if (ctx->device == cu_device || ctx->device == ANY_DEVICE)
if (ctx->device == idx || ctx->device == ANY_DEVICE)
return 0;

fail3:


Loading…
Cancel
Save