[fix] fix nvcodec crash due to cuda context not being released

dijunkun
2025-04-10 17:10:40 +08:00
parent f48d940b80
commit d40ca8814c
5 changed files with 23 additions and 16 deletions
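For context, the pattern this commit moves to is: create the CUDA context once in Init(), keep it as a member, and destroy it in the destructor. A minimal standalone sketch of that lifecycle, with an illustrative class name (not one of the project's actual types):

#include <cuda.h>

// Sketch of the ownership pattern this commit adopts: the object that
// creates the CUcontext is also responsible for destroying it.
class CudaContextOwner {
 public:
  ~CudaContextOwner() {
    // The fix: release the context on teardown instead of leaking it.
    if (cuda_context_) {
      cuCtxDestroy(cuda_context_);
      cuda_context_ = nullptr;
    }
  }

  int Init() {
    if (cuInit(0) != CUDA_SUCCESS) return -1;
    int num_gpus = 0;
    if (cuDeviceGetCount(&num_gpus) != CUDA_SUCCESS || num_gpus < 1) return -1;
    if (cuDeviceGet(&cuda_device_, 0) != CUDA_SUCCESS) return -1;
    // Stored as a member so the destructor can free it later.
    if (cuCtxCreate(&cuda_context_, 0, cuda_device_) != CUDA_SUCCESS) return -1;
    return 0;
  }

 private:
  CUcontext cuda_context_ = nullptr;
  CUdevice cuda_device_ = 0;
};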

View File

@@ -1,7 +1,6 @@
 #include "nvidia_video_decoder.h"
 #include "log.h"
-#include "nvcodec_api.h"

 // #define SAVE_DECODED_NV12_STREAM
 // #define SAVE_RECEIVED_H264_STREAM
@@ -24,29 +23,33 @@ NvidiaVideoDecoder::~NvidiaVideoDecoder() {
     file_h264_ = nullptr;
   }
 #endif
+  if (cuda_context_) {
+    cuCtxDestroy(cuda_context_);
+    cuda_context_ = nullptr;
+  }
 }

 int NvidiaVideoDecoder::Init() {
-  ck(cuInit_ld(0));
+  ck(cuInit(0));
   int nGpu = 0;
   int iGpu = 0;
-  ck(cuDeviceGetCount_ld(&nGpu));
+  ck(cuDeviceGetCount(&nGpu));
   if (nGpu < 1) {
     return -1;
   }
-  CUdevice cuDevice;
-  cuDeviceGet_ld(&cuDevice, iGpu);
+  cuDeviceGet(&cuda_device_, iGpu);
-  CUcontext cuContext = NULL;
-  cuCtxCreate_ld(&cuContext, 0, cuDevice);
-  if (!cuContext) {
+  cuCtxCreate(&cuda_context_, 0, cuda_device_);
+  if (!cuda_context_) {
     return -1;
   }
-  decoder = new NvDecoder(cuContext, false, cudaVideoCodec_H264, true, false,
-                          nullptr, nullptr, false, 4096, 2160, 1000, false);
+  decoder =
+      new NvDecoder(cuda_context_, false, cudaVideoCodec_H264, true, false,
+                    nullptr, nullptr, false, 4096, 2160, 1000, false);
 #ifdef SAVE_DECODED_NV12_STREAM
   file_nv12_ = fopen("decoded_nv12_stream.yuv", "w+b");

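Design note: the same lifetime could be captured once with a small RAII guard instead of repeating the destroy-and-null sequence in each destructor. A hypothetical sketch, not part of this commit:

#include <cuda.h>

// Hypothetical RAII wrapper: owns a CUcontext and destroys it on scope
// exit, so every holder gets the destructor fix above for free.
class ScopedCuContext {
 public:
  ScopedCuContext() = default;
  ~ScopedCuContext() { Reset(); }
  ScopedCuContext(const ScopedCuContext&) = delete;
  ScopedCuContext& operator=(const ScopedCuContext&) = delete;

  CUresult Create(CUdevice device, unsigned int flags = 0) {
    Reset();  // Drop any context we already own before creating a new one.
    return cuCtxCreate(&ctx_, flags, device);
  }

  CUcontext get() const { return ctx_; }
  explicit operator bool() const { return ctx_ != nullptr; }

 private:
  void Reset() {
    if (ctx_) {
      cuCtxDestroy(ctx_);
      ctx_ = nullptr;
    }
  }
  CUcontext ctx_ = nullptr;
};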
View File

@@ -22,6 +22,8 @@ class NvidiaVideoDecoder : public VideoDecoder {
private:
std::shared_ptr<SystemClock> clock_ = nullptr;
NvDecoder* decoder = nullptr;
CUcontext cuda_context_ = NULL;
CUdevice cuda_device_ = 0;
bool get_first_keyframe_ = false;
bool skip_frame_ = false;

View File

@@ -7,7 +7,6 @@
 #if __APPLE__
 #else
 #include "nvcodec/nvidia_video_decoder.h"
-#include "nvcodec_api.h"
 #endif
 #include "log.h"
@@ -49,7 +48,7 @@ bool VideoDecoderFactory::CheckIsHardwareAccerlerationSupported() {
 #else
   CUresult cuResult;
   CUvideoctxlock cudaCtxLock;
-  cuResult = cuvidCtxLockCreate_ld(&cudaCtxLock, 0);
+  cuResult = cuvidCtxLockCreate(&cudaCtxLock, 0);
   if (cuResult != CUDA_SUCCESS) {
     LOG_WARN(
         "System not support hardware accelerated decode, use default software "

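The probe above boils down to: NVDEC is considered available if a video context lock can be created. A self-contained sketch, assuming the nvcuvid headers; releasing the lock with cuvidCtxLockDestroy on the success path is an assumption of this sketch, not something the code above does:

#include <cuda.h>
#include <nvcuvid.h>

// Capability probe: hardware decode is assumed usable if the driver can
// hand out a video context lock.
bool IsHardwareDecodeSupported() {
  CUvideoctxlock lock = nullptr;
  // The code above likewise passes 0 for the context argument.
  if (cuvidCtxLockCreate(&lock, /*ctx=*/nullptr) != CUDA_SUCCESS) {
    return false;  // No NVDEC-capable driver or device.
  }
  cuvidCtxLockDestroy(lock);  // Sketch-only cleanup; see note above.
  return true;
}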
View File

@@ -3,7 +3,6 @@
 #include <chrono>
 #include "log.h"
-#include "nvcodec_api.h"
 #include "nvcodec_common.h"

 // #define SAVE_RECEIVED_NV12_STREAM
@@ -36,6 +35,11 @@ NvidiaVideoEncoder::~NvidiaVideoEncoder() {
   if (encoder_) {
     encoder_->DestroyEncoder();
   }
+  if (cuda_context_) {
+    cuCtxDestroy(cuda_context_);
+    cuda_context_ = nullptr;
+  }
 }

 int NvidiaVideoEncoder::Init() {

View File

@@ -6,7 +6,6 @@
 #else
 #include "aom/aom_av1_encoder.h"
 #include "nvcodec/nvidia_video_encoder.h"
-#include "nvcodec_api.h"
 #include "openh264/openh264_encoder.h"
 #endif
@@ -49,7 +48,7 @@ bool VideoEncoderFactory::CheckIsHardwareAccerlerationSupported() {
   CUresult cuResult;
   NV_ENCODE_API_FUNCTION_LIST functionList = {NV_ENCODE_API_FUNCTION_LIST_VER};
-  cuResult = cuInit_ld(0);
+  cuResult = cuInit(0);
   if (cuResult != CUDA_SUCCESS) {
     LOG_WARN(
         "System not support hardware accelerated encode, use default software "
@@ -57,7 +56,7 @@ bool VideoEncoderFactory::CheckIsHardwareAccerlerationSupported() {
     return false;
   }
-  NVENCSTATUS nvEncStatus = NvEncodeAPICreateInstance_ld(&functionList);
+  NVENCSTATUS nvEncStatus = NvEncodeAPICreateInstance(&functionList);
   if (nvEncStatus != NV_ENC_SUCCESS) {
     LOG_WARN(
         "System not support hardware accelerated encode, use default software "