[feat] openh264 encoder and dav1d decoder support dynamic resolution

This commit is contained in:
dijunkun
2024-09-06 13:05:57 +08:00
parent c477643aed
commit 255ef0edec
10 changed files with 284 additions and 252 deletions

View File

@@ -27,11 +27,12 @@ class VideoFrame {
public:
const uint8_t *Buffer() { return buffer_; }
size_t Size() { return size_; }
size_t Width() { return width_; }
size_t Height() { return height_; }
uint8_t *GetBuffer() { return buffer_; }
size_t GetWidth() { return width_; }
size_t GetHeight() { return height_; }
void SetSize(size_t size) { size_ = size; }
void SetWidth(size_t width) { width_ = width; }
void SetHeight(size_t height) { height_ = height; }
private:
uint8_t *buffer_ = nullptr;

View File

@@ -30,6 +30,17 @@ class ScopedDav1dData {
// Calling `dav1d_data_wrap` requires a `free_callback` to be registered.
void NullFreeCallback(const uint8_t *buffer, void *opaque) {}
// Repack a planar YUV420P image as NV12: the Y plane is copied verbatim,
// then the U and V planes are interleaved (U0 V0 U1 V1 ...) into the single
// UV plane that follows it. Dst must hold Width * Height * 3 / 2 bytes.
void Yuv420pToNv12(unsigned char *SrcY, unsigned char *SrcU,
                   unsigned char *SrcV, unsigned char *Dst, int Width,
                   int Height) {
  const int luma_size = Width * Height;
  const int chroma_pairs = luma_size / 4;  // one U+V pair per 2x2 luma block
  memcpy(Dst, SrcY, luma_size);
  unsigned char *uv = Dst + luma_size;
  for (int idx = 0; idx < chroma_pairs; ++idx) {
    uv[2 * idx] = SrcU[idx];
    uv[2 * idx + 1] = SrcV[idx];
  }
}
// Trivial constructor: decoder setup is deferred to Init().
Dav1dAv1Decoder::Dav1dAv1Decoder() {}
Dav1dAv1Decoder::~Dav1dAv1Decoder() {
@@ -45,14 +56,9 @@ Dav1dAv1Decoder::~Dav1dAv1Decoder() {
file_nv12_ = nullptr;
}
if (decoded_frame_yuv_) {
delete decoded_frame_yuv_;
decoded_frame_yuv_ = nullptr;
}
if (decoded_frame_nv12_) {
delete decoded_frame_nv12_;
decoded_frame_nv12_ = nullptr;
if (nv12_frame_) {
delete nv12_frame_;
nv12_frame_ = nullptr;
}
}
@@ -72,9 +78,6 @@ int Dav1dAv1Decoder::Init() {
LOG_ERROR("Dav1d AV1 decoder open failed");
}
decoded_frame_yuv_ = new VideoFrame(1280 * 720 * 3 / 2);
// decoded_frame_nv12_ = new VideoFrame(1280 * 720 * 3 / 2);
if (SAVE_RECEIVED_AV1_STREAM) {
file_av1_ = fopen("received_av1_stream.ivf", "w+b");
if (!file_av1_) {
@@ -92,17 +95,6 @@ int Dav1dAv1Decoder::Init() {
return 0;
}
// Convert planar YUV420P to NV12 in-place layout: full-resolution Y plane
// first, then the U/V samples interleaved pairwise. Dst must be at least
// Width * Height * 3 / 2 bytes.
void YUV420PtoNV12(unsigned char *SrcY, unsigned char *SrcU,
                   unsigned char *SrcV, unsigned char *Dst, int Width,
                   int Height) {
  int y_bytes = Width * Height;
  int pair_count = y_bytes / 4;  // number of (U,V) sample pairs
  memcpy(Dst, SrcY, y_bytes);
  unsigned char *out = Dst + y_bytes;
  while (pair_count-- > 0) {
    *out++ = *SrcU++;
    *out++ = *SrcV++;
  }
}
int Dav1dAv1Decoder::Decode(
const uint8_t *data, int size,
std::function<void(VideoFrame)> on_receive_decoded_frame) {
@@ -150,43 +142,52 @@ int Dav1dAv1Decoder::Decode(
return -1;
}
frame_width_ = dav1d_picture.p.w;
frame_height_ = dav1d_picture.p.h;
nv12_frame_size_ = dav1d_picture.p.w * dav1d_picture.p.h * 3 / 2;
if (!nv12_frame_) {
nv12_frame_capacity_ = nv12_frame_size_;
nv12_frame_ =
new VideoFrame(nv12_frame_capacity_, frame_width_, frame_height_);
}
if (nv12_frame_capacity_ < nv12_frame_size_) {
nv12_frame_capacity_ = nv12_frame_size_;
delete nv12_frame_;
nv12_frame_ =
new VideoFrame(nv12_frame_capacity_, frame_width_, frame_height_);
}
if (nv12_frame_->Size() != nv12_frame_size_ ||
nv12_frame_->Width() != frame_width_ ||
nv12_frame_->Height() != frame_height_) {
nv12_frame_->SetSize(nv12_frame_size_);
nv12_frame_->SetWidth(frame_width_);
nv12_frame_->SetHeight(frame_height_);
}
if (0) {
YUV420PtoNV12((unsigned char *)dav1d_picture.data[0],
Yuv420pToNv12((unsigned char *)dav1d_picture.data[0],
(unsigned char *)dav1d_picture.data[1],
(unsigned char *)dav1d_picture.data[2],
decoded_frame_nv12_->GetBuffer(), dav1d_picture.p.w,
dav1d_picture.p.h);
(unsigned char *)nv12_frame_->Buffer(), frame_width_,
frame_height_);
} else {
if (!decoded_frame_nv12_) {
decoded_frame_nv12_capacity_ =
dav1d_picture.p.w * dav1d_picture.p.h * 3 / 2;
decoded_frame_nv12_ = new VideoFrame(
decoded_frame_nv12_capacity_, dav1d_picture.p.w, dav1d_picture.p.h);
}
if (decoded_frame_nv12_capacity_ <
dav1d_picture.p.w * dav1d_picture.p.h * 3 / 2) {
delete decoded_frame_nv12_;
decoded_frame_nv12_capacity_ =
dav1d_picture.p.w * dav1d_picture.p.h * 3 / 2;
decoded_frame_nv12_ = new VideoFrame(
decoded_frame_nv12_capacity_, dav1d_picture.p.w, dav1d_picture.p.h);
}
libyuv::I420ToNV12(
(const uint8_t *)dav1d_picture.data[0], dav1d_picture.p.w,
(const uint8_t *)dav1d_picture.data[1], dav1d_picture.p.w / 2,
(const uint8_t *)dav1d_picture.data[2], dav1d_picture.p.w / 2,
decoded_frame_nv12_->GetBuffer(), dav1d_picture.p.w,
decoded_frame_nv12_->GetBuffer() +
dav1d_picture.p.w * dav1d_picture.p.h,
dav1d_picture.p.w, dav1d_picture.p.w, dav1d_picture.p.h);
(uint8_t *)nv12_frame_->Buffer(), frame_width_,
(uint8_t *)nv12_frame_->Buffer() + frame_width_ * frame_height_,
frame_width_, frame_width_, frame_height_);
}
on_receive_decoded_frame(*decoded_frame_nv12_);
on_receive_decoded_frame(*nv12_frame_);
if (SAVE_DECODED_NV12_STREAM) {
fwrite((unsigned char *)decoded_frame_nv12_->Buffer(), 1,
decoded_frame_nv12_->Size(), file_nv12_);
fwrite((unsigned char *)nv12_frame_->Buffer(), 1, nv12_frame_->Size(),
file_nv12_);
}
return 0;

View File

@@ -23,9 +23,12 @@ class Dav1dAv1Decoder : public VideoDecoder {
std::function<void(VideoFrame)> on_receive_decoded_frame);
private:
VideoFrame *decoded_frame_yuv_ = nullptr;
VideoFrame *decoded_frame_nv12_ = nullptr;
int decoded_frame_nv12_capacity_ = 0;
VideoFrame *nv12_frame_ = 0;
int nv12_frame_capacity_ = 0;
int nv12_frame_size_ = 0;
int frame_width_ = 0;
int frame_height_ = 0;
FILE *file_av1_ = nullptr;
FILE *file_nv12_ = nullptr;

View File

@@ -8,50 +8,45 @@
#define SAVE_NV12_STREAM 0
#define SAVE_H264_STREAM 0
static const int YUV420P_BUFFER_SIZE = 1280 * 720 * 3 / 2;
void CopyYuvWithStride(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v,
int width, int height, int stride_y, int stride_u,
int stride_v, uint8_t *yuv420p_frame) {
int actual_stride_y = width;
int actual_stride_u = width / 2;
int actual_stride_v = width / 2;
void CopyYUVWithStride(uint8_t *srcY, uint8_t *srcU, uint8_t *srcV, int width,
int height, int strideY, int strideU, int strideV,
uint8_t *yuv_data_) {
int actualWidth = width;
int actualHeight = height;
int actualStrideY = actualWidth;
int actualStrideU = actualWidth / 2;
int actualStrideV = actualWidth / 2;
for (int row = 0; row < actualHeight; row++) {
memcpy(yuv_data_, srcY, actualStrideY);
srcY += strideY;
yuv_data_ += actualStrideY;
for (int row = 0; row < height; row++) {
memcpy(yuv420p_frame, src_y, actual_stride_y);
src_y += stride_y;
yuv420p_frame += actual_stride_y;
}
for (int row = 0; row < actualHeight / 2; row++) {
memcpy(yuv_data_, srcU, actualStrideU);
srcU += strideU;
yuv_data_ += actualStrideU;
for (int row = 0; row < height / 2; row++) {
memcpy(yuv420p_frame, src_u, actual_stride_u);
src_u += stride_u;
yuv420p_frame += actual_stride_u;
}
for (int row = 0; row < actualHeight / 2; row++) {
memcpy(yuv_data_, srcV, actualStrideV);
srcV += strideV;
yuv_data_ += actualStrideV;
for (int row = 0; row < height / 2; row++) {
memcpy(yuv420p_frame, src_v, actual_stride_v);
src_v += stride_v;
yuv420p_frame += actual_stride_v;
}
}
void ConvertYUV420toNV12(const unsigned char *yuvData, unsigned char *nv12Data,
int width, int height) {
int ySize = width * height;
int uvSize = ySize / 4;
const unsigned char *yData = yuvData;
const unsigned char *uData = yData + ySize;
const unsigned char *vData = uData + uvSize;
void ConvertYuv420pToNv12(const unsigned char *yuv_data,
unsigned char *nv12_data, int width, int height) {
int y_size = width * height;
int uv_size = y_size / 4;
const unsigned char *y_data = yuv_data;
const unsigned char *u_data = y_data + y_size;
const unsigned char *v_data = u_data + uv_size;
std::memcpy(nv12Data, yData, ySize);
std::memcpy(nv12_data, y_data, y_size);
for (int i = 0; i < uvSize; i++) {
nv12Data[ySize + i * 2] = uData[i];
nv12Data[ySize + i * 2 + 1] = vData[i];
for (int i = 0; i < uv_size; i++) {
nv12_data[y_size + i * 2] = u_data[i];
nv12_data[y_size + i * 2 + 1] = v_data[i];
}
}
@@ -66,6 +61,10 @@ OpenH264Decoder::~OpenH264Decoder() {
delete nv12_frame_;
}
if (yuv420p_frame_) {
delete[] yuv420p_frame_;
}
if (SAVE_H264_STREAM && h264_stream_) {
fflush(h264_stream_);
h264_stream_ = nullptr;
@@ -95,10 +94,6 @@ int OpenH264Decoder::Init() {
frame_width_ = 1280;
frame_height_ = 720;
decoded_frame_size_ = YUV420P_BUFFER_SIZE;
decoded_frame_ = new uint8_t[YUV420P_BUFFER_SIZE];
nv12_frame_ = new uint8_t[YUV420P_BUFFER_SIZE];
if (WelsCreateDecoder(&openh264_decoder_) != 0) {
LOG_ERROR("Failed to create OpenH264 decoder");
return -1;
@@ -137,45 +132,76 @@ int OpenH264Decoder::Decode(
SBufferInfo sDstBufInfo;
memset(&sDstBufInfo, 0, sizeof(SBufferInfo));
openh264_decoder_->DecodeFrameNoDelay(data, size, yuv_data_, &sDstBufInfo);
openh264_decoder_->DecodeFrameNoDelay(data, size, yuv420p_planes_,
&sDstBufInfo);
frame_width_ = sDstBufInfo.UsrData.sSystemBuffer.iWidth;
frame_height_ = sDstBufInfo.UsrData.sSystemBuffer.iHeight;
yuv420p_frame_size_ = frame_width_ * frame_height_ * 3 / 2;
nv12_frame_size_ = frame_width_ * frame_height_ * 3 / 2;
if (!yuv420p_frame_) {
yuv420p_frame_capacity_ = yuv420p_frame_size_;
yuv420p_frame_ = new unsigned char[yuv420p_frame_capacity_];
}
if (yuv420p_frame_capacity_ < yuv420p_frame_size_) {
yuv420p_frame_capacity_ = yuv420p_frame_size_;
delete[] yuv420p_frame_;
yuv420p_frame_ = new unsigned char[yuv420p_frame_capacity_];
}
if (!nv12_frame_) {
nv12_frame_capacity_ = yuv420p_frame_size_;
nv12_frame_ =
new VideoFrame(nv12_frame_capacity_, frame_width_, frame_height_);
}
if (nv12_frame_capacity_ < yuv420p_frame_size_) {
nv12_frame_capacity_ = yuv420p_frame_size_;
delete nv12_frame_;
nv12_frame_ =
new VideoFrame(nv12_frame_capacity_, frame_width_, frame_height_);
}
if (nv12_frame_->Size() != nv12_frame_size_ ||
nv12_frame_->Width() != frame_width_ ||
nv12_frame_->Height() != frame_height_) {
nv12_frame_->SetSize(nv12_frame_size_);
nv12_frame_->SetWidth(frame_width_);
nv12_frame_->SetHeight(frame_height_);
}
if (sDstBufInfo.iBufferStatus == 1) {
if (on_receive_decoded_frame) {
CopyYUVWithStride(yuv_data_[0], yuv_data_[1], yuv_data_[2],
CopyYuvWithStride(
yuv420p_planes_[0], yuv420p_planes_[1], yuv420p_planes_[2],
sDstBufInfo.UsrData.sSystemBuffer.iWidth,
sDstBufInfo.UsrData.sSystemBuffer.iHeight,
sDstBufInfo.UsrData.sSystemBuffer.iStride[0],
sDstBufInfo.UsrData.sSystemBuffer.iStride[1],
sDstBufInfo.UsrData.sSystemBuffer.iStride[1],
decoded_frame_);
if (SAVE_NV12_STREAM) {
fwrite((unsigned char *)decoded_frame_, 1,
frame_width_ * frame_height_ * 3 / 2, nv12_stream_);
}
sDstBufInfo.UsrData.sSystemBuffer.iStride[1], yuv420p_frame_);
if (0) {
ConvertYUV420toNV12(decoded_frame_, nv12_frame_, frame_width_,
frame_height_);
ConvertYuv420pToNv12(yuv420p_frame_,
(unsigned char *)nv12_frame_->Buffer(),
frame_width_, frame_height_);
} else {
libyuv::I420ToNV12(
(const uint8_t *)decoded_frame_, frame_width_,
(const uint8_t *)decoded_frame_ + frame_width_ * frame_height_,
(const uint8_t *)yuv420p_frame_, frame_width_,
(const uint8_t *)yuv420p_frame_ + frame_width_ * frame_height_,
frame_width_ / 2,
(const uint8_t *)decoded_frame_ +
(const uint8_t *)yuv420p_frame_ +
frame_width_ * frame_height_ * 5 / 4,
frame_width_ / 2, nv12_frame_, frame_width_,
nv12_frame_ + frame_width_ * frame_height_, frame_width_,
frame_width_, frame_height_);
frame_width_ / 2, (uint8_t *)nv12_frame_->Buffer(), frame_width_,
(uint8_t *)nv12_frame_->Buffer() + frame_width_ * frame_height_,
frame_width_, frame_width_, frame_height_);
}
VideoFrame decoded_frame(nv12_frame_,
frame_width_ * frame_height_ * 3 / 2,
frame_width_, frame_height_);
on_receive_decoded_frame(*nv12_frame_);
on_receive_decoded_frame(decoded_frame);
if (SAVE_NV12_STREAM) {
fwrite((unsigned char *)decoded_frame.Buffer(), 1, decoded_frame.Size(),
fwrite((unsigned char *)nv12_frame_->Buffer(), 1, nv12_frame_->Size(),
nv12_stream_);
}
}

View File

@@ -34,10 +34,17 @@ class OpenH264Decoder : public VideoDecoder {
FILE* h264_stream_ = nullptr;
uint8_t* decoded_frame_ = nullptr;
int decoded_frame_size_ = 0;
uint8_t* nv12_frame_ = nullptr;
int frame_width_ = 1280;
int frame_height_ = 720;
unsigned char* yuv_data_[3];
unsigned char* yuv420p_planes_[3] = {nullptr, nullptr, nullptr};
unsigned char* yuv420p_frame_ = nullptr;
int yuv420p_frame_capacity_ = 0;
int yuv420p_frame_size_ = 0;
VideoFrame* nv12_frame_ = 0;
int nv12_frame_capacity_ = 0;
int nv12_frame_size_ = 0;
};
#endif

View File

@@ -266,74 +266,6 @@ int AomAv1Encoder::Init() {
return 0;
}
// Encodes one raw frame with libaom and delivers each output packet through
// `on_encoded_image` (or the virtual OnEncodedImage fallback when no callback
// is supplied). Returns 0 on success, -1 if aom_codec_encode fails.
// NOTE(review): only Y and U planes are set and V is null — this looks like an
// NV12 input layout; confirm against the aom_image format used at Init time.
int AomAv1Encoder::Encode(const uint8_t *pData, int nSize,
std::function<int(char *encoded_packets, size_t size,
VideoFrameType frame_type)>
on_encoded_image) {
// Optionally dump the raw input stream for offline debugging.
if (SAVE_RECEIVED_NV12_STREAM) {
fwrite(pData, 1, nSize, file_nv12_);
}
// Advance the synthetic presentation clock by one frame interval.
// NOTE(review): float division truncated into a uint32_t — confirm the
// intended rounding for non-integer tick-per-frame rates.
const uint32_t duration =
kRtpTicksPerSecond / static_cast<float>(max_frame_rate_);
timestamp_ += duration;
// Point the reusable aom image at the caller's buffer (no copy): Y plane
// first, then the interleaved UV plane directly after Width*Height bytes.
frame_for_encode_->planes[AOM_PLANE_Y] = const_cast<unsigned char *>(pData);
frame_for_encode_->planes[AOM_PLANE_U] =
const_cast<unsigned char *>(pData + frame_width_ * frame_height_);
frame_for_encode_->planes[AOM_PLANE_V] = nullptr;
frame_for_encode_->stride[AOM_PLANE_Y] = frame_width_;
frame_for_encode_->stride[AOM_PLANE_U] = frame_width_;
frame_for_encode_->stride[AOM_PLANE_V] = 0;
// Force a keyframe every 300 frames; everything else is a delta frame.
VideoFrameType frame_type;
if (0 == seq_++ % 300) {
force_i_frame_flags_ = AOM_EFLAG_FORCE_KF;
frame_type = VideoFrameType::kVideoFrameKey;
} else {
force_i_frame_flags_ = 0;
frame_type = VideoFrameType::kVideoFrameDelta;
}
// Encode a frame. The presentation timestamp `pts` should not use real
// timestamps from frames or the wall clock, as that can cause the rate
// controller to misbehave.
aom_codec_err_t ret =
aom_codec_encode(&aom_av1_encoder_ctx_, frame_for_encode_, timestamp_,
duration, force_i_frame_flags_);
if (ret != AOM_CODEC_OK) {
LOG_ERROR("AomAv1Encoder::Encode returned {} on aom_codec_encode",
(int)ret);
return -1;
}
// Drain every compressed packet the encoder produced for this frame.
aom_codec_iter_t iter = nullptr;
int data_pkt_count = 0;
while (const aom_codec_cx_pkt_t *pkt =
aom_codec_get_cx_data(&aom_av1_encoder_ctx_, &iter)) {
if (pkt->kind == AOM_CODEC_CX_FRAME_PKT && pkt->data.frame.sz > 0) {
// NOTE(review): no bounds check against encoded_frame_'s capacity before
// this memcpy — confirm the buffer is sized for worst-case packets.
memcpy(encoded_frame_, pkt->data.frame.buf, pkt->data.frame.sz);
encoded_frame_size_ = pkt->data.frame.sz;
int qp = -1;
SET_ENCODER_PARAM_OR_RETURN_ERROR(AOME_GET_LAST_QUANTIZER, &qp);
// LOG_INFO("Encoded frame qp = {}", qp);
if (on_encoded_image) {
on_encoded_image((char *)encoded_frame_, encoded_frame_size_,
frame_type);
if (SAVE_ENCODED_AV1_STREAM) {
fwrite(encoded_frame_, 1, encoded_frame_size_, file_av1_);
}
} else {
// No callback supplied: route through the virtual hook instead.
OnEncodedImage((char *)encoded_frame_, encoded_frame_size_);
}
}
}
return 0;
}
int AomAv1Encoder::Encode(const XVideoFrame *video_frame,
std::function<int(char *encoded_packets, size_t size,
VideoFrameType frame_type)>
@@ -345,18 +277,18 @@ int AomAv1Encoder::Encode(const XVideoFrame *video_frame,
aom_codec_err_t ret = AOM_CODEC_OK;
if (!encoded_frame_) {
encoded_frame_ = new uint8_t[video_frame->size];
encoded_frame_capacity_ = video_frame->size;
encoded_frame_ = new uint8_t[encoded_frame_capacity_];
}
if (encoded_frame_capacity_ < video_frame->size) {
encoded_frame_capacity_ = video_frame->size;
delete[] encoded_frame_;
encoded_frame_ = new uint8_t[video_frame->size];
encoded_frame_ = new uint8_t[encoded_frame_capacity_];
}
if (frame_width_ != video_frame->width ||
frame_height_ != video_frame->height) {
if (video_frame->width != frame_width_ ||
video_frame->height != frame_height_) {
if (AOM_CODEC_OK !=
ResetEncodeResolution(video_frame->width, video_frame->height)) {
LOG_ERROR("Reset encode resolution failed");

View File

@@ -39,14 +39,16 @@ class AomAv1Encoder : public VideoEncoder {
int Encode(const uint8_t* pData, int nSize,
std::function<int(char* encoded_packets, size_t size,
VideoFrameType frame_type)>
on_encoded_image);
on_encoded_image) {
return 0;
}
int Encode(const XVideoFrame* video_frame,
std::function<int(char* encoded_packets, size_t size,
VideoFrameType frame_type)>
on_encoded_image);
virtual int OnEncodedImage(char* encoded_packets, size_t size);
int OnEncodedImage(char* encoded_packets, size_t size);
void ForceIdr();

View File

@@ -9,9 +9,9 @@
#define SAVE_ENCODED_H264_STREAM 0
#define YUV420P_BUFFER_SIZE 1280 * 720 * 3 / 2
static unsigned char yuv420p_buffer[YUV420P_BUFFER_SIZE];
static unsigned char yuv420p_frame_[YUV420P_BUFFER_SIZE];
void nv12ToI420(unsigned char *Src_data, int src_width, int src_height,
void Nv12ToI420(unsigned char *Src_data, int src_width, int src_height,
unsigned char *Dst_data) {
// NV12 video size
int NV12_Size = src_width * src_height * 3 / 2;
@@ -57,54 +57,81 @@ OpenH264Encoder::~OpenH264Encoder() {
fclose(file_h264_);
file_h264_ = nullptr;
}
delete encoded_frame_;
if (encoded_frame_) {
delete[] encoded_frame_;
encoded_frame_ = nullptr;
}
Release();
}
SEncParamExt OpenH264Encoder::CreateEncoderParams() const {
SEncParamExt encoder_params;
openh264_encoder_->GetDefaultParams(&encoder_params);
int OpenH264Encoder::InitEncoderParams() {
int ret = 0;
if (!openh264_encoder_) {
LOG_ERROR("Invalid openh264 encoder");
return -1;
}
ret = openh264_encoder_->GetDefaultParams(&encoder_params_);
// if (codec_.mode == VideoCodecMode::kRealtimeVideo) { //
encoder_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
encoder_params_.iUsageType = CAMERA_VIDEO_REAL_TIME;
// } else if (codec_.mode == VideoCodecMode::kScreensharing) {
// encoder_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
// encoder_params_.iUsageType = SCREEN_CONTENT_REAL_TIME;
// }
encoder_params.iPicWidth = frame_width_;
encoder_params.iPicHeight = frame_height_;
encoder_params.iTargetBitrate = target_bitrate_;
encoder_params.iMaxBitrate = max_bitrate_;
encoder_params.iRCMode = RC_BITRATE_MODE;
encoder_params.fMaxFrameRate = 60;
encoder_params.bEnableFrameSkip = false;
encoder_params.uiIntraPeriod = key_frame_interval_;
encoder_params.uiMaxNalSize = 0;
encoder_params.iMaxQp = 38;
encoder_params.iMinQp = 16;
encoder_params_.iPicWidth = frame_width_;
encoder_params_.iPicHeight = frame_height_;
encoder_params_.iTargetBitrate = target_bitrate_;
encoder_params_.iMaxBitrate = max_bitrate_;
encoder_params_.iRCMode = RC_BITRATE_MODE;
encoder_params_.fMaxFrameRate = 60;
encoder_params_.bEnableFrameSkip = false;
encoder_params_.uiIntraPeriod = key_frame_interval_;
encoder_params_.uiMaxNalSize = 0;
encoder_params_.iMaxQp = 38;
encoder_params_.iMinQp = 16;
// Threading model: use auto.
// 0: auto (dynamic imp. internal encoder)
// 1: single thread (default value)
// >1: number of threads
encoder_params.iMultipleThreadIdc = 1;
encoder_params_.iMultipleThreadIdc = 1;
// The base spatial layer 0 is the only one we use.
encoder_params.sSpatialLayers[0].iVideoWidth = encoder_params.iPicWidth;
encoder_params.sSpatialLayers[0].iVideoHeight = encoder_params.iPicHeight;
encoder_params.sSpatialLayers[0].fFrameRate = encoder_params.fMaxFrameRate;
encoder_params.sSpatialLayers[0].iSpatialBitrate =
encoder_params.iTargetBitrate;
encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
encoder_params.iMaxBitrate;
encoder_params_.sSpatialLayers[0].iVideoWidth = encoder_params_.iPicWidth;
encoder_params_.sSpatialLayers[0].iVideoHeight = encoder_params_.iPicHeight;
encoder_params_.sSpatialLayers[0].fFrameRate = encoder_params_.fMaxFrameRate;
encoder_params_.sSpatialLayers[0].iSpatialBitrate =
encoder_params_.iTargetBitrate;
encoder_params_.sSpatialLayers[0].iMaxSpatialBitrate =
encoder_params_.iMaxBitrate;
// SingleNalUnit
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
encoder_params_.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
encoder_params_.sSpatialLayers[0].sSliceArgument.uiSliceMode =
SM_SIZELIMITED_SLICE;
encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint =
encoder_params_.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint =
static_cast<unsigned int>(max_payload_size_);
LOG_INFO("Encoder is configured with NALU constraint: {} bytes",
max_payload_size_);
return encoder_params;
return ret;
}
// Re-initializes the OpenH264 encoder with a new picture size so encoding can
// follow dynamic input resolution. Updates the cached frame dimensions and the
// stored parameter set, then re-runs InitializeExt. Returns 0 on success,
// -1 if the encoder rejects the new parameters.
// NOTE(review): openh264_encoder_ is dereferenced without a null check here,
// unlike InitEncoderParams — confirm callers guarantee Init() ran first.
// NOTE(review): sSpatialLayers[0].iVideoWidth/iVideoHeight are not refreshed
// to the new size — verify InitializeExt derives them from iPicWidth/iPicHeight
// or the base layer keeps stale dimensions.
int OpenH264Encoder::ResetEncodeResolution(unsigned int width,
unsigned int height) {
frame_width_ = width;
frame_height_ = height;
encoder_params_.iPicWidth = width;
encoder_params_.iPicHeight = height;
if (openh264_encoder_->InitializeExt(&encoder_params_) != 0) {
LOG_ERROR("Failed to initialize OpenH264 encoder");
return -1;
}
return 0;
}
int OpenH264Encoder::Init() {
@@ -114,15 +141,13 @@ int OpenH264Encoder::Init() {
return -1;
}
encoded_frame_ = new uint8_t[YUV420P_BUFFER_SIZE];
int trace_level = WELS_LOG_QUIET;
openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);
// Create encoder parameters based on the layer configuration.
SEncParamExt encoder_params = CreateEncoderParams();
InitEncoderParams();
if (openh264_encoder_->InitializeExt(&encoder_params) != 0) {
if (openh264_encoder_->InitializeExt(&encoder_params_) != 0) {
LOG_ERROR("Failed to initialize OpenH264 encoder");
// Release();
return -1;
@@ -147,8 +172,9 @@ int OpenH264Encoder::Init() {
return 0;
}
int OpenH264Encoder::Encode(
const uint8_t *pData, int nSize,
const XVideoFrame *video_frame,
std::function<int(char *encoded_packets, size_t size,
VideoFrameType frame_type)>
on_encoded_image) {
@@ -158,9 +184,39 @@ int OpenH264Encoder::Encode(
}
if (SAVE_RECEIVED_NV12_STREAM) {
fwrite(pData, 1, nSize, file_nv12_);
fwrite(video_frame->data, 1, video_frame->size, file_nv12_);
}
if (!yuv420p_frame_) {
yuv420p_frame_capacity_ = video_frame->size;
yuv420p_frame_ = new unsigned char[yuv420p_frame_capacity_];
}
if (yuv420p_frame_capacity_ < video_frame->size) {
yuv420p_frame_capacity_ = video_frame->size;
delete[] yuv420p_frame_;
yuv420p_frame_ = new unsigned char[yuv420p_frame_capacity_];
}
if (!encoded_frame_) {
encoded_frame_capacity_ = video_frame->size;
encoded_frame_ = new unsigned char[encoded_frame_capacity_];
}
if (encoded_frame_capacity_ < video_frame->size) {
encoded_frame_capacity_ = video_frame->size;
delete[] encoded_frame_;
encoded_frame_ = new unsigned char[encoded_frame_capacity_];
}
if (video_frame->width != frame_width_ ||
video_frame->height != frame_height_) {
ResetEncodeResolution(video_frame->width, video_frame->height);
}
Nv12ToI420((unsigned char *)video_frame->data, video_frame->width,
video_frame->height, yuv420p_frame_);
VideoFrameType frame_type;
if (0 == seq_++ % 300) {
ForceIdr();
@@ -169,23 +225,21 @@ int OpenH264Encoder::Encode(
frame_type = VideoFrameType::kVideoFrameDelta;
}
nv12ToI420((unsigned char *)pData, frame_width_, frame_height_,
yuv420p_buffer);
raw_frame_ = {0};
raw_frame_.iPicWidth = frame_width_;
raw_frame_.iPicHeight = frame_height_;
raw_frame_.iPicWidth = video_frame->width;
raw_frame_.iPicHeight = video_frame->height;
raw_frame_.iColorFormat = EVideoFormatType::videoFormatI420;
raw_frame_.uiTimeStamp =
std::chrono::system_clock::now().time_since_epoch().count();
raw_frame_.iStride[0] = frame_width_;
raw_frame_.iStride[1] = frame_width_ >> 1;
raw_frame_.iStride[2] = frame_width_ >> 1;
raw_frame_.pData[0] = (unsigned char *)yuv420p_buffer;
raw_frame_.pData[1] = raw_frame_.pData[0] + frame_width_ * frame_height_;
raw_frame_.iStride[0] = video_frame->width;
raw_frame_.iStride[1] = video_frame->width >> 1;
raw_frame_.iStride[2] = video_frame->width >> 1;
raw_frame_.pData[0] = (unsigned char *)yuv420p_frame_;
raw_frame_.pData[1] =
raw_frame_.pData[0] + video_frame->width * video_frame->height;
raw_frame_.pData[2] =
raw_frame_.pData[1] + (frame_width_ * frame_height_ >> 2);
raw_frame_.pData[1] + (video_frame->width * video_frame->height >> 2);
SFrameBSInfo info;
memset(&info, 0, sizeof(SFrameBSInfo));

View File

@@ -24,23 +24,25 @@ class OpenH264Encoder : public VideoEncoder {
int Init();
int Encode(const uint8_t* pData, int nSize,
std::function<int(char* encoded_packets, size_t size,
VideoFrameType frame_type)>
on_encoded_image);
int Encode(const XVideoFrame* video_frame,
std::function<int(char* encoded_packets, size_t size,
VideoFrameType frame_type)>
on_encoded_image) {
return 0;
}
virtual int OnEncodedImage(char* encoded_packets, size_t size);
int Encode(const XVideoFrame* video_frame,
std::function<int(char* encoded_packets, size_t size,
VideoFrameType frame_type)>
on_encoded_image);
int OnEncodedImage(char* encoded_packets, size_t size);
void ForceIdr();
private:
SEncParamExt CreateEncoderParams() const;
int InitEncoderParams();
int ResetEncodeResolution(unsigned int width, unsigned int height);
int Release();
private:
@@ -60,8 +62,12 @@ class OpenH264Encoder : public VideoEncoder {
// openh264
ISVCEncoder* openh264_encoder_ = nullptr;
SEncParamExt encoder_params_;
SSourcePicture raw_frame_;
unsigned char* yuv420p_frame_ = nullptr;
int yuv420p_frame_capacity_ = 0;
uint8_t* encoded_frame_ = nullptr;
int encoded_frame_capacity_ = 0;
int encoded_frame_size_ = 0;
bool got_output = false;
bool is_keyframe = false;

View File

@@ -149,8 +149,8 @@ int PeerConnection::Init(PeerConnectionParams params,
if (on_receive_video_frame_) {
XVideoFrame x_video_frame;
x_video_frame.data = (const char *)video_frame.Buffer();
x_video_frame.width = video_frame.GetWidth();
x_video_frame.height = video_frame.GetHeight();
x_video_frame.width = video_frame.Width();
x_video_frame.height = video_frame.Height();
x_video_frame.size = video_frame.Size();
on_receive_video_frame_(&x_video_frame, user_id, user_id_size,
user_data_);