Mirror of https://github.com/kunkundi/crossdesk.git, synced 2025-10-27 04:35:34 +08:00
Implementation for openh264 codec
src/media/video/decode/openh264/openh264_decoder.cpp (new file, 142 lines)
@@ -0,0 +1,142 @@
#include "openh264_decoder.h"

#include <cstring>

#include "log.h"

#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif

#define SAVE_DECODER_STREAM 0
static const int YUV420P_BUFFER_SIZE = 1280 * 720 * 3 / 2;

// Repack a tightly packed YUV420P frame as NV12 using swscale.
int YUV420ToNV12PFFmpeg(unsigned char *src_buffer, int width, int height,
                        unsigned char *dst_buffer) {
  AVFrame *Input_pFrame = av_frame_alloc();
  AVFrame *Output_pFrame = av_frame_alloc();
  struct SwsContext *img_convert_ctx = sws_getContext(
      width, height, AV_PIX_FMT_YUV420P, 1280, 720, AV_PIX_FMT_NV12,
      SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);

  av_image_fill_arrays(Input_pFrame->data, Input_pFrame->linesize, src_buffer,
                       AV_PIX_FMT_YUV420P, width, height, 1);
  av_image_fill_arrays(Output_pFrame->data, Output_pFrame->linesize,
                       dst_buffer, AV_PIX_FMT_NV12, 1280, 720, 1);

  sws_scale(img_convert_ctx, (uint8_t const **)Input_pFrame->data,
            Input_pFrame->linesize, 0, height, Output_pFrame->data,
            Output_pFrame->linesize);

  // Frames from av_frame_alloc() are released with av_frame_free();
  // av_free() alone leaks the AVFrame's internals.
  av_frame_free(&Input_pFrame);
  av_frame_free(&Output_pFrame);
  if (img_convert_ctx) sws_freeContext(img_convert_ctx);

  return 0;
}

OpenH264Decoder::OpenH264Decoder() {}

OpenH264Decoder::~OpenH264Decoder() {
  // These buffers come from new[], so release them with delete[]
  // (delete[] on nullptr is a no-op, so no guards are needed).
  delete[] nv12_frame_;
  delete[] decoded_frame_;

  if (openh264_decoder_) {
    openh264_decoder_->Uninitialize();
    WelsDestroyDecoder(openh264_decoder_);
  }
}

int OpenH264Decoder::Init() {
  decoded_frame_size_ = YUV420P_BUFFER_SIZE;
  decoded_frame_ = new uint8_t[YUV420P_BUFFER_SIZE];
  nv12_frame_ = new uint8_t[YUV420P_BUFFER_SIZE];
  // pData is only an output slot array: DecodeFrameNoDelay() overwrites it
  // with pointers into the decoder's internal buffers, so pre-allocating
  // (and later deleting) memory for it would be both a leak and a bad free.

  if (WelsCreateDecoder(&openh264_decoder_) != 0) {
    LOG_ERROR("Failed to create OpenH264 decoder");
    return -1;
  }

  SDecodingParam sDecParam;
  memset(&sDecParam, 0, sizeof(SDecodingParam));
  sDecParam.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_DEFAULT;
  sDecParam.bParseOnly = false;

  int32_t iRet = openh264_decoder_->Initialize(&sDecParam);
  if (iRet != 0) {
    LOG_ERROR("Failed to initialize OpenH264 decoder, error {}", iRet);
    return -1;
  }

  LOG_INFO("Initialized, decoded_frame_size_ {}", decoded_frame_size_);
  return 0;
}

int OpenH264Decoder::Decode(
    const uint8_t *data, int size,
    std::function<void(VideoFrame)> on_receive_decoded_frame) {
  if (!openh264_decoder_) {
    return -1;
  }

  SBufferInfo sDstBufInfo;
  memset(&sDstBufInfo, 0, sizeof(SBufferInfo));

  int32_t iRet =
      openh264_decoder_->DecodeFrameNoDelay(data, size, pData, &sDstBufInfo);
  if (iRet != 0) {
    return -1;
  }

  if (sDstBufInfo.iBufferStatus == 1 && on_receive_decoded_frame) {
    // Pack the three planes into a contiguous YUV420P buffer, honouring the
    // decoder's strides. In YUV420P the U and V planes are each
    // width * height / 4 bytes; copying width * height / 2 per chroma plane
    // would overrun the destination buffer.
    const int stride_y = sDstBufInfo.UsrData.sSystemBuffer.iStride[0];
    const int stride_uv = sDstBufInfo.UsrData.sSystemBuffer.iStride[1];
    uint8_t *dst = decoded_frame_;
    for (int row = 0; row < frame_height_; ++row, dst += frame_width_) {
      memcpy(dst, pData[0] + row * stride_y, frame_width_);
    }
    for (int plane = 1; plane <= 2; ++plane) {
      for (int row = 0; row < frame_height_ / 2;
           ++row, dst += frame_width_ / 2) {
        memcpy(dst, pData[plane] + row * stride_uv, frame_width_ / 2);
      }
    }

    YUV420ToNV12PFFmpeg(decoded_frame_, frame_width_, frame_height_,
                        nv12_frame_);

    VideoFrame decoded_frame(nv12_frame_,
                             frame_width_ * frame_height_ * 3 / 2,
                             frame_width_, frame_height_);
    on_receive_decoded_frame(decoded_frame);

    if (SAVE_DECODER_STREAM && file_) {
      fwrite((unsigned char *)decoded_frame.Buffer(), 1, decoded_frame.Size(),
             file_);
    }
  }

  return 0;
}
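Note: the plane copies above depend on the YUV420P/NV12 memory layout. Both formats occupy width * height * 3 / 2 bytes: YUV420P holds a full-resolution Y plane followed by quarter-size U and V planes, while NV12 holds the Y plane followed by one interleaved UV plane. A minimal sketch of the offsets the copy loop assumes (illustrative helper, not part of this commit):

#include <cstddef>

// Byte offsets of each plane inside a tightly packed YUV420P frame.
struct Yuv420pLayout {
  size_t y_off, u_off, v_off, total;
};

inline Yuv420pLayout Yuv420pOffsets(size_t width, size_t height) {
  const size_t luma = width * height;  // full-resolution Y plane
  const size_t chroma = luma / 4;      // U and V are subsampled 2x2
  return {0, luma, luma + chroma, luma + chroma * 2};
}
// NV12 differs only past the Y plane: a single UV plane of luma / 2 bytes,
// with bytes interleaved as U0 V0 U1 V1 ...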
src/media/video/decode/openh264/openh264_decoder.h (new file, 42 lines)
@@ -0,0 +1,42 @@
/*
 * @Author: DI JUNKUN
 * @Date: 2023-11-03
 * Copyright (c) 2023 by DI JUNKUN, All Rights Reserved.
 */

#ifndef _OPENH264_DECODER_H_
#define _OPENH264_DECODER_H_

#include <wels/codec_api.h>
#include <wels/codec_app_def.h>
#include <wels/codec_def.h>
#include <wels/codec_ver.h>

#include <cstdio>
#include <functional>

#include "video_decoder.h"

class OpenH264Decoder : public VideoDecoder {
 public:
  OpenH264Decoder();
  virtual ~OpenH264Decoder();

 public:
  int Init();
  int Decode(const uint8_t* data, int size,
             std::function<void(VideoFrame)> on_receive_decoded_frame);

 private:
  ISVCDecoder* openh264_decoder_ = nullptr;
  bool get_first_keyframe_ = false;
  bool skip_frame_ = false;
  FILE* file_ = nullptr;
  uint8_t* decoded_frame_ = nullptr;
  int decoded_frame_size_ = 0;
  uint8_t* nv12_frame_ = nullptr;
  // Output plane pointers filled by DecodeFrameNoDelay(); not owned here.
  unsigned char* pData[3] = {};
  int frame_width_ = 1280;
  int frame_height_ = 720;
};

#endif
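For orientation, a minimal caller for the class above (a hypothetical harness; LoadAccessUnit and the way frames arrive are assumptions for illustration, since the commit actually wires the decoder up through VideoDecoderFactory):

#include <cstdio>
#include <vector>

#include "openh264_decoder.h"

// Hypothetical source of one complete Annex-B H.264 access unit.
std::vector<uint8_t> LoadAccessUnit();

int main() {
  OpenH264Decoder decoder;
  if (decoder.Init() != 0) return 1;

  std::vector<uint8_t> au = LoadAccessUnit();
  decoder.Decode(au.data(), static_cast<int>(au.size()),
                 [](VideoFrame frame) {
                   // frame wraps an NV12 buffer of width * height * 3 / 2 bytes.
                   std::printf("decoded frame, %d bytes\n", (int)frame.Size());
                 });
  return 0;
}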
@@ -5,6 +5,7 @@
 #else
 #include "ffmpeg/ffmpeg_video_decoder.h"
 #include "nvcodec/nvidia_video_decoder.h"
+#include "openh264/openh264_decoder.h"
 #endif

 #include "log.h"
@@ -25,7 +26,8 @@ std::unique_ptr<VideoDecoder> VideoDecoderFactory::CreateVideoDecoder(
     return nullptr;
   }
 } else {
-    return std::make_unique<FfmpegVideoDecoder>(FfmpegVideoDecoder());
+    // return std::make_unique<FfmpegVideoDecoder>(FfmpegVideoDecoder());
+    return std::make_unique<OpenH264Decoder>(OpenH264Decoder());
 }
 #endif
 }
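A side note on both factory changes in this commit: std::make_unique<OpenH264Decoder>(OpenH264Decoder()) copy-constructs the decoder from a temporary, which requires a copyable class and, after Init(), would shallow-copy raw-pointer members. The conventional call forwards constructor arguments instead; a suggested form, not what the commit does:

// Constructs the decoder in place; no temporary, no copy.
return std::make_unique<OpenH264Decoder>();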
@@ -4,14 +4,48 @@

 #include "log.h"

-#define SAVE_ENCODER_STREAM 0
+#define SAVE_NV12_STREAM 0
+#define SAVE_H264_STREAM 1
+
+#define YUV420P_BUFFER_SIZE (1280 * 720 * 3 / 2)
+// static: the OpenH264 encoder translation unit defines the same helper and
+// buffer; internal linkage avoids duplicate-symbol link errors.
+static unsigned char yuv420p_buffer[YUV420P_BUFFER_SIZE];
+
+// Convert an NV12 frame to tightly packed YUV420P using swscale.
+static int NV12ToYUV420PFFmpeg(unsigned char *src_buffer, int width,
+                               int height, unsigned char *dst_buffer) {
+  AVFrame *Input_pFrame = av_frame_alloc();
+  AVFrame *Output_pFrame = av_frame_alloc();
+  struct SwsContext *img_convert_ctx = sws_getContext(
+      width, height, AV_PIX_FMT_NV12, 1280, 720, AV_PIX_FMT_YUV420P,
+      SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);
+
+  av_image_fill_arrays(Input_pFrame->data, Input_pFrame->linesize, src_buffer,
+                       AV_PIX_FMT_NV12, width, height, 1);
+  av_image_fill_arrays(Output_pFrame->data, Output_pFrame->linesize,
+                       dst_buffer, AV_PIX_FMT_YUV420P, 1280, 720, 1);
+
+  sws_scale(img_convert_ctx, (uint8_t const **)Input_pFrame->data,
+            Input_pFrame->linesize, 0, height, Output_pFrame->data,
+            Output_pFrame->linesize);
+
+  // av_frame_alloc() pairs with av_frame_free().
+  av_frame_free(&Input_pFrame);
+  av_frame_free(&Output_pFrame);
+  if (img_convert_ctx) sws_freeContext(img_convert_ctx);
+
+  return 0;
+}

 FFmpegVideoEncoder::FFmpegVideoEncoder() {}
 FFmpegVideoEncoder::~FFmpegVideoEncoder() {
-  if (SAVE_ENCODER_STREAM && file_) {
-    fflush(file_);
-    fclose(file_);
-    file_ = nullptr;
+  if (SAVE_NV12_STREAM && file_nv12_) {
+    fflush(file_nv12_);
+    fclose(file_nv12_);
+    file_nv12_ = nullptr;
   }

+  if (SAVE_H264_STREAM && file_h264_) {
+    fflush(file_h264_);
+    fclose(file_h264_);
+    file_h264_ = nullptr;
+  }
+
   if (nv12_data_) {
@@ -25,14 +59,19 @@ FFmpegVideoEncoder::~FFmpegVideoEncoder() {
 }

 int FFmpegVideoEncoder::Init() {
-  av_log_set_level(AV_LOG_ERROR);
+  av_log_set_level(AV_LOG_VERBOSE);

   codec_ = avcodec_find_encoder(AV_CODEC_ID_H264);

   if (!codec_) {
     LOG_ERROR("Failed to find H.264 encoder");
     return -1;
+  } else {
+    LOG_INFO("Use H264 encoder [{}]", codec_->name);
+    if (0 == strcmp(codec_->name, "libx264")) {
+      use_libx264_ = true;
+    }
   }
+  use_libx264_ = true;

   codec_ctx_ = avcodec_alloc_context3(codec_);
   if (!codec_ctx_) {
@@ -46,7 +85,11 @@ int FFmpegVideoEncoder::Init() {
   codec_ctx_->height = frame_height;
   codec_ctx_->time_base.num = 1;
   codec_ctx_->time_base.den = fps_;
-  codec_ctx_->pix_fmt = AV_PIX_FMT_NV12;
+  if (use_libx264_) {
+    codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
+  } else {
+    codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
+  }
   codec_ctx_->gop_size = keyFrameInterval_;
   codec_ctx_->keyint_min = keyFrameInterval_;
   codec_ctx_->max_b_frames = 0;
@@ -77,10 +120,17 @@ int FFmpegVideoEncoder::Init() {

   packet_ = av_packet_alloc();

-  if (SAVE_ENCODER_STREAM) {
-    file_ = fopen("encode_stream.h264", "w+b");
-    if (!file_) {
-      LOG_WARN("Fail to open stream.h264");
+  if (SAVE_H264_STREAM) {
+    file_h264_ = fopen("encoded_stream.h264", "w+b");
+    if (!file_h264_) {
+      LOG_WARN("Fail to open encoded_stream.h264");
+    }
+  }
+
+  if (SAVE_NV12_STREAM) {
+    file_nv12_ = fopen("raw_stream.yuv", "w+b");
+    if (!file_nv12_) {
+      LOG_WARN("Fail to open raw_stream.yuv");
     }
   }

@@ -95,9 +145,33 @@ int FFmpegVideoEncoder::Encode(
     return -1;
   }

-  memcpy(frame_->data[0], pData, frame_->width * frame_->height);
-  memcpy(frame_->data[1], pData + frame_->width * frame_->height,
-         frame_->width * frame_->height / 2);
+  if (use_libx264_) {
+    NV12ToYUV420PFFmpeg((unsigned char *)pData, frame_->width, frame_->height,
+                        (unsigned char *)yuv420p_buffer);
+    memcpy(frame_->data[0], yuv420p_buffer, frame_->width * frame_->height);
+    // In YUV420P the U and V planes are width * height / 4 bytes each, at
+    // offsets width * height and width * height * 5 / 4.
+    memcpy(frame_->data[1], yuv420p_buffer + frame_->width * frame_->height,
+           frame_->width * frame_->height / 4);
+    memcpy(frame_->data[2],
+           yuv420p_buffer + frame_->width * frame_->height * 5 / 4,
+           frame_->width * frame_->height / 4);
+
+    if (SAVE_NV12_STREAM) {
+      fwrite(yuv420p_buffer, 1, nSize, file_nv12_);
+    }
+  } else {
+    // NV12: one luma plane plus one interleaved UV plane of half its size.
+    memcpy(frame_->data[0], pData, frame_->width * frame_->height);
+    memcpy(frame_->data[1], pData + frame_->width * frame_->height,
+           frame_->width * frame_->height / 2);
+
+    if (SAVE_NV12_STREAM) {
+      fwrite(pData, 1, nSize, file_nv12_);
+    }
+  }

   frame_->pts = pts_++;

@@ -113,17 +187,17 @@ int FFmpegVideoEncoder::Encode(
   }

   // Remove first 6 bytes in I frame, SEI ?
-  if (0x00 == packet_->data[0] && 0x00 == packet_->data[1] &&
-      0x00 == packet_->data[2] && 0x01 == packet_->data[3] &&
-      0x09 == packet_->data[4] && 0x10 == packet_->data[5]) {
-    packet_->data += 6;
-    packet_->size -= 6;
-  }
+  // if (0x00 == packet_->data[0] && 0x00 == packet_->data[1] &&
+  //     0x00 == packet_->data[2] && 0x01 == packet_->data[3] &&
+  //     0x09 == packet_->data[4] && 0x10 == packet_->data[5]) {
+  //   packet_->data += 6;
+  //   packet_->size -= 6;
+  // }

   if (on_encoded_image) {
     on_encoded_image((char *)packet_->data, packet_->size);
-    if (SAVE_ENCODER_STREAM) {
-      fwrite(packet_->data, 1, packet_->size, file_);
+    if (SAVE_H264_STREAM) {
+      fwrite(packet_->data, 1, packet_->size, file_h264_);
     }
   } else {
     OnEncodedImage((char *)packet_->data, packet_->size);

@@ -14,10 +14,12 @@ extern "C" {
 #endif
 extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavdevice/avdevice.h>
 #include <libavfilter/avfilter.h>
 #include <libavformat/avformat.h>
 #include <libavutil/imgutils.h>
+#include <libavutil/opt.h>
+#include <libswscale/swscale.h>
 }
 #ifdef __cplusplus
 }
 #endif

@@ -51,9 +53,11 @@ class FFmpegVideoEncoder : public VideoEncoder {

   std::vector<std::vector<uint8_t>> encoded_packets_;
   unsigned char* encoded_image_ = nullptr;
-  FILE* file_ = nullptr;
+  FILE* file_h264_ = nullptr;
+  FILE* file_nv12_ = nullptr;
   unsigned char* nv12_data_ = nullptr;
   unsigned int seq_ = 0;
+  bool use_libx264_ = false;

   const AVCodec* codec_ = nullptr;
   AVCodecContext* codec_ctx_ = nullptr;
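Context for the block that is now commented out above: the six stripped bytes are an Access Unit Delimiter NAL that libx264 emits before each frame, i.e. the 4-byte Annex-B start code 00 00 00 01, the NAL header byte 0x09 (nal_unit_type 9 = AUD), and one payload byte (here 0x10, carrying primary_pic_type plus the RBSP stop bit). A hedged detection helper, assuming Annex-B input:

#include <cstddef>
#include <cstdint>

// True if buf begins with a 4-byte start code followed by an AUD NAL
// (nal_unit_type == 9). Sketch only; real streams may also use 3-byte
// start codes.
inline bool StartsWithAud(const uint8_t *buf, size_t len) {
  return len >= 6 && buf[0] == 0x00 && buf[1] == 0x00 && buf[2] == 0x00 &&
         buf[3] == 0x01 && (buf[4] & 0x1F) == 9;
}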
src/media/video/encode/openh264/openh264_encoder.cpp (new file, 271 lines)
@@ -0,0 +1,271 @@
#include "openh264_encoder.h"

#include <chrono>
#include <cstring>

#include "log.h"

#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
#ifdef __cplusplus
}
#endif

#define YUV420P_BUFFER_SIZE (1280 * 720 * 3 / 2)
// static: ffmpeg_video_encoder.cpp defines the same helper and buffer;
// internal linkage avoids duplicate-symbol link errors.
static unsigned char yuv420p_buffer[YUV420P_BUFFER_SIZE];

// Convert an NV12 frame to tightly packed YUV420P using swscale.
static int NV12ToYUV420PFFmpeg(unsigned char *src_buffer, int width,
                               int height, unsigned char *dst_buffer) {
  AVFrame *Input_pFrame = av_frame_alloc();
  AVFrame *Output_pFrame = av_frame_alloc();
  struct SwsContext *img_convert_ctx = sws_getContext(
      width, height, AV_PIX_FMT_NV12, 1280, 720, AV_PIX_FMT_YUV420P,
      SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);

  av_image_fill_arrays(Input_pFrame->data, Input_pFrame->linesize, src_buffer,
                       AV_PIX_FMT_NV12, width, height, 1);
  av_image_fill_arrays(Output_pFrame->data, Output_pFrame->linesize,
                       dst_buffer, AV_PIX_FMT_YUV420P, 1280, 720, 1);

  sws_scale(img_convert_ctx, (uint8_t const **)Input_pFrame->data,
            Input_pFrame->linesize, 0, height, Output_pFrame->data,
            Output_pFrame->linesize);

  av_frame_free(&Input_pFrame);
  av_frame_free(&Output_pFrame);
  if (img_convert_ctx) sws_freeContext(img_convert_ctx);

  return 0;
}

OpenH264Encoder::OpenH264Encoder() {}
OpenH264Encoder::~OpenH264Encoder() { Release(); }

SEncParamExt OpenH264Encoder::CreateEncoderParams() const {
  SEncParamExt encoder_params;
  openh264_encoder_->GetDefaultParams(&encoder_params);
  // if (codec_.mode == VideoCodecMode::kRealtimeVideo) {
  encoder_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
  // } else if (codec_.mode == VideoCodecMode::kScreensharing) {
  //   encoder_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
  // }

  encoder_params.iPicWidth = frame_width_;
  encoder_params.iPicHeight = frame_height_;
  // NOTE: OpenH264 interprets iTargetBitrate/iMaxBitrate in bits per second,
  // so 1000 means 1 kbps; a realistic 720p target would be a few Mbps.
  encoder_params.iTargetBitrate = target_bitrate_;
  encoder_params.iMaxBitrate = max_bitrate_;
  encoder_params.iRCMode = RC_BITRATE_MODE;
  encoder_params.fMaxFrameRate = max_frame_rate_;
  encoder_params.bEnableFrameSkip = false;
  encoder_params.uiIntraPeriod = key_frame_interval_;
  encoder_params.uiMaxNalSize = 0;
  // Threading model:
  // 0: auto (decided inside the encoder)
  // 1: single thread (default value)
  // >1: number of threads
  encoder_params.iMultipleThreadIdc = 1;
  // The base spatial layer 0 is the only one we use.
  encoder_params.sSpatialLayers[0].iVideoWidth = encoder_params.iPicWidth;
  encoder_params.sSpatialLayers[0].iVideoHeight = encoder_params.iPicHeight;
  encoder_params.sSpatialLayers[0].fFrameRate = encoder_params.fMaxFrameRate;
  encoder_params.sSpatialLayers[0].iSpatialBitrate =
      encoder_params.iTargetBitrate;
  encoder_params.sSpatialLayers[0].iMaxSpatialBitrate =
      encoder_params.iMaxBitrate;
  encoder_params.iTemporalLayerNum = 1;
  if (encoder_params.iTemporalLayerNum > 1) {
    encoder_params.iNumRefFrame = 1;
  }
  LOG_INFO("OpenH264 version is [{}.{}]", OPENH264_MAJOR, OPENH264_MINOR);

  // Slicing: cap each NAL unit at max_payload_size_ bytes.
  encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceNum = 1;
  encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceMode =
      SM_SIZELIMITED_SLICE;
  encoder_params.sSpatialLayers[0].sSliceArgument.uiSliceSizeConstraint =
      static_cast<unsigned int>(max_payload_size_);
  LOG_INFO("Encoder is configured with NALU constraint: {} bytes",
           max_payload_size_);

  return encoder_params;
}

int OpenH264Encoder::Init() {
  // Create encoder.
  if (WelsCreateSVCEncoder(&openh264_encoder_) != 0) {
    LOG_ERROR("Failed to create OpenH264 encoder");
    return -1;
  }

  encoded_frame_ = new uint8_t[YUV420P_BUFFER_SIZE];

  int trace_level = WELS_LOG_WARNING;
  openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL, &trace_level);

  // Create encoder parameters based on the layer configuration.
  SEncParamExt encoder_params = CreateEncoderParams();

  if (openh264_encoder_->InitializeExt(&encoder_params) != 0) {
    LOG_ERROR("Failed to initialize OpenH264 encoder");
    Release();
    return -1;
  }

  int video_format = EVideoFormatType::videoFormatI420;
  openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &video_format);

  return 0;
}

int OpenH264Encoder::Encode(
    const uint8_t *pData, int nSize,
    std::function<int(char *encoded_packets, size_t size)> on_encoded_image) {
  if (!openh264_encoder_) {
    LOG_ERROR("Invalid openh264 encoder");
    return -1;
  }

  // Force an IDR frame every 300 frames.
  if (0 == seq_++ % 300) {
    ForceIdr();
  }

  NV12ToYUV420PFFmpeg((unsigned char *)pData, frame_width_, frame_height_,
                      (unsigned char *)yuv420p_buffer);

  raw_frame_ = {};
  raw_frame_.iPicWidth = frame_width_;
  raw_frame_.iPicHeight = frame_height_;
  raw_frame_.iColorFormat = EVideoFormatType::videoFormatI420;
  raw_frame_.uiTimeStamp =
      std::chrono::high_resolution_clock::now().time_since_epoch().count();

  raw_frame_.iStride[0] = frame_width_;
  raw_frame_.iStride[1] = frame_width_ >> 1;
  raw_frame_.iStride[2] = frame_width_ >> 1;
  raw_frame_.pData[0] = (unsigned char *)yuv420p_buffer;
  raw_frame_.pData[1] = raw_frame_.pData[0] + frame_width_ * frame_height_;
  raw_frame_.pData[2] =
      raw_frame_.pData[1] + (frame_width_ * frame_height_ >> 2);

  SFrameBSInfo info;
  memset(&info, 0, sizeof(SFrameBSInfo));

  int enc_ret = openh264_encoder_->EncodeFrame(&raw_frame_, &info);
  if (enc_ret != 0) {
    LOG_ERROR("OpenH264 frame encoding failed, EncodeFrame returned {}",
              enc_ret);
    return -1;
  }

  if (info.eFrameType == videoFrameTypeInvalid) {
    LOG_ERROR("videoFrameTypeInvalid");
    return -1;
  }

  int temporal_id = 0;
  int encoded_frame_size = 0;

  if (info.eFrameType != videoFrameTypeSkip) {
    // Concatenate every NAL unit of every layer into one contiguous buffer.
    for (int layer = 0; layer < info.iLayerNum; ++layer) {
      SLayerBSInfo *pLayerBsInfo = &(info.sLayerInfo[layer]);
      int layer_size = 0;
      temporal_id = pLayerBsInfo->uiTemporalId;
      for (int nal_index = pLayerBsInfo->iNalCount - 1; nal_index >= 0;
           --nal_index) {
        layer_size += pLayerBsInfo->pNalLengthInByte[nal_index];
      }
      memcpy(encoded_frame_ + encoded_frame_size, pLayerBsInfo->pBsBuf,
             layer_size);
      encoded_frame_size += layer_size;
    }

    got_output = true;
  } else {
    is_keyframe = false;
  }

  if (encoded_frame_size > 0) {
    encoded_frame_size_ = encoded_frame_size;

    if (on_encoded_image) {
      on_encoded_image((char *)encoded_frame_, encoded_frame_size_);
    } else {
      OnEncodedImage((char *)encoded_frame_, encoded_frame_size_);
    }

    // IDR and I frames are keyframes; for P frames, the low temporal layers
    // only count as "key" when temporal scalability is enabled.
    EVideoFrameType ft_temp = info.eFrameType;
    if (ft_temp == videoFrameTypeIDR || ft_temp == videoFrameTypeI) {
      is_keyframe = true;
    } else if (ft_temp == videoFrameTypeP) {
      is_keyframe = false;
      if (temporal_) {
        if (temporal_id == 0 || temporal_id == 1) {
          is_keyframe = true;
        }
      }
    } else {
      is_keyframe = false;
    }
  }

  return 0;
}

int OpenH264Encoder::OnEncodedImage(char *encoded_packets, size_t size) {
  LOG_INFO("OnEncodedImage not implemented");
  return 0;
}

void OpenH264Encoder::ForceIdr() {
  if (openh264_encoder_) {
    openh264_encoder_->ForceIntraFrame(true);
  }
}

int OpenH264Encoder::Release() {
  if (openh264_encoder_) {
    openh264_encoder_->Uninitialize();
    WelsDestroySVCEncoder(openh264_encoder_);
    openh264_encoder_ = nullptr;
  }

  delete[] encoded_frame_;
  encoded_frame_ = nullptr;

  return 0;
}
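The layer loop above flattens every NAL of every layer into one contiguous Annex-B buffer. If a consumer later needs per-NAL boundaries (for example an RTP packetizer), the same SFrameBSInfo can be walked NAL by NAL; a sketch under the assumption that individual units are wanted, not part of this commit:

#include <utility>
#include <vector>

#include <wels/codec_api.h>

// Collect (pointer, length) pairs for each NAL unit in an encoded frame.
// Each NAL still carries its own Annex-B start code.
inline std::vector<std::pair<const unsigned char *, int>> CollectNals(
    const SFrameBSInfo &info) {
  std::vector<std::pair<const unsigned char *, int>> nals;
  for (int layer = 0; layer < info.iLayerNum; ++layer) {
    const SLayerBSInfo &li = info.sLayerInfo[layer];
    const unsigned char *p = li.pBsBuf;
    for (int n = 0; n < li.iNalCount; ++n) {
      nals.emplace_back(p, li.pNalLengthInByte[n]);
      p += li.pNalLengthInByte[n];
    }
  }
  return nals;
}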
src/media/video/encode/openh264/openh264_encoder.h (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
 * @Author: DI JUNKUN
 * @Date: 2023-11-03
 * Copyright (c) 2023 by DI JUNKUN, All Rights Reserved.
 */

#ifndef _OPENH264_ENCODER_H_
#define _OPENH264_ENCODER_H_

#include <wels/codec_api.h>
#include <wels/codec_app_def.h>
#include <wels/codec_def.h>
#include <wels/codec_ver.h>

#include <cstdio>
#include <functional>
#include <vector>

#include "video_encoder.h"

class OpenH264Encoder : public VideoEncoder {
 public:
  OpenH264Encoder();
  virtual ~OpenH264Encoder();

  int Init();
  int Encode(
      const uint8_t* pData, int nSize,
      std::function<int(char* encoded_packets, size_t size)> on_encoded_image);

  virtual int OnEncodedImage(char* encoded_packets, size_t size);

  void ForceIdr();

 private:
  SEncParamExt CreateEncoderParams() const;
  int Release();

 private:
  int frame_width_ = 1280;
  int frame_height_ = 720;
  int key_frame_interval_ = 3000;
  int target_bitrate_ = 1000;
  int max_bitrate_ = 1000;
  int max_payload_size_ = 3000;
  int max_frame_rate_ = 30;
  std::vector<std::vector<uint8_t>> encoded_packets_;
  unsigned char* encoded_image_ = nullptr;
  FILE* file_ = nullptr;
  unsigned char* nv12_data_ = nullptr;
  unsigned int seq_ = 0;

  // openh264
  ISVCEncoder* openh264_encoder_ = nullptr;
  SSourcePicture raw_frame_;
  uint8_t* encoded_frame_ = nullptr;
  int encoded_frame_size_ = 0;
  bool got_output = false;
  bool is_keyframe = false;
  int temporal_ = 1;
};

#endif
@@ -5,6 +5,7 @@
 #else
 #include "ffmpeg/ffmpeg_video_encoder.h"
 #include "nvcodec/nvidia_video_encoder.h"
+#include "openh264/openh264_encoder.h"
 #endif

 #include "log.h"
@@ -25,7 +26,8 @@ std::unique_ptr<VideoEncoder> VideoEncoderFactory::CreateVideoEncoder(
     return nullptr;
   }
 } else {
-    return std::make_unique<FFmpegVideoEncoder>(FFmpegVideoEncoder());
+    // return std::make_unique<FFmpegVideoEncoder>(FFmpegVideoEncoder());
+    return std::make_unique<OpenH264Encoder>(OpenH264Encoder());
 }
 #endif
 }
@@ -48,7 +48,7 @@ void RtpVideoReceiver::InsertRtpPacket(RtpPacket& rtp_packet) {

   rtcp_rr.Encode();

-  SendRtcpRR(rtcp_rr);
+  // SendRtcpRR(rtcp_rr);
 }

 if (RtpPacket::NAL_UNIT_TYPE::NALU == rtp_packet.NalUnitType()) {

@@ -85,7 +85,7 @@ int RtpVideoSender::SendRtpPacket(RtpPacket& rtp_packet) {

   rtcp_sr.Encode();

-  SendRtcpSR(rtcp_sr);
+  // SendRtcpSR(rtcp_sr);
 }

 return 0;