Mirror of https://github.com/kunkundi/crossdesk.git, synced 2025-10-26 20:25:34 +08:00

Commit: Remove dependency on FFmpeg
ffmpeg_video_decoder.cpp (file removed):

@@ -1,181 +0,0 @@
#include "ffmpeg_video_decoder.h"

#include "log.h"

#define SAVE_RECEIVED_H264_STREAM 0
#define SAVE_DECODED_NV12_STREAM 0

extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
};

FfmpegVideoDecoder::FfmpegVideoDecoder() {}

FfmpegVideoDecoder::~FfmpegVideoDecoder() {
  if (SAVE_RECEIVED_H264_STREAM && file_h264_) {
    fflush(file_h264_);
    fclose(file_h264_);
    file_h264_ = nullptr;
  }

  if (SAVE_DECODED_NV12_STREAM && file_nv12_) {
    fflush(file_nv12_);
    fclose(file_nv12_);
    file_nv12_ = nullptr;
  }

  if (decoded_frame_) {
    delete decoded_frame_;
    decoded_frame_ = nullptr;
  }

  if (packet_) {
    av_packet_free(&packet_);
  }

  if (frame_) {
    av_frame_free(&frame_);
  }
  if (frame_nv12_) {
    av_frame_free(&frame_nv12_);
  }

  if (img_convert_ctx) {
    sws_freeContext(img_convert_ctx);
  }
  if (codec_ctx_) {
    avcodec_close(codec_ctx_);
  }
  if (codec_ctx_) {
    av_free(codec_ctx_);
  }
}

int FfmpegVideoDecoder::Init() {
  av_log_set_level(AV_LOG_QUIET);

  codec_id_ = AV_CODEC_ID_H264;
  codec_ = avcodec_find_decoder(codec_id_);
  if (!codec_) {
    printf("Codec not found\n");
    return -1;
  }
  codec_ctx_ = avcodec_alloc_context3(codec_);
  if (!codec_ctx_) {
    printf("Could not allocate video codec context\n");
    return -1;
  } else {
    LOG_INFO("Use H264 decoder [{}]", codec_->name);
  }

  codec_ctx_->time_base.num = 1;
  codec_ctx_->frame_number = 1;
  codec_ctx_->codec_type = AVMEDIA_TYPE_VIDEO;
  codec_ctx_->bit_rate = 0;
  codec_ctx_->time_base.den = 29;
  codec_ctx_->width = 1280;
  codec_ctx_->height = 720;
  codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;  // yuv420 default?
  codec_ctx_->color_range = AVCOL_RANGE_MPEG;

  if (avcodec_open2(codec_ctx_, codec_, NULL) < 0) {
    printf("Could not open codec\n");
    return -1;
  }

  frame_ = av_frame_alloc();
  frame_nv12_ = av_frame_alloc();

  packet_ = av_packet_alloc();

  img_convert_ctx =
      sws_getContext(1280, 720, AV_PIX_FMT_YUV420P, 1280, 720, AV_PIX_FMT_NV12,
                     SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);

  decoded_frame_ = new VideoFrame(1280 * 720 * 3 / 2);

  if (SAVE_RECEIVED_H264_STREAM) {
    file_h264_ = fopen("received_h264_stream.h264", "w+b");
    if (!file_h264_) {
      LOG_WARN("Fail to open received_h264_stream.h264");
    }
  }

  if (SAVE_DECODED_NV12_STREAM) {
    file_nv12_ = fopen("decoded_nv12_stream.yuv", "w+b");
    if (!file_nv12_) {
      LOG_WARN("Fail to open decoded_nv12_stream.yuv");
    }
  }
  return 0;
}

int FfmpegVideoDecoder::Decode(
    const uint8_t *data, int size,
    std::function<void(VideoFrame)> on_receive_decoded_frame) {
  if (!first_) {
    if ((*(data + 4) & 0x1f) != 0x07) {
      return -1;
    } else {
      first_ = true;
    }
  }

  if (SAVE_RECEIVED_H264_STREAM) {
    fwrite((unsigned char *)data, 1, size, file_h264_);
  }

  packet_->data = (uint8_t *)data;
  packet_->size = size;

  int ret = avcodec_send_packet(codec_ctx_, packet_);
  av_packet_unref(packet_);

  while (ret >= 0) {
    ret = avcodec_receive_frame(codec_ctx_, frame_);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      continue;
    } else if (ret < 0) {
      LOG_ERROR("Error receive decoding video frame ret=%d", ret);
      continue;
    }

    if (on_receive_decoded_frame) {
      // uint64_t start_ts = static_cast<uint64_t>(
      //     std::chrono::duration_cast<std::chrono::microseconds>(
      //         std::chrono::high_resolution_clock::now().time_since_epoch())
      //         .count());

      if (1) {
        av_image_fill_arrays(frame_nv12_->data, frame_nv12_->linesize,
                             decoded_frame_->GetBuffer(), AV_PIX_FMT_NV12,
                             frame_->width, frame_->height, 1);

        sws_scale(img_convert_ctx, frame_->data, frame_->linesize, 0,
                  frame_->height, frame_nv12_->data, frame_nv12_->linesize);
      } else {
        memcpy(decoded_frame_->GetBuffer(), frame_->data[0],
               frame_->width * frame_->height);
        memcpy(decoded_frame_->GetBuffer() + frame_->width * frame_->height,
               frame_->data[1], frame_->width * frame_->height / 2);
      }

      // uint64_t now_ts = static_cast<uint64_t>(
      //     std::chrono::duration_cast<std::chrono::microseconds>(
      //         std::chrono::high_resolution_clock::now().time_since_epoch())
      //         .count());

      // LOG_ERROR("cost {}", now_ts - start_ts);

      on_receive_decoded_frame(*decoded_frame_);
      if (SAVE_DECODED_NV12_STREAM) {
        fwrite((unsigned char *)decoded_frame_->Buffer(), 1,
               decoded_frame_->Size(), file_nv12_);
      }
    }
  }

  return 0;
}
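
Note: the first_ guard in Decode() above drops input until the first SPS arrives. With 4-byte Annex-B start codes (00 00 00 01), byte 4 is the NAL header, and its low five bits are nal_unit_type (7 = SPS), which is what the & 0x1f mask extracts. A minimal sketch of that check as a standalone helper (names are illustrative, not project code):

```cpp
#include <cstdint>

// H.264 nal_unit_type values relevant here (Annex-B framing assumed).
enum : uint8_t { kNalIdr = 5, kNalSps = 7, kNalPps = 8 };

// NAL type of a buffer beginning with a 4-byte start code (00 00 00 01):
// the byte after the start code carries nal_unit_type in its low 5 bits.
inline uint8_t NalTypeAfterStartCode(const uint8_t *data, int size) {
  if (size < 5) return 0;                       // too short to hold a NAL header
  return static_cast<uint8_t>(data[4] & 0x1f);  // same mask the decoder uses
}

// Mirrors the guard above: skip packets until the first SPS is seen.
inline bool ShouldFeedDecoder(const uint8_t *data, int size, bool &seen_sps) {
  if (!seen_sps && NalTypeAfterStartCode(data, size) != kNalSps) return false;
  seen_sps = true;
  return true;
}
```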
ffmpeg_video_decoder.h (file removed):

@@ -1,48 +0,0 @@
#ifndef _FFMPEG_VIDEO_DECODER_H_
#define _FFMPEG_VIDEO_DECODER_H_

#ifdef _WIN32
extern "C" {
#include "libavcodec/avcodec.h"
};
#else
#ifdef __cplusplus
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#ifdef __cplusplus
};
#endif
#endif

#include <functional>

#include "video_decoder.h"

class FfmpegVideoDecoder : public VideoDecoder {
 public:
  FfmpegVideoDecoder();
  virtual ~FfmpegVideoDecoder();

 public:
  int Init();
  int Decode(const uint8_t *data, int size,
             std::function<void(VideoFrame)> on_receive_decoded_frame);

 private:
  AVCodecID codec_id_;
  const AVCodec *codec_;
  AVCodecContext *codec_ctx_ = nullptr;
  AVPacket *packet_ = nullptr;
  AVFrame *frame_ = nullptr;
  AVFrame *frame_nv12_ = nullptr;
  struct SwsContext *img_convert_ctx = nullptr;

  VideoFrame *decoded_frame_ = nullptr;

  FILE *file_h264_ = nullptr;
  FILE *file_nv12_ = nullptr;
  bool first_ = false;
};

#endif
openh264_decoder.cpp:

@@ -2,23 +2,9 @@

#include <cstring>

#include "libyuv.h"
#include "log.h"

#ifdef __cplusplus
extern "C" {
#endif
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
};
#ifdef __cplusplus
};
#endif

#define SAVE_NV12_STREAM 0
#define SAVE_H264_STREAM 0

@@ -53,30 +39,6 @@ void CopyYUVWithStride(uint8_t *srcY, uint8_t *srcU, uint8_t *srcV, int width,
  }
}

int YUV420ToNV12PFFmpeg(unsigned char *src_buffer, int width, int height,
                        unsigned char *dst_buffer) {
  AVFrame *Input_pFrame = av_frame_alloc();
  AVFrame *Output_pFrame = av_frame_alloc();
  struct SwsContext *img_convert_ctx = sws_getContext(
      width, height, AV_PIX_FMT_YUV420P, 1280, 720, AV_PIX_FMT_NV12,
      SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);

  av_image_fill_arrays(Input_pFrame->data, Input_pFrame->linesize, src_buffer,
                       AV_PIX_FMT_YUV420P, width, height, 1);
  av_image_fill_arrays(Output_pFrame->data, Output_pFrame->linesize, dst_buffer,
                       AV_PIX_FMT_NV12, 1280, 720, 1);

  sws_scale(img_convert_ctx, (uint8_t const **)Input_pFrame->data,
            Input_pFrame->linesize, 0, height, Output_pFrame->data,
            Output_pFrame->linesize);

  if (Input_pFrame) av_free(Input_pFrame);
  if (Output_pFrame) av_free(Output_pFrame);
  if (img_convert_ctx) sws_freeContext(img_convert_ctx);

  return 0;
}

OpenH264Decoder::OpenH264Decoder() {}
OpenH264Decoder::~OpenH264Decoder() {
  if (openh264_decoder_) {
@@ -175,8 +137,16 @@ int OpenH264Decoder::Decode(
      fwrite((unsigned char *)decoded_frame_, 1,
             frame_width_ * frame_height_ * 3 / 2, nv12_stream_);
    }
    YUV420ToNV12PFFmpeg(decoded_frame_, frame_width_, frame_height_,
                        nv12_frame_);

    libyuv::I420ToNV12(
        (const uint8_t *)decoded_frame_, frame_width_,
        (const uint8_t *)decoded_frame_ + frame_width_ * frame_height_,
        frame_width_ / 2,
        (const uint8_t *)decoded_frame_ +
            frame_width_ * frame_height_ * 3 / 2,
        frame_width_ / 2, nv12_frame_, frame_width_,
        nv12_frame_ + frame_width_ * frame_height_, frame_width_,
        frame_width_, frame_height_);

    VideoFrame decoded_frame(nv12_frame_,
                             frame_width_ * frame_height_ * 3 / 2,
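
The Decode() hunk above replaces the FFmpeg-based YUV420ToNV12PFFmpeg() call with libyuv::I420ToNV12(). For reference, here is a minimal sketch of that conversion over tightly packed planes; the helper name and the standard I420 offsets (U at width*height, V at width*height*5/4) are assumptions for illustration, and the project's decoded_frame_ buffer may be laid out differently, as the offsets in the hunk suggest:

```cpp
#include <cstdint>

#include "libyuv.h"  // already included at the top of this file

// Tightly packed I420 -> tightly packed NV12, same resolution.
static bool I420ToNV12Packed(const uint8_t *i420, uint8_t *nv12, int width,
                             int height) {
  const uint8_t *src_y = i420;                           // stride: width
  const uint8_t *src_u = i420 + width * height;          // stride: width / 2
  const uint8_t *src_v = i420 + width * height * 5 / 4;  // stride: width / 2
  uint8_t *dst_y = nv12;                                 // stride: width
  uint8_t *dst_uv = nv12 + width * height;               // interleaved UV, stride: width
  return libyuv::I420ToNV12(src_y, width, src_u, width / 2, src_v, width / 2,
                            dst_y, width, dst_uv, width, width, height) == 0;
}
```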
Video decoder factory (VideoDecoderFactory):

@@ -2,11 +2,9 @@

#if __APPLE__
#include "dav1d/dav1d_av1_decoder.h"
#include "ffmpeg/ffmpeg_video_decoder.h"
#include "openh264/openh264_decoder.h"
#else
#include "dav1d/dav1d_av1_decoder.h"
#include "ffmpeg/ffmpeg_video_decoder.h"
#include "nvcodec/nvidia_video_decoder.h"
#include "openh264/openh264_decoder.h"
#endif
@@ -24,7 +22,6 @@ std::unique_ptr<VideoDecoder> VideoDecoderFactory::CreateVideoDecoder(
  } else {
#if __APPLE__
    return std::make_unique<OpenH264Decoder>(OpenH264Decoder());
    // return std::make_unique<FfmpegVideoDecoder>(FfmpegVideoDecoder());
#else
    if (hardware_acceleration) {
      if (CheckIsHardwareAccerlerationSupported()) {
@@ -33,7 +30,6 @@ std::unique_ptr<VideoDecoder> VideoDecoderFactory::CreateVideoDecoder(
        return nullptr;
      }
    } else {
      // return std::make_unique<FfmpegVideoDecoder>(FfmpegVideoDecoder());
      return std::make_unique<OpenH264Decoder>(OpenH264Decoder());
    }
#endif
aom_av1_encoder.cpp:

@@ -8,8 +8,7 @@
#define SAVE_RECEIVED_NV12_STREAM 0
#define SAVE_ENCODED_AV1_STREAM 0

#define YUV420P_BUFFER_SIZE 1280 * 720 * 3 / 2
static unsigned char yuv420p_buffer[YUV420P_BUFFER_SIZE];
#define NV12_BUFFER_SIZE 1280 * 720 * 3 / 2

#define SET_ENCODER_PARAM_OR_RETURN_ERROR(param_id, param_value) \
  do { \
@@ -106,7 +105,7 @@ AomAv1Encoder::~AomAv1Encoder() {
}

int AomAv1Encoder::Init() {
  encoded_frame_ = new uint8_t[YUV420P_BUFFER_SIZE];
  encoded_frame_ = new uint8_t[NV12_BUFFER_SIZE];

  // Initialize encoder configuration structure with default values
  aom_codec_err_t ret = aom_codec_enc_config_default(
ffmpeg_video_encoder.cpp (file removed):

@@ -1,228 +0,0 @@
#include "ffmpeg_video_encoder.h"

#include <chrono>

#include "log.h"

#define SAVE_RECEIVED_NV12_STREAM 0
#define SAVE_ENCODED_H264_STREAM 0

#define YUV420P_BUFFER_SIZE 1280 * 720 * 3 / 2
static unsigned char yuv420p_buffer[YUV420P_BUFFER_SIZE];

static int NV12ToYUV420PFFmpeg(unsigned char *src_buffer, int width, int height,
                               unsigned char *dst_buffer) {
  AVFrame *Input_pFrame = av_frame_alloc();
  AVFrame *Output_pFrame = av_frame_alloc();
  struct SwsContext *img_convert_ctx = sws_getContext(
      width, height, AV_PIX_FMT_NV12, 1280, 720, AV_PIX_FMT_YUV420P,
      SWS_FAST_BILINEAR, nullptr, nullptr, nullptr);

  av_image_fill_arrays(Input_pFrame->data, Input_pFrame->linesize, src_buffer,
                       AV_PIX_FMT_NV12, width, height, 1);
  av_image_fill_arrays(Output_pFrame->data, Output_pFrame->linesize, dst_buffer,
                       AV_PIX_FMT_YUV420P, 1280, 720, 1);

  sws_scale(img_convert_ctx, (uint8_t const **)Input_pFrame->data,
            Input_pFrame->linesize, 0, height, Output_pFrame->data,
            Output_pFrame->linesize);

  if (Input_pFrame) av_free(Input_pFrame);
  if (Output_pFrame) av_free(Output_pFrame);
  if (img_convert_ctx) sws_freeContext(img_convert_ctx);

  return 0;
}
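
The removed NV12ToYUV420PFFmpeg() helper above allocates two AVFrames and an SwsContext on every call just to repack NV12 into I420 at a fixed 1280x720. For reference, the same repack can be done without FFmpeg through libyuv, which this commit already leans on elsewhere; a minimal sketch, with the helper name and tightly packed plane layout assumed for illustration:

```cpp
#include <cstdint>

#include "libyuv.h"

// FFmpeg-free counterpart of NV12ToYUV420PFFmpeg() for tightly packed buffers:
// NV12 (Y plane + interleaved UV plane) -> I420 (Y, U, V planes), same size.
static bool Nv12ToI420Packed(const uint8_t *nv12, uint8_t *i420, int width,
                             int height) {
  const uint8_t *src_y = nv12;
  const uint8_t *src_uv = nv12 + width * height;
  uint8_t *dst_y = i420;
  uint8_t *dst_u = i420 + width * height;
  uint8_t *dst_v = i420 + width * height * 5 / 4;
  return libyuv::NV12ToI420(src_y, width, src_uv, width, dst_y, width,
                            dst_u, width / 2, dst_v, width / 2,
                            width, height) == 0;
}
```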

FFmpegVideoEncoder::FFmpegVideoEncoder() {}
FFmpegVideoEncoder::~FFmpegVideoEncoder() {
  if (SAVE_RECEIVED_NV12_STREAM && file_nv12_) {
    fflush(file_nv12_);
    fclose(file_nv12_);
    file_nv12_ = nullptr;
  }

  if (SAVE_ENCODED_H264_STREAM && file_h264_) {
    fflush(file_h264_);
    fclose(file_h264_);
    file_h264_ = nullptr;
  }

  if (nv12_data_) {
    free(nv12_data_);
    nv12_data_ = nullptr;
  }

  if (packet_) {
    av_packet_free(&packet_);
  }
}

int FFmpegVideoEncoder::Init() {
  av_log_set_level(AV_LOG_ERROR);

  codec_ = avcodec_find_encoder(AV_CODEC_ID_H264);
  if (!codec_) {
    LOG_ERROR("Failed to find H.264 encoder");
    return -1;
  } else {
#ifdef __linux__
    if (0 != strcmp(codec_->name, "openh264")) {
      use_openh264_ = true;
      LOG_INFO("Use H264 encoder [OpenH264]");
    }
#else
    LOG_INFO("Use H264 encoder [{}]", codec_->name);
#endif
  }
  // use_openh264_ = true;

  codec_ctx_ = avcodec_alloc_context3(codec_);
  if (!codec_ctx_) {
    LOG_ERROR("Failed to allocate codec context");
    return -1;
  }

  codec_ctx_->codec_id = AV_CODEC_ID_H264;
  codec_ctx_->codec_type = AVMEDIA_TYPE_VIDEO;
  codec_ctx_->width = frame_width_;
  codec_ctx_->height = frame_height;
  codec_ctx_->time_base.num = 1;
  codec_ctx_->time_base.den = fps_;
  if (use_openh264_) {
    codec_ctx_->pix_fmt = AV_PIX_FMT_YUV420P;
  } else {
    codec_ctx_->pix_fmt = AV_PIX_FMT_NV12;
  }
  codec_ctx_->gop_size = keyFrameInterval_;
  codec_ctx_->keyint_min = keyFrameInterval_;
  codec_ctx_->max_b_frames = 0;
  codec_ctx_->bit_rate = maxBitrate_ * 2000;
  codec_ctx_->qmin = 15;
  codec_ctx_->qmax = 35;
  codec_ctx_->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
  codec_ctx_->flags2 |= AV_CODEC_FLAG2_LOCAL_HEADER;

  // av_opt_set_int(codec_ctx_->priv_data, "qp", 51, 0);
  // av_opt_set_int(codec_ctx_->priv_data, "crf", 23, 0);

  if (!use_openh264_) {
    av_opt_set(codec_ctx_->priv_data, "profile", "baseline", 0);
  }
  av_opt_set(codec_ctx_->priv_data, "preset", "ultrafast", 0);
  av_opt_set(codec_ctx_->priv_data, "tune", "zerolatency", 0);

  if (avcodec_open2(codec_ctx_, codec_, nullptr) < 0) {
    LOG_ERROR("Failed to open codec");
    return -1;
  }

  frame_ = av_frame_alloc();
  frame_->format = codec_ctx_->pix_fmt;
  frame_->width = codec_ctx_->width;
  frame_->height = codec_ctx_->height;

  int ret = av_frame_get_buffer(frame_, 0);
  if (ret < 0) {
    LOG_ERROR("Could not allocate the raw frame");
    return -1;
  }

  packet_ = av_packet_alloc();

  if (SAVE_RECEIVED_NV12_STREAM) {
    file_nv12_ = fopen("received_nv12_stream.yuv", "w+b");
    if (!file_nv12_) {
      LOG_WARN("Fail to open received_nv12_stream.yuv");
    }
  }

  if (SAVE_ENCODED_H264_STREAM) {
    file_h264_ = fopen("encoded_h264_stream.h264", "w+b");
    if (!file_h264_) {
      LOG_WARN("Fail to open encoded_h264_stream.h264");
    }
  }

  return 0;
}

int FFmpegVideoEncoder::Encode(
    const uint8_t *pData, int nSize,
    std::function<int(char *encoded_packets, size_t size,
                      VideoFrameType frame_type)>
        on_encoded_image) {
  if (!codec_ctx_) {
    LOG_ERROR("Invalid codec context");
    return -1;
  }

  if (use_openh264_) {
    NV12ToYUV420PFFmpeg((unsigned char *)pData, frame_->width, frame_->height,
                        (unsigned char *)yuv420p_buffer);

    frame_->data[0] = yuv420p_buffer;
    frame_->data[1] = yuv420p_buffer + frame_->width * frame_->height;
    frame_->data[2] = yuv420p_buffer + frame_->width * frame_->height * 5 / 4;

    if (SAVE_RECEIVED_NV12_STREAM) {
      fwrite(yuv420p_buffer, 1, nSize, file_nv12_);
    }
  } else {
    memcpy(frame_->data[0], pData, frame_->width * frame_->height);
    memcpy(frame_->data[1], pData + frame_->width * frame_->height,
           frame_->width * frame_->height / 2);

    if (SAVE_RECEIVED_NV12_STREAM) {
      fwrite(pData, 1, nSize, file_nv12_);
    }
  }

  frame_->pts = pts_++;

  int ret = avcodec_send_frame(codec_ctx_, frame_);

  // frame_->pict_type = AV_PICTURE_TYPE_I;
  VideoFrameType frame_type;
  if (0 == seq_++ % 300) {
    frame_type = VideoFrameType::kVideoFrameKey;
  } else {
    frame_type = VideoFrameType::kVideoFrameDelta;
  }

  while (ret >= 0) {
    ret = avcodec_receive_packet(codec_ctx_, packet_);
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
      return 0;
    } else if (ret < 0) {
      return -1;
    }

    // Remove first 6 bytes in I frame, SEI ?
    if (0x00 == packet_->data[0] && 0x00 == packet_->data[1] &&
        0x00 == packet_->data[2] && 0x01 == packet_->data[3] &&
        0x09 == packet_->data[4] && 0x10 == packet_->data[5]) {
      packet_->data += 6;
      packet_->size -= 6;
    }

    if (on_encoded_image) {
      on_encoded_image((char *)packet_->data, packet_->size, frame_type);
      if (SAVE_ENCODED_H264_STREAM) {
        fwrite(packet_->data, 1, packet_->size, file_h264_);
      }
    } else {
      OnEncodedImage((char *)packet_->data, packet_->size);
    }
    av_packet_unref(packet_);
  }

  return 0;
}
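
The "Remove first 6 bytes in I frame, SEI ?" comment in the loop above is best read as an access unit delimiter rather than SEI: 00 00 00 01 is the Annex-B start code, 0x09 is NAL type 9 (AUD), and 0x10 encodes primary_pic_type = 0 plus the RBSP stop bit. A slightly more general form of that check (illustrative helper, not project code):

```cpp
#include <cstddef>
#include <cstdint>

// True if the buffer starts with an Annex-B access unit delimiter NAL:
// a 4-byte start code followed by a NAL header whose type (low 5 bits) is 9.
static bool StartsWithAccessUnitDelimiter(const uint8_t *p, size_t n) {
  return n >= 5 && p[0] == 0x00 && p[1] == 0x00 && p[2] == 0x00 &&
         p[3] == 0x01 && (p[4] & 0x1f) == 0x09;
}
```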

int FFmpegVideoEncoder::OnEncodedImage(char *encoded_packets, size_t size) {
  LOG_INFO("OnEncodedImage not implemented");
  return 0;
}

void FFmpegVideoEncoder::ForceIdr() { frame_->pict_type = AV_PICTURE_TYPE_I; }
ffmpeg_video_encoder.h (file removed):

@@ -1,72 +0,0 @@
#ifndef _FFMPEG_VIDEO_ENCODER_H_
#define _FFMPEG_VIDEO_ENCODER_H_

#ifdef _WIN32
extern "C" {
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libswscale/swscale.h"
}
#else
#ifdef __cplusplus
extern "C" {
#endif
extern "C" {
#include <libavcodec/avcodec.h>
#include <libavdevice/avdevice.h>
#include <libavfilter/avfilter.h>
#include <libavformat/avformat.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
};
#ifdef __cplusplus
};
#endif
#endif
#include <functional>
#include <vector>

#include "video_encoder.h"

class FFmpegVideoEncoder : public VideoEncoder {
 public:
  FFmpegVideoEncoder();
  virtual ~FFmpegVideoEncoder();

  int Init();
  int Encode(const uint8_t* pData, int nSize,
             std::function<int(char* encoded_packets, size_t size,
                               VideoFrameType frame_type)>
                 on_encoded_image);

  virtual int OnEncodedImage(char* encoded_packets, size_t size);

  void ForceIdr();

 private:
  int frame_width_ = 1280;
  int frame_height = 720;
  int keyFrameInterval_ = 3000;
  int maxBitrate_ = 1000;
  int fps_ = 30;
  int max_payload_size_ = 3000;

  std::vector<std::vector<uint8_t>> encoded_packets_;
  unsigned char* encoded_image_ = nullptr;
  FILE* file_h264_ = nullptr;
  FILE* file_nv12_ = nullptr;
  unsigned char* nv12_data_ = nullptr;
  unsigned int seq_ = 0;
  bool use_openh264_ = false;

  const AVCodec* codec_ = nullptr;
  AVCodecContext* codec_ctx_ = nullptr;
  AVFrame* frame_ = nullptr;
  AVPacket* packet_ = nullptr;
  bool got_output_ = false;
  uint32_t pts_ = 0;
};

#endif
Video encoder factory (VideoEncoderFactory):

@@ -2,11 +2,9 @@

#if __APPLE__
#include "aom/aom_av1_encoder.h"
#include "ffmpeg/ffmpeg_video_encoder.h"
#include "openh264/openh264_encoder.h"
#else
#include "aom/aom_av1_encoder.h"
#include "ffmpeg/ffmpeg_video_encoder.h"
#include "nvcodec/nvidia_video_encoder.h"
#include "openh264/openh264_encoder.h"
#endif
@@ -23,7 +21,6 @@ std::unique_ptr<VideoEncoder> VideoEncoderFactory::CreateVideoEncoder(
    return std::make_unique<AomAv1Encoder>(AomAv1Encoder());
  } else {
#if __APPLE__
    // return std::make_unique<FFmpegVideoEncoder>(FFmpegVideoEncoder());
    return std::make_unique<OpenH264Encoder>(OpenH264Encoder());
#else
    if (hardware_acceleration) {
@@ -33,7 +30,6 @@ std::unique_ptr<VideoEncoder> VideoEncoderFactory::CreateVideoEncoder(
        return nullptr;
      }
    } else {
      // return std::make_unique<FFmpegVideoEncoder>(FFmpegVideoEncoder());
      return std::make_unique<OpenH264Encoder>(OpenH264Encoder());
    }
#endif
xmake.lua (24 lines changed):

@@ -17,28 +17,25 @@ add_packages("asio", "nlohmann_json", "spdlog", "openfec", "libopus", "dav1d", "
includes("thirdparty")

if is_os("windows") then
    add_requires("vcpkg::ffmpeg 5.1.2", {configs = {shared = false}})
    add_requires("vcpkg::libnice 0.1.21")
    add_requires("openh264 2.1.1", {configs = {shared = false}})
    add_requires("vcpkg::aom")
    add_packages("vcpkg::ffmpeg", "vcpkg::libnice", "openh264", "vcpkg::aom", "cuda")
    add_packages("vcpkg::libnice", "openh264", "vcpkg::aom", "cuda")
    add_defines("_WEBSOCKETPP_CPP11_INTERNAL_")
    add_requires("cuda")
elseif is_os("linux") then
    add_requires("ffmpeg 5.1.2", {system = false})
    add_requires("glib", {system = true})
    add_requires("vcpkg::libnice 0.1.21")
    add_requires("openh264 2.1.1", {configs = {shared = false}})
    add_requires("vcpkg::aom")
    add_packages("ffmpeg", "glib", "vcpkg::libnice", "openh264", "cuda")
    add_packages("glib", "vcpkg::libnice", "openh264", "cuda")
    add_cxflags("-fPIC")
    add_syslinks("pthread")
elseif is_os("macosx") then
    add_requires("ffmpeg 5.1.2", {system = false})
    add_requires("vcpkg::libnice", {configs = {shared = false}})
    add_requires("vcpkg::openh264", {configs = {shared = false}})
    add_requires("vcpkg::aom")
    add_packages("ffmpeg", "vcpkg::libnice", "vcpkg::openh264", "vcpkg::aom")
    add_packages("vcpkg::libnice", "vcpkg::openh264", "vcpkg::aom")
    add_ldflags("-Wl,-ld_classic")
end

@@ -119,8 +116,6 @@ target("media")
        "src/media/video/decode/*.cpp",
        "src/media/video/encode/nvcodec/*.cpp",
        "src/media/video/decode/nvcodec/*.cpp",
        "src/media/video/encode/ffmpeg/*.cpp",
        "src/media/video/decode/ffmpeg/*.cpp",
        "src/media/video/encode/openh264/*.cpp",
        "src/media/video/decode/openh264/*.cpp",
        "src/media/video/encode/aom/*.cpp",
@@ -129,8 +124,6 @@ target("media")
        "src/media/video/decode",
        "src/media/video/encode/nvcodec",
        "src/media/video/decode/nvcodec",
        "src/media/video/encode/ffmpeg",
        "src/media/video/decode/ffmpeg",
        "src/media/video/encode/openh264",
        "src/media/video/decode/openh264",
        "src/media/video/encode/aom",
@@ -142,8 +135,6 @@ target("media")
        "src/media/video/decode/*.cpp",
        "src/media/video/encode/nvcodec/*.cpp",
        "src/media/video/decode/nvcodec/*.cpp",
        "src/media/video/encode/ffmpeg/*.cpp",
        "src/media/video/decode/ffmpeg/*.cpp",
        "src/media/video/encode/openh264/*.cpp",
        "src/media/video/decode/openh264/*.cpp",
        "src/media/video/encode/aom/*.cpp",
@@ -152,8 +143,6 @@ target("media")
        "src/media/video/decode",
        "src/media/video/encode/nvcodec",
        "src/media/video/decode/nvcodec",
        "src/media/video/encode/ffmpeg",
        "src/media/video/decode/ffmpeg",
        "src/media/video/encode/openh264",
        "src/media/video/decode/openh264",
        "src/media/video/encode/aom",
@@ -163,16 +152,12 @@ target("media")
elseif is_os("macosx") then
    add_files("src/media/video/encode/*.cpp",
        "src/media/video/decode/*.cpp",
        "src/media/video/encode/ffmpeg/*.cpp",
        "src/media/video/decode/ffmpeg/*.cpp",
        "src/media/video/encode/openh264/*.cpp",
        "src/media/video/decode/openh264/*.cpp",
        "src/media/video/encode/aom/*.cpp",
        "src/media/video/decode/dav1d/*.cpp")
    add_includedirs("src/media/video/encode",
        "src/media/video/decode",
        "src/media/video/encode/ffmpeg",
        "src/media/video/decode/ffmpeg",
        "src/media/video/encode/openh264",
        "src/media/video/decode/openh264",
        "src/media/video/encode/aom",
@@ -197,8 +182,7 @@ target("transmission")

target("pc")
    set_kind("object")
    add_deps("log")
    add_deps("ws", "ice", "transmission", "inih", "common", "media")
    add_deps("log", "ws", "ice", "transmission", "inih", "common", "media")
    add_files("src/pc/*.cpp")
    add_includedirs("src/transmission", "src/interface", {public = true})