diff --git a/src/common/copy_on_write_buffer.h b/src/common/copy_on_write_buffer.h index eaf70c4..95ca1b5 100644 --- a/src/common/copy_on_write_buffer.h +++ b/src/common/copy_on_write_buffer.h @@ -16,6 +16,10 @@ class CopyOnWriteBuffer { CopyOnWriteBuffer(size_t size) { buffer_ = std::make_shared>(size); } + CopyOnWriteBuffer(const uint8_t* data, size_t size) { + buffer_ = std::make_shared>(data, data + size); + } + CopyOnWriteBuffer(const CopyOnWriteBuffer& other) = default; CopyOnWriteBuffer(CopyOnWriteBuffer&& other) noexcept = default; CopyOnWriteBuffer& operator=(const CopyOnWriteBuffer& other) = default; diff --git a/src/common/task_queue.h b/src/common/task_queue.h index 1736650..77c55fa 100644 --- a/src/common/task_queue.h +++ b/src/common/task_queue.h @@ -20,8 +20,13 @@ class TaskQueue { public: - TaskQueue(size_t numThreads = 1) - : stop_(false), workers_(), taskQueue_(), mutex_(), cond_var_() { + TaskQueue(std::string task_name, size_t numThreads = 1) + : task_name_(task_name), + stop_(false), + workers_(), + taskQueue_(), + mutex_(), + cond_var_() { for (size_t i = 0; i < numThreads; i++) { workers_.emplace_back([this]() { this->WorkerThread(); }); } @@ -40,12 +45,10 @@ class TaskQueue { } } - // 立即执行任务 void PostTask(AnyInvocable task) { PostDelayedTask(std::move(task), 0); } - // 延迟执行任务 void PostDelayedTask(AnyInvocable task, int delay_ms) { auto execute_time = std::chrono::steady_clock::now() + std::chrono::milliseconds(delay_ms); @@ -92,10 +95,11 @@ class TaskQueue { const_cast &>(taskQueue_.top().task)); taskQueue_.pop(); } - task(); // 执行任务 + task(); } } + std::string task_name_; std::vector workers_; std::priority_queue, std::greater<>> taskQueue_; diff --git a/src/frame/decoded_frame.h b/src/frame/decoded_frame.h index 75b0c4f..1f0138a 100644 --- a/src/frame/decoded_frame.h +++ b/src/frame/decoded_frame.h @@ -14,9 +14,9 @@ class DecodedFrame : public VideoFrame { DecodedFrame(const uint8_t *buffer, size_t size, uint32_t width, uint32_t height) : VideoFrame(buffer, size, width, height) {} + DecodedFrame(const uint8_t *buffer, size_t size) : VideoFrame(buffer, size) {} DecodedFrame(size_t size, uint32_t width, uint32_t height) : VideoFrame(size, width, height) {} - DecodedFrame(const uint8_t *buffer, size_t size) : VideoFrame(buffer, size) {} DecodedFrame() = default; ~DecodedFrame() = default; diff --git a/src/frame/encoded_frame.h b/src/frame/encoded_frame.h index 5187d32..74cf68c 100644 --- a/src/frame/encoded_frame.h +++ b/src/frame/encoded_frame.h @@ -14,9 +14,9 @@ class EncodedFrame : public VideoFrame { EncodedFrame(const uint8_t *buffer, size_t size, uint32_t width, uint32_t height) : VideoFrame(buffer, size, width, height) {} + EncodedFrame(const uint8_t *buffer, size_t size) : VideoFrame(buffer, size) {} EncodedFrame(size_t size, uint32_t width, uint32_t height) : VideoFrame(size, width, height) {} - EncodedFrame(const uint8_t *buffer, size_t size) : VideoFrame(buffer, size) {} EncodedFrame() = default; ~EncodedFrame() = default; diff --git a/src/frame/raw_frame.cpp b/src/frame/raw_frame.cpp new file mode 100644 index 0000000..e69de29 diff --git a/src/frame/raw_frame.h b/src/frame/raw_frame.h new file mode 100644 index 0000000..611e650 --- /dev/null +++ b/src/frame/raw_frame.h @@ -0,0 +1,32 @@ +/* + * @Author: DI JUNKUN + * @Date: 2025-03-25 + * Copyright (c) 2025 by DI JUNKUN, All Rights Reserved. 
+ */ + +#ifndef _RAW_FRAME_H_ +#define _RAW_FRAME_H_ + +#include "video_frame.h" + +class RawFrame : public VideoFrame { + public: + RawFrame(const uint8_t *buffer, size_t size, uint32_t width, uint32_t height) + : VideoFrame(buffer, size, width, height) {} + RawFrame(size_t size, uint32_t width, uint32_t height) + : VideoFrame(size, width, height) {} + RawFrame(const uint8_t *buffer, size_t size) : VideoFrame(buffer, size) {} + RawFrame() = default; + ~RawFrame() = default; + + int64_t CapturedTimestamp() const { return captured_timestamp_us_; } + + void SetCapturedTimestamp(int64_t captured_timestamp_us) { + captured_timestamp_us_ = captured_timestamp_us; + } + + private: + int64_t captured_timestamp_us_ = 0; +}; + +#endif \ No newline at end of file diff --git a/src/frame/video_frame.cpp b/src/frame/video_frame.cpp index eb0426e..5c0aea3 100644 --- a/src/frame/video_frame.cpp +++ b/src/frame/video_frame.cpp @@ -5,94 +5,40 @@ VideoFrame::VideoFrame() {} -VideoFrame::VideoFrame(size_t size) { - buffer_ = new uint8_t[size]; +VideoFrame::VideoFrame(size_t size) : buffer_(size) { size_ = size; width_ = 0; height_ = 0; } -VideoFrame::VideoFrame(size_t size, uint32_t width, uint32_t height) { - buffer_ = new uint8_t[size]; +VideoFrame::VideoFrame(size_t size, uint32_t width, uint32_t height) + : buffer_(size) { size_ = size; width_ = width; height_ = height; } -VideoFrame::VideoFrame(const uint8_t *buffer, size_t size) { - buffer_ = new uint8_t[size]; - memcpy(buffer_, buffer, size); +VideoFrame::VideoFrame(const uint8_t *buffer, size_t size) + : buffer_(buffer, size) { size_ = size; width_ = 0; height_ = 0; } VideoFrame::VideoFrame(const uint8_t *buffer, size_t size, uint32_t width, - uint32_t height) { - buffer_ = new uint8_t[size]; - memcpy(buffer_, buffer, size); + uint32_t height) + : buffer_(buffer, size) { size_ = size; width_ = width; height_ = height; } -VideoFrame::VideoFrame(const VideoFrame &video_frame) { - if (video_frame.size_ > 0) { - buffer_ = new uint8_t[video_frame.size_]; - memcpy(buffer_, video_frame.buffer_, video_frame.size_); - size_ = video_frame.size_; - width_ = video_frame.width_; - height_ = video_frame.height_; - } -} +VideoFrame::VideoFrame(const VideoFrame &video_frame) = default; -VideoFrame::VideoFrame(VideoFrame &&video_frame) - : buffer_((uint8_t *)std::move(video_frame.buffer_)), - size_(video_frame.size_), - width_(video_frame.width_), - height_(video_frame.height_) { - video_frame.buffer_ = nullptr; - video_frame.size_ = 0; - video_frame.width_ = 0; - video_frame.height_ = 0; -} +VideoFrame::VideoFrame(VideoFrame &&video_frame) = default; -VideoFrame &VideoFrame::operator=(const VideoFrame &video_frame) { - if (&video_frame != this) { - if (buffer_) { - delete buffer_; - buffer_ = nullptr; - } - buffer_ = new uint8_t[video_frame.size_]; - memcpy(buffer_, video_frame.buffer_, video_frame.size_); - size_ = video_frame.size_; - width_ = video_frame.width_; - height_ = video_frame.height_; - } - return *this; -} +VideoFrame &VideoFrame::operator=(const VideoFrame &video_frame) = default; -VideoFrame &VideoFrame::operator=(VideoFrame &&video_frame) { - if (&video_frame != this) { - buffer_ = std::move(video_frame.buffer_); - video_frame.buffer_ = nullptr; - size_ = video_frame.size_; - video_frame.size_ = 0; - width_ = video_frame.width_; - video_frame.width_ = 0; - height_ = video_frame.height_; - video_frame.height_ = 0; - } - return *this; -} +VideoFrame &VideoFrame::operator=(VideoFrame &&video_frame) = default; -VideoFrame::~VideoFrame() { - if (buffer_) 
{ - delete buffer_; - buffer_ = nullptr; - } - - size_ = 0; - width_ = 0; - height_ = 0; -} \ No newline at end of file +VideoFrame::~VideoFrame() = default; \ No newline at end of file diff --git a/src/frame/video_frame.h b/src/frame/video_frame.h index e48f0df..562e1eb 100644 --- a/src/frame/video_frame.h +++ b/src/frame/video_frame.h @@ -10,6 +10,8 @@ #include #include +#include "copy_on_write_buffer.h" + enum VideoFrameType { kEmptyFrame = 0, kVideoFrameKey = 3, @@ -20,8 +22,8 @@ class VideoFrame { public: VideoFrame(); VideoFrame(size_t size); - VideoFrame(size_t size, uint32_t width, uint32_t height); VideoFrame(const uint8_t *buffer, size_t size); + VideoFrame(size_t size, uint32_t width, uint32_t height); VideoFrame(const uint8_t *buffer, size_t size, uint32_t width, uint32_t height); VideoFrame(const VideoFrame &video_frame); @@ -32,7 +34,7 @@ class VideoFrame { ~VideoFrame(); public: - const uint8_t *Buffer() const { return buffer_; } + const uint8_t *Buffer() const { return buffer_.data(); } size_t Size() const { return size_; } uint32_t Width() const { return width_; } uint32_t Height() const { return height_; } @@ -42,7 +44,7 @@ class VideoFrame { void SetHeight(uint32_t height) { height_ = height; } private: - uint8_t *buffer_ = nullptr; + CopyOnWriteBuffer buffer_; size_t size_ = 0; uint32_t width_ = 0; uint32_t height_ = 0; diff --git a/src/media/video/decode/aom/aom_av1_decoder.cpp b/src/media/video/decode/aom/aom_av1_decoder.cpp index ed9d87e..85053be 100644 --- a/src/media/video/decode/aom/aom_av1_decoder.cpp +++ b/src/media/video/decode/aom/aom_av1_decoder.cpp @@ -26,7 +26,7 @@ AomAv1Decoder::~AomAv1Decoder() { #endif if (nv12_frame_) { - delete nv12_frame_; + delete[] nv12_frame_; nv12_frame_ = nullptr; } } @@ -120,41 +120,33 @@ int AomAv1Decoder::Decode( return -1; } - frame_width_ = img_->d_w; - frame_height_ = img_->d_h; + size_t nv12_size = img_->d_w * img_->d_h + img_->d_w * img_->d_h / 2; + std::vector nv12_data(nv12_size); - nv12_frame_size_ = frame_width_ * frame_height_ * 3 / 2; + uint8_t *y_data = nv12_data.data(); + memcpy(y_data, img_->planes[0], img_->d_w * img_->d_h); - if (!nv12_frame_) { - nv12_frame_capacity_ = nv12_frame_size_; - nv12_frame_ = - new DecodedFrame(nv12_frame_capacity_, frame_width_, frame_height_); + uint8_t *uv_data = nv12_data.data() + img_->d_w * img_->d_h; + uint8_t *u_plane = img_->planes[1]; + uint8_t *v_plane = img_->planes[2]; + + for (int i = 0; i < img_->d_w * img_->d_h / 2; i++) { + uv_data[2 * i] = u_plane[i]; + uv_data[2 * i + 1] = v_plane[i]; } - if (nv12_frame_capacity_ < nv12_frame_size_) { - nv12_frame_capacity_ = nv12_frame_size_; - delete nv12_frame_; - nv12_frame_ = - new DecodedFrame(nv12_frame_capacity_, frame_width_, frame_height_); - } + DecodedFrame decode_frame(nv12_data.data(), nv12_size, img_->d_w, + img_->d_h); - if (nv12_frame_->Size() != nv12_frame_size_ || - nv12_frame_->Width() != frame_width_ || - nv12_frame_->Height() != frame_height_) { - nv12_frame_->SetSize(nv12_frame_size_); - nv12_frame_->SetWidth(frame_width_); - nv12_frame_->SetHeight(frame_height_); - } - - nv12_frame_->SetReceivedTimestamp(received_frame.ReceivedTimestamp()); - nv12_frame_->SetCapturedTimestamp(received_frame.CapturedTimestamp()); - nv12_frame_->SetDecodedTimestamp(clock_->CurrentTime()); - on_receive_decoded_frame(*nv12_frame_); + decode_frame.SetReceivedTimestamp(received_frame.ReceivedTimestamp()); + decode_frame.SetCapturedTimestamp(received_frame.CapturedTimestamp()); + 
decode_frame.SetDecodedTimestamp(clock_->CurrentTime()); #ifdef SAVE_DECODED_NV12_STREAM - fwrite((unsigned char *)nv12_frame_->Buffer(), 1, nv12_frame_->Size(), + fwrite((unsigned char *)decode_frame.Buffer(), 1, decode_frame.Size(), file_nv12_); #endif + on_receive_decoded_frame(decode_frame); return 0; } diff --git a/src/media/video/decode/aom/aom_av1_decoder.h b/src/media/video/decode/aom/aom_av1_decoder.h index e487863..8199701 100644 --- a/src/media/video/decode/aom/aom_av1_decoder.h +++ b/src/media/video/decode/aom/aom_av1_decoder.h @@ -30,7 +30,7 @@ class AomAv1Decoder : public VideoDecoder { private: std::shared_ptr clock_ = nullptr; - DecodedFrame *nv12_frame_ = 0; + unsigned char *nv12_frame_ = 0; int nv12_frame_capacity_ = 0; int nv12_frame_size_ = 0; diff --git a/src/media/video/decode/dav1d/dav1d_av1_decoder.cpp b/src/media/video/decode/dav1d/dav1d_av1_decoder.cpp index c34f8b9..2a1c848 100644 --- a/src/media/video/decode/dav1d/dav1d_av1_decoder.cpp +++ b/src/media/video/decode/dav1d/dav1d_av1_decoder.cpp @@ -68,7 +68,7 @@ Dav1dAv1Decoder::~Dav1dAv1Decoder() { #endif if (nv12_frame_) { - delete nv12_frame_; + delete[] nv12_frame_; nv12_frame_ = nullptr; } } @@ -162,23 +162,13 @@ int Dav1dAv1Decoder::Decode( if (!nv12_frame_) { nv12_frame_capacity_ = nv12_frame_size_; - nv12_frame_ = - new DecodedFrame(nv12_frame_capacity_, frame_width_, frame_height_); + nv12_frame_ = new unsigned char[nv12_frame_capacity_]; } if (nv12_frame_capacity_ < nv12_frame_size_) { nv12_frame_capacity_ = nv12_frame_size_; - delete nv12_frame_; - nv12_frame_ = - new DecodedFrame(nv12_frame_capacity_, frame_width_, frame_height_); - } - - if (nv12_frame_->Size() != nv12_frame_size_ || - nv12_frame_->Width() != frame_width_ || - nv12_frame_->Height() != frame_height_) { - nv12_frame_->SetSize(nv12_frame_size_); - nv12_frame_->SetWidth(frame_width_); - nv12_frame_->SetHeight(frame_height_); + delete[] nv12_frame_; + nv12_frame_ = new unsigned char[nv12_frame_capacity_]; } if (0) { @@ -186,27 +176,29 @@ int Dav1dAv1Decoder::Decode( (unsigned char *)dav1d_picture.data[1], (unsigned char *)dav1d_picture.data[2], (int)dav1d_picture.stride[0], (int)dav1d_picture.stride[1], - (unsigned char *)nv12_frame_->Buffer(), frame_width_, - frame_height_); + nv12_frame_, frame_width_, frame_height_); } else { libyuv::I420ToNV12( (const uint8_t *)dav1d_picture.data[0], (int)dav1d_picture.stride[0], (const uint8_t *)dav1d_picture.data[1], (int)dav1d_picture.stride[1], (const uint8_t *)dav1d_picture.data[2], (int)dav1d_picture.stride[1], - (uint8_t *)nv12_frame_->Buffer(), frame_width_, - (uint8_t *)nv12_frame_->Buffer() + frame_width_ * frame_height_, - frame_width_, frame_width_, frame_height_); + (uint8_t *)nv12_frame_, frame_width_, + (uint8_t *)nv12_frame_ + frame_width_ * frame_height_, frame_width_, + frame_width_, frame_height_); } - nv12_frame_->SetReceivedTimestamp(received_frame.ReceivedTimestamp()); - nv12_frame_->SetCapturedTimestamp(received_frame.CapturedTimestamp()); - nv12_frame_->SetDecodedTimestamp(clock_->CurrentTime()); - on_receive_decoded_frame(*nv12_frame_); + DecodedFrame decoded_frame(nv12_frame_, nv12_frame_capacity_, frame_width_, + frame_height_); + + decoded_frame.SetReceivedTimestamp(received_frame.ReceivedTimestamp()); + decoded_frame.SetCapturedTimestamp(received_frame.CapturedTimestamp()); + decoded_frame.SetDecodedTimestamp(clock_->CurrentTime()); #ifdef SAVE_DECODED_NV12_STREAM - fwrite((unsigned char *)nv12_frame_->Buffer(), 1, nv12_frame_->Size(), + fwrite((unsigned char 
*)decoded_frame.Buffer(), 1, decoded_frame.Size(), file_nv12_); #endif + on_receive_decoded_frame(decoded_frame); return 0; } \ No newline at end of file diff --git a/src/media/video/decode/dav1d/dav1d_av1_decoder.h b/src/media/video/decode/dav1d/dav1d_av1_decoder.h index d4dfed9..ea517ff 100644 --- a/src/media/video/decode/dav1d/dav1d_av1_decoder.h +++ b/src/media/video/decode/dav1d/dav1d_av1_decoder.h @@ -28,7 +28,7 @@ class Dav1dAv1Decoder : public VideoDecoder { private: std::shared_ptr clock_ = nullptr; - DecodedFrame *nv12_frame_ = 0; + unsigned char *nv12_frame_ = 0; size_t nv12_frame_capacity_ = 0; size_t nv12_frame_size_ = 0; diff --git a/src/media/video/decode/openh264/openh264_decoder.cpp b/src/media/video/decode/openh264/openh264_decoder.cpp index b8543f1..9dbefa1 100644 --- a/src/media/video/decode/openh264/openh264_decoder.cpp +++ b/src/media/video/decode/openh264/openh264_decoder.cpp @@ -34,22 +34,6 @@ void CopyYuvWithStride(uint8_t *src_y, uint8_t *src_u, uint8_t *src_v, } } -void ConvertYuv420pToNv12(const unsigned char *yuv_data, - unsigned char *nv12_data, int width, int height) { - int y_size = width * height; - int uv_size = y_size / 4; - const unsigned char *y_data = yuv_data; - const unsigned char *u_data = y_data + y_size; - const unsigned char *v_data = u_data + uv_size; - - std::memcpy(nv12_data, y_data, y_size); - - for (int i = 0; i < uv_size; i++) { - nv12_data[y_size + i * 2] = u_data[i]; - nv12_data[y_size + i * 2 + 1] = v_data[i]; - } -} - OpenH264Decoder::OpenH264Decoder(std::shared_ptr clock) : clock_(clock) {} OpenH264Decoder::~OpenH264Decoder() { @@ -59,7 +43,7 @@ OpenH264Decoder::~OpenH264Decoder() { } if (nv12_frame_) { - delete nv12_frame_; + delete[] nv12_frame_; } if (yuv420p_frame_) { @@ -133,15 +117,19 @@ int OpenH264Decoder::Decode( fwrite((unsigned char *)data, 1, size, h264_stream_); #endif - if ((*(data + 4) & 0x1f) == 0x07) { - // LOG_WARN("Receive key frame"); + if (size > 4 && (*(data + 4) & 0x1f) == 0x07) { + // Key frame received } SBufferInfo sDstBufInfo; memset(&sDstBufInfo, 0, sizeof(SBufferInfo)); - openh264_decoder_->DecodeFrameNoDelay(data, (int)size, yuv420p_planes_, - &sDstBufInfo); + int ret = openh264_decoder_->DecodeFrameNoDelay( + data, (int)size, yuv420p_planes_, &sDstBufInfo); + if (ret != 0) { + LOG_ERROR("Failed to decode frame, error code: {}", ret); + return -1; + } frame_width_ = sDstBufInfo.UsrData.sSystemBuffer.iWidth; frame_height_ = sDstBufInfo.UsrData.sSystemBuffer.iHeight; @@ -161,23 +149,13 @@ int OpenH264Decoder::Decode( if (!nv12_frame_) { nv12_frame_capacity_ = yuv420p_frame_size_; - nv12_frame_ = - new DecodedFrame(nv12_frame_capacity_, frame_width_, frame_height_); + nv12_frame_ = new unsigned char[nv12_frame_capacity_]; } if (nv12_frame_capacity_ < yuv420p_frame_size_) { nv12_frame_capacity_ = yuv420p_frame_size_; - delete nv12_frame_; - nv12_frame_ = - new DecodedFrame(nv12_frame_capacity_, frame_width_, frame_height_); - } - - if (nv12_frame_->Size() != nv12_frame_size_ || - nv12_frame_->Width() != frame_width_ || - nv12_frame_->Height() != frame_height_) { - nv12_frame_->SetSize(nv12_frame_size_); - nv12_frame_->SetWidth(frame_width_); - nv12_frame_->SetHeight(frame_height_); + delete[] nv12_frame_; + nv12_frame_ = new unsigned char[nv12_frame_capacity_]; } if (sDstBufInfo.iBufferStatus == 1) { @@ -188,33 +166,29 @@ int OpenH264Decoder::Decode( sDstBufInfo.UsrData.sSystemBuffer.iHeight, sDstBufInfo.UsrData.sSystemBuffer.iStride[0], sDstBufInfo.UsrData.sSystemBuffer.iStride[1], - 
sDstBufInfo.UsrData.sSystemBuffer.iStride[1], yuv420p_frame_); + sDstBufInfo.UsrData.sSystemBuffer.iStride[2], yuv420p_frame_); - if (0) { - ConvertYuv420pToNv12(yuv420p_frame_, - (unsigned char *)nv12_frame_->Buffer(), - frame_width_, frame_height_); - } else { - libyuv::I420ToNV12( - (const uint8_t *)yuv420p_frame_, frame_width_, - (const uint8_t *)yuv420p_frame_ + frame_width_ * frame_height_, - frame_width_ / 2, - (const uint8_t *)yuv420p_frame_ + - frame_width_ * frame_height_ * 5 / 4, - frame_width_ / 2, (uint8_t *)nv12_frame_->Buffer(), frame_width_, - (uint8_t *)nv12_frame_->Buffer() + frame_width_ * frame_height_, - frame_width_, frame_width_, frame_height_); - } + libyuv::I420ToNV12( + (const uint8_t *)yuv420p_frame_, frame_width_, + (const uint8_t *)yuv420p_frame_ + frame_width_ * frame_height_, + frame_width_ / 2, + (const uint8_t *)yuv420p_frame_ + + frame_width_ * frame_height_ * 5 / 4, + frame_width_ / 2, (uint8_t *)nv12_frame_, frame_width_, + (uint8_t *)nv12_frame_ + frame_width_ * frame_height_, frame_width_, + frame_width_, frame_height_); - nv12_frame_->SetReceivedTimestamp(received_frame.ReceivedTimestamp()); - nv12_frame_->SetCapturedTimestamp(received_frame.CapturedTimestamp()); - nv12_frame_->SetDecodedTimestamp(clock_->CurrentTime()); - on_receive_decoded_frame(*nv12_frame_); + DecodedFrame decoded_frame(nv12_frame_, nv12_frame_capacity_, + frame_width_, frame_height_); + decoded_frame.SetReceivedTimestamp(received_frame.ReceivedTimestamp()); + decoded_frame.SetCapturedTimestamp(received_frame.CapturedTimestamp()); + decoded_frame.SetDecodedTimestamp(clock_->CurrentTime()); #ifdef SAVE_DECODED_NV12_STREAM - fwrite((unsigned char *)nv12_frame_->Buffer(), 1, nv12_frame_->Size(), + fwrite((unsigned char *)decoded_frame.Buffer(), 1, decoded_frame.Size(), nv12_stream_); #endif + on_receive_decoded_frame(decoded_frame); } } diff --git a/src/media/video/decode/openh264/openh264_decoder.h b/src/media/video/decode/openh264/openh264_decoder.h index 8350047..408ac48 100644 --- a/src/media/video/decode/openh264/openh264_decoder.h +++ b/src/media/video/decode/openh264/openh264_decoder.h @@ -43,11 +43,11 @@ class OpenH264Decoder : public VideoDecoder { unsigned char* yuv420p_planes_[3] = {nullptr, nullptr, nullptr}; unsigned char* yuv420p_frame_ = nullptr; + unsigned char* nv12_frame_ = nullptr; int yuv420p_frame_capacity_ = 0; int yuv420p_frame_size_ = 0; - DecodedFrame* nv12_frame_ = 0; - int nv12_frame_capacity_ = 0; + int nv12_frame_capacity_ = 0; int nv12_frame_size_ = 0; }; diff --git a/src/media/video/encode/aom/aom_av1_encoder.cpp b/src/media/video/encode/aom/aom_av1_encoder.cpp index 59615a1..4e507dc 100644 --- a/src/media/video/encode/aom/aom_av1_encoder.cpp +++ b/src/media/video/encode/aom/aom_av1_encoder.cpp @@ -268,30 +268,29 @@ int AomAv1Encoder::Init() { } int AomAv1Encoder::Encode( - const XVideoFrame *video_frame, - std::function encoded_frame)> - on_encoded_image) { + const RawFrame &raw_frame, + std::function on_encoded_image) { #ifdef SAVE_RECEIVED_NV12_STREAM - fwrite(video_frame->data, 1, video_frame->size, file_nv12_); + fwrite(raw_frame.Buffer(), 1, raw_frame.Size(), file_nv12_); #endif aom_codec_err_t ret = AOM_CODEC_OK; if (!encoded_frame_) { - encoded_frame_capacity_ = video_frame->size; + encoded_frame_capacity_ = raw_frame.Size(); encoded_frame_ = new uint8_t[encoded_frame_capacity_]; } - if (encoded_frame_capacity_ < video_frame->size) { - encoded_frame_capacity_ = video_frame->size; + if (encoded_frame_capacity_ < raw_frame.Size()) { + 
encoded_frame_capacity_ = raw_frame.Size(); delete[] encoded_frame_; encoded_frame_ = new uint8_t[encoded_frame_capacity_]; } - if (video_frame->width != frame_width_ || - video_frame->height != frame_height_) { + if (raw_frame.Width() != frame_width_ || + raw_frame.Height() != frame_height_) { if (AOM_CODEC_OK != - ResetEncodeResolution(video_frame->width, video_frame->height)) { + ResetEncodeResolution(raw_frame.Width(), raw_frame.Height())) { LOG_ERROR("Reset encode resolution failed"); return -1; } @@ -301,13 +300,14 @@ int AomAv1Encoder::Encode( (uint32_t)(kRtpTicksPerSecond / static_cast(max_frame_rate_)); timestamp_ += duration; - frame_for_encode_->planes[AOM_PLANE_Y] = (unsigned char *)(video_frame->data); + frame_for_encode_->planes[AOM_PLANE_Y] = + (unsigned char *)(raw_frame.Buffer()); frame_for_encode_->planes[AOM_PLANE_U] = - (unsigned char *)(video_frame->data + - video_frame->width * video_frame->height); + (unsigned char *)(raw_frame.Buffer() + + raw_frame.Width() * raw_frame.Height()); frame_for_encode_->planes[AOM_PLANE_V] = nullptr; - frame_for_encode_->stride[AOM_PLANE_Y] = video_frame->width; - frame_for_encode_->stride[AOM_PLANE_U] = video_frame->width; + frame_for_encode_->stride[AOM_PLANE_Y] = raw_frame.Width(); + frame_for_encode_->stride[AOM_PLANE_U] = raw_frame.Width(); frame_for_encode_->stride[AOM_PLANE_V] = 0; VideoFrameType frame_type; @@ -343,15 +343,13 @@ int AomAv1Encoder::Encode( // LOG_INFO("Encoded frame qp = {}", qp); if (on_encoded_image) { - std::shared_ptr encoded_frame = - std::make_shared(encoded_frame_, encoded_frame_size_, - video_frame->width, - video_frame->height); - encoded_frame->SetFrameType(frame_type); - encoded_frame->SetEncodedWidth(video_frame->width); - encoded_frame->SetEncodedHeight(video_frame->height); - encoded_frame->SetCapturedTimestamp(video_frame->captured_timestamp); - encoded_frame->SetEncodedTimestamp(clock_->CurrentTime()); + EncodedFrame encoded_frame(encoded_frame_, encoded_frame_size_, + raw_frame.Width(), raw_frame.Height()); + encoded_frame.SetFrameType(frame_type); + encoded_frame.SetEncodedWidth(raw_frame.Width()); + encoded_frame.SetEncodedHeight(raw_frame.Height()); + encoded_frame.SetCapturedTimestamp(raw_frame.CapturedTimestamp()); + encoded_frame.SetEncodedTimestamp(clock_->CurrentTime()); on_encoded_image(encoded_frame); #ifdef SAVE_ENCODED_AV1_STREAM fwrite(encoded_frame_, 1, encoded_frame_size_, file_av1_); diff --git a/src/media/video/encode/aom/aom_av1_encoder.h b/src/media/video/encode/aom/aom_av1_encoder.h index 19f7800..5920a5d 100644 --- a/src/media/video/encode/aom/aom_av1_encoder.h +++ b/src/media/video/encode/aom/aom_av1_encoder.h @@ -37,9 +37,9 @@ class AomAv1Encoder : public VideoEncoder { public: int Init(); - int Encode(const XVideoFrame* video_frame, - std::function encoded_frame)> - on_encoded_image); + int Encode( + const RawFrame& raw_frame, + std::function on_encoded_image); int ForceIdr(); diff --git a/src/media/video/encode/nvcodec/nvidia_video_encoder.cpp b/src/media/video/encode/nvcodec/nvidia_video_encoder.cpp index 7da7c8a..6c692ad 100644 --- a/src/media/video/encode/nvcodec/nvidia_video_encoder.cpp +++ b/src/media/video/encode/nvcodec/nvidia_video_encoder.cpp @@ -130,22 +130,21 @@ int NvidiaVideoEncoder::Init() { } int NvidiaVideoEncoder::Encode( - const XVideoFrame *video_frame, - std::function encoded_frame)> - on_encoded_image) { + const RawFrame &raw_frame, + std::function on_encoded_image) { if (!encoder_) { LOG_ERROR("Invalid encoder"); return -1; } #ifdef 
SAVE_RECEIVED_NV12_STREAM - fwrite(video_frame->data, 1, video_frame->size, file_nv12_); + fwrite(raw_frame.Buffer(), 1, raw_frame.Size(), file_nv12_); #endif - if (video_frame->width != frame_width_ || - video_frame->height != frame_height_) { + if (raw_frame.Width() != frame_width_ || + raw_frame.Height() != frame_height_) { if (support_dynamic_resolution_) { - if (0 != ResetEncodeResolution(video_frame->width, video_frame->height)) { + if (0 != ResetEncodeResolution(raw_frame.Width(), raw_frame.Height())) { return -1; } } @@ -168,7 +167,7 @@ int NvidiaVideoEncoder::Encode( // encoder_->GetEncodeHeight()); NvEncoderCuda::CopyToDeviceFrame( cuda_context_, - (void *)video_frame->data, // NOLINT + (void *)raw_frame.Buffer(), // NOLINT 0, (CUdeviceptr)encoder_inputframe->inputPtr, encoder_inputframe->pitch, encoder_->GetEncodeWidth(), encoder_->GetEncodeHeight(), CU_MEMORYTYPE_HOST, encoder_inputframe->bufferFormat, @@ -182,15 +181,15 @@ int NvidiaVideoEncoder::Encode( for (const auto &packet : encoded_packets_) { if (on_encoded_image) { - std::shared_ptr encoded_frame = - std::make_shared(packet.data(), packet.size(), - encoder_->GetEncodeWidth(), - encoder_->GetEncodeHeight()); - encoded_frame->SetFrameType(frame_type); - encoded_frame->SetEncodedWidth(encoder_->GetEncodeWidth()); - encoded_frame->SetEncodedHeight(encoder_->GetEncodeHeight()); - encoded_frame->SetCapturedTimestamp(video_frame->captured_timestamp); - encoded_frame->SetEncodedTimestamp(clock_->CurrentTime()); + EncodedFrame encoded_frame(packet.data(), packet.size(), + encoder_->GetEncodeWidth(), + encoder_->GetEncodeHeight()); + + encoded_frame.SetFrameType(frame_type); + encoded_frame.SetEncodedWidth(encoder_->GetEncodeWidth()); + encoded_frame.SetEncodedHeight(encoder_->GetEncodeHeight()); + encoded_frame.SetCapturedTimestamp(raw_frame.CapturedTimestamp()); + encoded_frame.SetEncodedTimestamp(clock_->CurrentTime()); on_encoded_image(encoded_frame); #ifdef SAVE_ENCODED_H264_STREAM fwrite((unsigned char *)packet.data(), 1, packet.size(), file_h264_); diff --git a/src/media/video/encode/nvcodec/nvidia_video_encoder.h b/src/media/video/encode/nvcodec/nvidia_video_encoder.h index 6d84435..6432d0c 100644 --- a/src/media/video/encode/nvcodec/nvidia_video_encoder.h +++ b/src/media/video/encode/nvcodec/nvidia_video_encoder.h @@ -14,9 +14,9 @@ class NvidiaVideoEncoder : public VideoEncoder { int Init(); - int Encode(const XVideoFrame* video_frame, - std::function encoded_frame)> - on_encoded_image); + int Encode( + const RawFrame& raw_frame, + std::function on_encoded_image); int ForceIdr(); diff --git a/src/media/video/encode/openh264/openh264_encoder.cpp b/src/media/video/encode/openh264/openh264_encoder.cpp index 52c9e20..cf3c590 100644 --- a/src/media/video/encode/openh264/openh264_encoder.cpp +++ b/src/media/video/encode/openh264/openh264_encoder.cpp @@ -181,47 +181,46 @@ int OpenH264Encoder::Init() { } int OpenH264Encoder::Encode( - const XVideoFrame *video_frame, - std::function encoded_frame)> - on_encoded_image) { + const RawFrame &raw_frame, + std::function on_encoded_image) { if (!openh264_encoder_) { LOG_ERROR("Invalid openh264 encoder"); return -1; } #ifdef SAVE_RECEIVED_NV12_STREAM - fwrite(video_frame->data, 1, video_frame->size, file_nv12_); + fwrite(raw_frame.Buffer(), 1, raw_frame.Size(), file_nv12_); #endif if (!yuv420p_frame_) { - yuv420p_frame_capacity_ = video_frame->size; + yuv420p_frame_capacity_ = raw_frame.Size(); yuv420p_frame_ = new unsigned char[yuv420p_frame_capacity_]; } - if (yuv420p_frame_capacity_ 
< video_frame->size) { - yuv420p_frame_capacity_ = video_frame->size; + if (yuv420p_frame_capacity_ < raw_frame.Size()) { + yuv420p_frame_capacity_ = raw_frame.Size(); delete[] yuv420p_frame_; yuv420p_frame_ = new unsigned char[yuv420p_frame_capacity_]; } if (!encoded_frame_) { - encoded_frame_capacity_ = video_frame->size; + encoded_frame_capacity_ = raw_frame.Size(); encoded_frame_ = new unsigned char[encoded_frame_capacity_]; } - if (encoded_frame_capacity_ < video_frame->size) { - encoded_frame_capacity_ = video_frame->size; + if (encoded_frame_capacity_ < raw_frame.Size()) { + encoded_frame_capacity_ = raw_frame.Size(); delete[] encoded_frame_; encoded_frame_ = new unsigned char[encoded_frame_capacity_]; } - if (video_frame->width != frame_width_ || - video_frame->height != frame_height_) { - ResetEncodeResolution(video_frame->width, video_frame->height); + if (raw_frame.Width() != frame_width_ || + raw_frame.Height() != frame_height_) { + ResetEncodeResolution(raw_frame.Width(), raw_frame.Height()); } - Nv12ToI420((unsigned char *)video_frame->data, video_frame->width, - video_frame->height, yuv420p_frame_); + Nv12ToI420((unsigned char *)raw_frame.Buffer(), raw_frame.Width(), + raw_frame.Height(), yuv420p_frame_); VideoFrameType frame_type; if (0 == seq_++ % key_frame_interval_) { @@ -232,20 +231,20 @@ int OpenH264Encoder::Encode( } raw_frame_ = {0}; - raw_frame_.iPicWidth = video_frame->width; - raw_frame_.iPicHeight = video_frame->height; + raw_frame_.iPicWidth = raw_frame.Width(); + raw_frame_.iPicHeight = raw_frame.Height(); raw_frame_.iColorFormat = video_format_; raw_frame_.uiTimeStamp = std::chrono::system_clock::now().time_since_epoch().count(); - raw_frame_.iStride[0] = video_frame->width; - raw_frame_.iStride[1] = video_frame->width >> 1; - raw_frame_.iStride[2] = video_frame->width >> 1; + raw_frame_.iStride[0] = raw_frame.Width(); + raw_frame_.iStride[1] = raw_frame.Width() >> 1; + raw_frame_.iStride[2] = raw_frame.Width() >> 1; raw_frame_.pData[0] = (unsigned char *)yuv420p_frame_; raw_frame_.pData[1] = - raw_frame_.pData[0] + video_frame->width * video_frame->height; + raw_frame_.pData[0] + raw_frame.Width() * raw_frame.Height(); raw_frame_.pData[2] = - raw_frame_.pData[1] + (video_frame->width * video_frame->height >> 2); + raw_frame_.pData[1] + (raw_frame.Width() * raw_frame.Height() >> 2); SFrameBSInfo info; memset(&info, 0, sizeof(SFrameBSInfo)); @@ -282,15 +281,13 @@ int OpenH264Encoder::Encode( encoded_frame_size_ = encoded_frame_size; if (on_encoded_image) { - std::shared_ptr encoded_frame = - std::make_shared(encoded_frame_, encoded_frame_size_, - raw_frame_.iPicWidth, - raw_frame_.iPicHeight); - encoded_frame->SetFrameType(frame_type); - encoded_frame->SetEncodedWidth(raw_frame_.iPicWidth); - encoded_frame->SetEncodedHeight(raw_frame_.iPicHeight); - encoded_frame->SetCapturedTimestamp(video_frame->captured_timestamp); - encoded_frame->SetEncodedTimestamp(clock_->CurrentTime()); + EncodedFrame encoded_frame(encoded_frame_, encoded_frame_size_, + raw_frame_.iPicWidth, raw_frame_.iPicHeight); + encoded_frame.SetFrameType(frame_type); + encoded_frame.SetEncodedWidth(raw_frame_.iPicWidth); + encoded_frame.SetEncodedHeight(raw_frame_.iPicHeight); + encoded_frame.SetCapturedTimestamp(raw_frame.CapturedTimestamp()); + encoded_frame.SetEncodedTimestamp(clock_->CurrentTime()); on_encoded_image(encoded_frame); #ifdef SAVE_ENCODED_H264_STREAM fwrite(encoded_frame_, 1, encoded_frame_size_, file_h264_); @@ -335,11 +332,11 @@ int OpenH264Encoder::Encode( encoded_frame_size_ = 
encoded_frame_size; if (on_encoded_image) { - encoded_frame->SetFrameType(frame_type); - encoded_frame->SetEncodedWidth(raw_frame_.iPicWidth); - encoded_frame->SetEncodedHeight(raw_frame_.iPicHeight); - encoded_frame->SetCapturedTimestamp(video_frame->captured_timestamp); - encoded_frame->SetEncodedTimestamp(clock_->CurrentTime()); + encoded_frame.SetFrameType(frame_type); + encoded_frame.SetEncodedWidth(raw_frame_.iPicWidth); + encoded_frame.SetEncodedHeight(raw_frame_.iPicHeight); + encoded_frame.SetCapturedTimestamp(raw_frame.captured_timestamp); + encoded_frame.SetEncodedTimestamp(clock_->CurrentTime()); on_encoded_image((char *)encoded_frame_, frame_type); #ifdef SAVE_ENCODED_H264_STREAM fwrite(encoded_frame_, 1, encoded_frame_size_, file_h264_); diff --git a/src/media/video/encode/openh264/openh264_encoder.h b/src/media/video/encode/openh264/openh264_encoder.h index 12ca498..ae85a6e 100644 --- a/src/media/video/encode/openh264/openh264_encoder.h +++ b/src/media/video/encode/openh264/openh264_encoder.h @@ -24,9 +24,9 @@ class OpenH264Encoder : public VideoEncoder { int Init(); - int Encode(const XVideoFrame* video_frame, - std::function encoded_frame)> - on_encoded_image); + int Encode( + const RawFrame& raw_frame, + std::function on_encoded_image); int ForceIdr(); diff --git a/src/media/video/encode/video_encoder.h b/src/media/video/encode/video_encoder.h index 46d77ca..89f9548 100644 --- a/src/media/video/encode/video_encoder.h +++ b/src/media/video/encode/video_encoder.h @@ -10,6 +10,7 @@ #include "clock/system_clock.h" #include "encoded_frame.h" +#include "raw_frame.h" #include "x.h" #define I_FRAME_INTERVAL 3000 @@ -17,10 +18,9 @@ class VideoEncoder { public: virtual int Init() = 0; - virtual int Encode( - const XVideoFrame* video_frame, - std::function encoded_frame)> - on_encoded_image) = 0; + virtual int Encode(const RawFrame& raw_frame, + std::function + on_encoded_image) = 0; virtual int ForceIdr() = 0; diff --git a/src/rtp/rtp_packet/rtp_packet.cpp b/src/rtp/rtp_packet/rtp_packet.cpp index be1bec1..4d43456 100644 --- a/src/rtp/rtp_packet/rtp_packet.cpp +++ b/src/rtp/rtp_packet/rtp_packet.cpp @@ -6,6 +6,9 @@ RtpPacket::RtpPacket() {} RtpPacket::RtpPacket(size_t size) : buffer_(size) {} +RtpPacket::RtpPacket(const uint8_t *buffer, uint32_t size) + : buffer_(buffer, size) {} + RtpPacket::RtpPacket(const RtpPacket &rtp_packet) = default; RtpPacket::RtpPacket(RtpPacket &&rtp_packet) = default; @@ -14,7 +17,7 @@ RtpPacket &RtpPacket::operator=(const RtpPacket &rtp_packet) = default; RtpPacket &RtpPacket::operator=(RtpPacket &&rtp_packet) = default; -RtpPacket::~RtpPacket() {} +RtpPacket::~RtpPacket() = default; bool RtpPacket::Build(const uint8_t *buffer, uint32_t size) { if (!Parse(buffer, size)) { diff --git a/src/rtp/rtp_packet/rtp_packet.h b/src/rtp/rtp_packet/rtp_packet.h index c14ceca..0f26eba 100644 --- a/src/rtp/rtp_packet/rtp_packet.h +++ b/src/rtp/rtp_packet/rtp_packet.h @@ -179,6 +179,7 @@ class RtpPacket { public: RtpPacket(); RtpPacket(size_t size); + RtpPacket(const uint8_t *buffer, uint32_t size); RtpPacket(const RtpPacket &rtp_packet); RtpPacket(RtpPacket &&rtp_packet); RtpPacket &operator=(const RtpPacket &rtp_packet); diff --git a/src/rtp/rtp_statistics/rtp_statistics.cpp b/src/rtp/rtp_statistics/rtp_statistics.cpp deleted file mode 100644 index 6143dc3..0000000 --- a/src/rtp/rtp_statistics/rtp_statistics.cpp +++ /dev/null @@ -1,50 +0,0 @@ -#include "rtp_statistics.h" - -#include "log.h" - -RtpStatistics::RtpStatistics() { - 
SetPeriod(std::chrono::milliseconds(1000)); - SetThreadName("RtpStatistics"); -} - -RtpStatistics::~RtpStatistics() {} - -void RtpStatistics::UpdateSentBytes(uint32_t sent_bytes) { - sent_bytes_ += sent_bytes; -} - -void RtpStatistics::UpdateReceiveBytes(uint32_t received_bytes) { - received_bytes_ += received_bytes; -} - -void RtpStatistics::UpdatePacketLossRate(uint16_t seq_num) { - if (last_received_seq_num_ != 0) { - if (last_received_seq_num_ < seq_num) { - // seq wrap - if (seq_num - last_received_seq_num_ > 0x8000) { - lost_packets_num_ += 0xffff - last_received_seq_num_ + seq_num + 1; - } else { - lost_packets_num_ += seq_num - last_received_seq_num_ - 1; - } - } else if (last_received_seq_num_ > seq_num) { - lost_packets_num_ += 0xffff - last_received_seq_num_ + seq_num + 1; - } - } - last_received_seq_num_ = seq_num; -} - -bool RtpStatistics::Process() { - if (!sent_bytes_) { - // LOG_INFO("rtp statistics: Send [{} bps]", sent_bytes_); - } - - if (!received_bytes_) { - // LOG_INFO("rtp statistics: Receive [{} bps]", received_bytes_); - } - - sent_bytes_ = 0; - received_bytes_ = 0; - - std::this_thread::sleep_for(std::chrono::seconds(1)); - return true; -} \ No newline at end of file diff --git a/src/rtp/rtp_statistics/rtp_statistics.h b/src/rtp/rtp_statistics/rtp_statistics.h deleted file mode 100644 index e0efdb6..0000000 --- a/src/rtp/rtp_statistics/rtp_statistics.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef _RTP_STATISTICS_H_ -#define _RTP_STATISTICS_H_ - -#include "thread_base.h" - -class RtpStatistics : public ThreadBase { - public: - RtpStatistics(); - virtual ~RtpStatistics(); - - public: - // send side - void UpdateSentBytes(uint32_t sent_bytes); - - // receive side - void UpdateReceiveBytes(uint32_t received_bytes); - void UpdatePacketLossRate(uint16_t seq_num); - - private: - bool Process(); - - private: - uint32_t sent_bytes_ = 0; - uint32_t received_bytes_ = 0; - uint16_t last_received_seq_num_ = 0; - uint32_t lost_packets_num_ = 0; -}; - -#endif \ No newline at end of file diff --git a/src/transport/channel/rtp_audio_receiver.cpp b/src/transport/channel/rtp_audio_receiver.cpp index 445acc0..833e4a1 100644 --- a/src/transport/channel/rtp_audio_receiver.cpp +++ b/src/transport/channel/rtp_audio_receiver.cpp @@ -7,26 +7,13 @@ RtpAudioReceiver::RtpAudioReceiver() {} RtpAudioReceiver::RtpAudioReceiver(std::shared_ptr io_statistics) : io_statistics_(io_statistics) {} -RtpAudioReceiver::~RtpAudioReceiver() { - if (rtp_statistics_) { - rtp_statistics_->Stop(); - } -} +RtpAudioReceiver::~RtpAudioReceiver() {} void RtpAudioReceiver::InsertRtpPacket(RtpPacket& rtp_packet) { - if (!rtp_statistics_) { - rtp_statistics_ = std::make_unique(); - rtp_statistics_->Start(); - } - last_recv_bytes_ = (uint32_t)rtp_packet.Size(); total_rtp_payload_recv_ += (uint32_t)rtp_packet.PayloadSize(); total_rtp_packets_recv_++; - if (rtp_statistics_) { - rtp_statistics_->UpdateReceiveBytes(last_recv_bytes_); - } - if (io_statistics_) { io_statistics_->UpdateAudioInboundBytes(last_recv_bytes_); io_statistics_->IncrementAudioInboundRtpPacketCount(); diff --git a/src/transport/channel/rtp_audio_receiver.h b/src/transport/channel/rtp_audio_receiver.h index f522e18..a6404c8 100644 --- a/src/transport/channel/rtp_audio_receiver.h +++ b/src/transport/channel/rtp_audio_receiver.h @@ -12,7 +12,6 @@ #include "io_statistics.h" #include "receiver_report.h" #include "rtp_packet.h" -#include "rtp_statistics.h" #include "sender_report.h" class RtpAudioReceiver { @@ -44,7 +43,6 @@ class RtpAudioReceiver { uint32_t 
last_complete_frame_ts_ = 0; private: - std::unique_ptr rtp_statistics_ = nullptr; std::shared_ptr io_statistics_ = nullptr; uint32_t last_recv_bytes_ = 0; uint32_t total_rtp_payload_recv_ = 0; diff --git a/src/transport/channel/rtp_audio_sender.cpp b/src/transport/channel/rtp_audio_sender.cpp index 7d58ae7..b462f26 100644 --- a/src/transport/channel/rtp_audio_sender.cpp +++ b/src/transport/channel/rtp_audio_sender.cpp @@ -15,21 +15,10 @@ RtpAudioSender::RtpAudioSender(std::shared_ptr io_statistics) SetThreadName("RtpAudioSender"); } -RtpAudioSender::~RtpAudioSender() { - if (rtp_statistics_) { - rtp_statistics_->Stop(); - } - - SSRCManager::Instance().DeleteSsrc(ssrc_); -} +RtpAudioSender::~RtpAudioSender() { SSRCManager::Instance().DeleteSsrc(ssrc_); } void RtpAudioSender::Enqueue( std::vector>& rtp_packets) { - if (!rtp_statistics_) { - rtp_statistics_ = std::make_unique(); - rtp_statistics_->Start(); - } - for (auto& rtp_packet : rtp_packets) { rtp_packet_queue_.push(std::move(rtp_packet)); } @@ -149,9 +138,5 @@ bool RtpAudioSender::Process() { } } - if (rtp_statistics_) { - rtp_statistics_->UpdateSentBytes(last_send_bytes_); - } - return true; } \ No newline at end of file diff --git a/src/transport/channel/rtp_audio_sender.h b/src/transport/channel/rtp_audio_sender.h index 07df804..9a74d6f 100644 --- a/src/transport/channel/rtp_audio_sender.h +++ b/src/transport/channel/rtp_audio_sender.h @@ -13,7 +13,6 @@ #include "receiver_report.h" #include "ringbuffer.h" #include "rtp_packet.h" -#include "rtp_statistics.h" #include "sender_report.h" #include "thread_base.h" @@ -44,7 +43,6 @@ class RtpAudioSender : public ThreadBase { private: uint32_t ssrc_ = 0; - std::unique_ptr rtp_statistics_ = nullptr; std::shared_ptr io_statistics_ = nullptr; uint32_t last_send_bytes_ = 0; uint32_t total_rtp_payload_sent_ = 0; diff --git a/src/transport/channel/rtp_data_receiver.cpp b/src/transport/channel/rtp_data_receiver.cpp index 24394a5..8fd8066 100644 --- a/src/transport/channel/rtp_data_receiver.cpp +++ b/src/transport/channel/rtp_data_receiver.cpp @@ -7,26 +7,13 @@ RtpDataReceiver::RtpDataReceiver() {} RtpDataReceiver::RtpDataReceiver(std::shared_ptr io_statistics) : io_statistics_(io_statistics) {} -RtpDataReceiver::~RtpDataReceiver() { - if (rtp_statistics_) { - rtp_statistics_->Stop(); - } -} +RtpDataReceiver::~RtpDataReceiver() {} void RtpDataReceiver::InsertRtpPacket(RtpPacket& rtp_packet) { - if (!rtp_statistics_) { - rtp_statistics_ = std::make_unique(); - rtp_statistics_->Start(); - } - last_recv_bytes_ = (uint32_t)rtp_packet.Size(); total_rtp_payload_recv_ += (uint32_t)rtp_packet.PayloadSize(); total_rtp_packets_recv_++; - if (rtp_statistics_) { - rtp_statistics_->UpdateReceiveBytes(last_recv_bytes_); - } - if (io_statistics_) { io_statistics_->UpdateDataInboundBytes(last_recv_bytes_); io_statistics_->IncrementDataInboundRtpPacketCount(); diff --git a/src/transport/channel/rtp_data_receiver.h b/src/transport/channel/rtp_data_receiver.h index 00d95ad..f7711b9 100644 --- a/src/transport/channel/rtp_data_receiver.h +++ b/src/transport/channel/rtp_data_receiver.h @@ -6,7 +6,6 @@ #include "io_statistics.h" #include "receiver_report.h" #include "rtp_packet.h" -#include "rtp_statistics.h" #include "sender_report.h" class RtpDataReceiver { public: @@ -37,7 +36,6 @@ class RtpDataReceiver { uint32_t last_complete_frame_ts_ = 0; private: - std::unique_ptr rtp_statistics_ = nullptr; std::shared_ptr io_statistics_ = nullptr; uint32_t last_recv_bytes_ = 0; uint32_t total_rtp_payload_recv_ = 0; diff --git 
a/src/transport/channel/rtp_data_sender.cpp b/src/transport/channel/rtp_data_sender.cpp index 6b869a0..da145fa 100644 --- a/src/transport/channel/rtp_data_sender.cpp +++ b/src/transport/channel/rtp_data_sender.cpp @@ -15,21 +15,10 @@ RtpDataSender::RtpDataSender(std::shared_ptr io_statistics) SetThreadName("RtpDataSender"); } -RtpDataSender::~RtpDataSender() { - if (rtp_statistics_) { - rtp_statistics_->Stop(); - } - - SSRCManager::Instance().DeleteSsrc(ssrc_); -} +RtpDataSender::~RtpDataSender() { SSRCManager::Instance().DeleteSsrc(ssrc_); } void RtpDataSender::Enqueue( std::vector>& rtp_packets) { - if (!rtp_statistics_) { - rtp_statistics_ = std::make_unique(); - rtp_statistics_->Start(); - } - for (auto& rtp_packet : rtp_packets) { rtp_packet_queue_.push(std::move(rtp_packet)); } @@ -149,9 +138,5 @@ bool RtpDataSender::Process() { } } - if (rtp_statistics_) { - rtp_statistics_->UpdateSentBytes(last_send_bytes_); - } - return true; } \ No newline at end of file diff --git a/src/transport/channel/rtp_data_sender.h b/src/transport/channel/rtp_data_sender.h index 83bc2c0..aa52fec 100644 --- a/src/transport/channel/rtp_data_sender.h +++ b/src/transport/channel/rtp_data_sender.h @@ -13,7 +13,6 @@ #include "receiver_report.h" #include "ringbuffer.h" #include "rtp_packet.h" -#include "rtp_statistics.h" #include "sender_report.h" #include "thread_base.h" @@ -45,7 +44,6 @@ class RtpDataSender : public ThreadBase { private: uint32_t ssrc_ = 0; - std::unique_ptr rtp_statistics_ = nullptr; std::shared_ptr io_statistics_ = nullptr; uint32_t last_send_bytes_ = 0; uint32_t total_rtp_payload_sent_ = 0; diff --git a/src/transport/channel/rtp_video_receiver.cpp b/src/transport/channel/rtp_video_receiver.cpp index 17009dd..50e2b0e 100644 --- a/src/transport/channel/rtp_video_receiver.cpp +++ b/src/transport/channel/rtp_video_receiver.cpp @@ -77,10 +77,6 @@ RtpVideoReceiver::~RtpVideoReceiver() { SSRCManager::Instance().DeleteSsrc(ssrc_); - if (rtp_statistics_) { - rtp_statistics_->Stop(); - } - delete[] nv12_data_; #ifdef SAVE_RTP_RECV_STREAM @@ -93,11 +89,6 @@ RtpVideoReceiver::~RtpVideoReceiver() { } void RtpVideoReceiver::InsertRtpPacket(RtpPacket& rtp_packet) { - if (!rtp_statistics_) { - rtp_statistics_ = std::make_unique(); - rtp_statistics_->Start(); - } - webrtc::RtpPacketReceived rtp_packet_received; rtp_packet_received.Build(rtp_packet.Buffer().data(), rtp_packet.Size()); rtp_packet_received.set_arrival_time(clock_->CurrentTime()); @@ -153,10 +144,6 @@ void RtpVideoReceiver::InsertRtpPacket(RtpPacket& rtp_packet) { total_rtp_payload_recv_ += (uint32_t)rtp_packet.PayloadSize(); total_rtp_packets_recv_++; - if (rtp_statistics_) { - rtp_statistics_->UpdateReceiveBytes(last_recv_bytes_); - } - if (io_statistics_) { io_statistics_->UpdateVideoInboundBytes(last_recv_bytes_); io_statistics_->IncrementVideoInboundRtpPacketCount(); diff --git a/src/transport/channel/rtp_video_receiver.h b/src/transport/channel/rtp_video_receiver.h index deb38f2..2bc8dc7 100644 --- a/src/transport/channel/rtp_video_receiver.h +++ b/src/transport/channel/rtp_video_receiver.h @@ -21,7 +21,6 @@ #include "rtp_packet_av1.h" #include "rtp_packet_h264.h" #include "rtp_rtcp_defines.h" -#include "rtp_statistics.h" #include "sender_report.h" #include "thread_base.h" @@ -103,7 +102,6 @@ class RtpVideoReceiver : public ThreadBase, RingBuffer compelete_video_frame_queue_; private: - std::unique_ptr rtp_statistics_ = nullptr; std::shared_ptr io_statistics_ = nullptr; uint32_t last_recv_bytes_ = 0; uint32_t total_rtp_packets_recv_ = 0; 
diff --git a/src/transport/channel/rtp_video_sender.cpp b/src/transport/channel/rtp_video_sender.cpp index a7b7876..95dd2f4 100644 --- a/src/transport/channel/rtp_video_sender.cpp +++ b/src/transport/channel/rtp_video_sender.cpp @@ -28,10 +28,6 @@ RtpVideoSender::RtpVideoSender(std::shared_ptr clock, } RtpVideoSender::~RtpVideoSender() { - if (rtp_statistics_) { - rtp_statistics_->Stop(); - } - SSRCManager::Instance().DeleteSsrc(ssrc_); #ifdef SAVE_RTP_SENT_STREAM @@ -46,11 +42,6 @@ RtpVideoSender::~RtpVideoSender() { void RtpVideoSender::Enqueue( std::vector>& rtp_packets, int64_t captured_timestamp_us) { - if (!rtp_statistics_) { - rtp_statistics_ = std::make_unique(); - rtp_statistics_->Start(); - } - std::vector> to_send_rtp_packets; for (auto& rtp_packet : rtp_packets) { std::unique_ptr rtp_packet_to_send( @@ -179,10 +170,6 @@ bool RtpVideoSender::Process() { } } - if (rtp_statistics_) { - rtp_statistics_->UpdateSentBytes(last_send_bytes_); - } - return true; } diff --git a/src/transport/channel/rtp_video_sender.h b/src/transport/channel/rtp_video_sender.h index 847140c..f7dfcab 100644 --- a/src/transport/channel/rtp_video_sender.h +++ b/src/transport/channel/rtp_video_sender.h @@ -10,7 +10,6 @@ #include "ringbuffer.h" #include "rtp_packet.h" #include "rtp_packet_to_send.h" -#include "rtp_statistics.h" #include "sender_report.h" #include "thread_base.h" @@ -59,7 +58,6 @@ class RtpVideoSender : public ThreadBase { private: uint32_t ssrc_ = 0; std::shared_ptr clock_ = nullptr; - std::unique_ptr rtp_statistics_ = nullptr; std::shared_ptr io_statistics_ = nullptr; uint32_t last_send_bytes_ = 0; uint32_t last_send_rtcp_sr_packet_ts_ = 0; diff --git a/src/transport/channel/video_channel_send.cpp b/src/transport/channel/video_channel_send.cpp index 5cba365..95c897e 100644 --- a/src/transport/channel/video_channel_send.cpp +++ b/src/transport/channel/video_channel_send.cpp @@ -105,14 +105,14 @@ void VideoChannelSend::Destroy() { } } -int VideoChannelSend::SendVideo(std::shared_ptr encoded_frame) { +int VideoChannelSend::SendVideo(const EncodedFrame& encoded_frame) { if (rtp_video_sender_ && rtp_packetizer_ && packet_sender_) { int32_t rtp_timestamp = delta_ntp_internal_ms_ + - static_cast(encoded_frame->CapturedTimestamp() / 1000); + static_cast(encoded_frame.CapturedTimestamp() / 1000); std::vector> rtp_packets = - rtp_packetizer_->Build((uint8_t*)encoded_frame->Buffer(), - (uint32_t)encoded_frame->Size(), rtp_timestamp, + rtp_packetizer_->Build((uint8_t*)encoded_frame.Buffer(), + (uint32_t)encoded_frame.Size(), rtp_timestamp, true); packet_sender_->EnqueueRtpPacket(std::move(rtp_packets), rtp_timestamp); } diff --git a/src/transport/channel/video_channel_send.h b/src/transport/channel/video_channel_send.h index d0f69dd..f87f394 100644 --- a/src/transport/channel/video_channel_send.h +++ b/src/transport/channel/video_channel_send.h @@ -53,7 +53,7 @@ class VideoChannelSend { return 0; } - int SendVideo(std::shared_ptr encoded_frame); + int SendVideo(const EncodedFrame& encoded_frame); void OnReceiverReport(const ReceiverReport& receiver_report) { if (rtp_video_sender_) { diff --git a/src/transport/ice_transport_controller.cpp b/src/transport/ice_transport_controller.cpp index c1c58d2..a9a3f73 100644 --- a/src/transport/ice_transport_controller.cpp +++ b/src/transport/ice_transport_controller.cpp @@ -55,10 +55,13 @@ void IceTransportController::Create( CreateVideoCodec(clock_, video_codec_payload_type, hardware_acceleration); CreateAudioCodec(); - task_queue_ = std::make_shared(); + 
task_queue_cc_ = std::make_shared("congest control"); + task_queue_encode_ = std::make_shared("encode"); + task_queue_decode_ = std::make_shared("decode"); + controller_ = std::make_unique(); - packet_sender_ = - std::make_shared(ice_agent, webrtc_clock_, task_queue_); + packet_sender_ = std::make_shared(ice_agent, webrtc_clock_, + task_queue_cc_); packet_sender_->SetPacingRates(DataRate::BitsPerSec(300000), DataRate::Zero()); packet_sender_->SetSendBurstInterval(TimeDelta::Millis(40)); @@ -201,8 +204,6 @@ int IceTransportController::SendVideo(const XVideoFrame* video_frame) { b_force_i_frame_ = false; } - bool need_to_release = false; - XVideoFrame new_frame; new_frame.data = nullptr; new_frame.width = video_frame->width; @@ -215,30 +216,34 @@ int IceTransportController::SendVideo(const XVideoFrame* video_frame) { resolution_adapter_->ResolutionDowngrade( video_frame, target_width_.value(), target_height_.value(), &new_frame); - need_to_release = true; + } else { + new_frame.data = new char[video_frame->size]; + memcpy((void*)new_frame.data, (void*)video_frame->data, + video_frame->size); } } - int ret = video_encoder_->Encode( - need_to_release ? &new_frame : video_frame, - [this](std::shared_ptr encoded_frame) -> int { - if (video_channel_send_) { - video_channel_send_->SendVideo(encoded_frame); - } + RawFrame raw_frame((const uint8_t*)new_frame.data, new_frame.size, + new_frame.width, new_frame.height); + raw_frame.SetCapturedTimestamp(video_frame->captured_timestamp); - return 0; - }); + delete[] new_frame.data; - if (need_to_release) { - delete[] new_frame.data; + if (task_queue_encode_ && video_encoder_) { + task_queue_encode_->PostTask([this, raw_frame]() mutable { + int ret = video_encoder_->Encode( + std::move(raw_frame), + [this](const EncodedFrame& encoded_frame) -> int { + if (video_channel_send_) { + video_channel_send_->SendVideo(encoded_frame); + } + + return 0; + }); + }); } - if (0 != ret) { - LOG_ERROR("Encode failed"); - return -1; - } else { - return 0; - } + return 0; } int IceTransportController::SendAudio(const char* data, size_t size) { @@ -506,8 +511,8 @@ void IceTransportController::OnReceiverReport( msg.start_time = last_report_block_time_; msg.end_time = now; - if (task_queue_) { - task_queue_->PostTask([this, msg]() mutable { + if (task_queue_cc_) { + task_queue_cc_->PostTask([this, msg]() mutable { if (controller_) { PostUpdates(controller_->OnTransportLossReport(msg)); } @@ -522,8 +527,8 @@ void IceTransportController::OnCongestionControlFeedback( std::optional feedback_msg = transport_feedback_adapter_.ProcessCongestionControlFeedback( feedback, Timestamp::Micros(clock_->CurrentTimeUs())); - if (feedback_msg.has_value() && task_queue_) { - task_queue_->PostTask([this, feedback_msg]() mutable { + if (feedback_msg.has_value() && task_queue_cc_) { + task_queue_cc_->PostTask([this, feedback_msg]() mutable { if (controller_) { PostUpdates( controller_->OnTransportPacketsFeedback(feedback_msg.value())); @@ -633,8 +638,8 @@ bool IceTransportController::Process() { return false; } - if (task_queue_ && controller_) { - task_queue_->PostTask([this]() mutable { + if (task_queue_cc_ && controller_) { + task_queue_cc_->PostTask([this]() mutable { webrtc::ProcessInterval msg; msg.at_time = Timestamp::Millis(webrtc_clock_->TimeInMilliseconds()); PostUpdates(controller_->OnProcessInterval(msg)); diff --git a/src/transport/ice_transport_controller.h b/src/transport/ice_transport_controller.h index 89f8bc1..dbdf1ce 100644 --- a/src/transport/ice_transport_controller.h +++ 
b/src/transport/ice_transport_controller.h @@ -120,7 +120,9 @@ class IceTransportController webrtc::TransportFeedbackAdapter transport_feedback_adapter_; std::unique_ptr controller_; BitrateProber prober_; - std::shared_ptr<TaskQueue> task_queue_; + std::shared_ptr<TaskQueue> task_queue_cc_; + std::shared_ptr<TaskQueue> task_queue_encode_; + std::shared_ptr<TaskQueue> task_queue_decode_; webrtc::DataSize congestion_window_size_; bool is_congested_ = false; diff --git a/xmake.lua b/xmake.lua index 39f0fc5..7387670 100644 --- a/xmake.lua +++ b/xmake.lua @@ -74,6 +74,7 @@ target("thread") target("frame") set_kind("object") + add_deps("common") add_files("src/frame/*.cpp") add_includedirs("src/frame", {public = true}) @@ -113,11 +114,9 @@ target("ws") target("rtp") set_kind("object") add_deps("log", "common", "frame", "ringbuffer", "thread", "rtcp", "fec", "statistics") - add_files("src/rtp/rtp_statistics/*.cpp", - "src/rtp/rtp_packet/*.cpp", + add_files("src/rtp/rtp_packet/*.cpp", "src/rtp/rtp_packetizer/*.cpp") - add_includedirs("src/rtp/rtp_statistics", - "src/rtp/rtp_packet", + add_includedirs("src/rtp/rtp_packet", "src/rtp/rtp_packetizer", {public = true}) target("rtcp")
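
Note (reviewer sketch, not part of the patch): the central refactor above is that VideoFrame now stores its pixels in a CopyOnWriteBuffer instead of a raw uint8_t* it owns. That is what allows its copy/move constructors, assignment operators, and destructor to become = default, removes the old scalar delete of an array allocated with new[], and lets DecodedFrame, EncodedFrame, and RawFrame be passed by value into the encode/decode callbacks and captured by value in the task posted to task_queue_encode_. The buffer's template arguments are not visible in the patch as rendered here, so the following is only a minimal sketch of the usual copy-on-write idiom, assuming the payload lives in a std::shared_ptr<std::vector<uint8_t>>; the class name and the MutableData() accessor are illustrative, not necessarily part of the real header.

// copy_on_write_buffer_sketch.h - illustrative sketch only, not the project header.
#include <cstddef>
#include <cstdint>
#include <memory>
#include <vector>

class CopyOnWriteBufferSketch {
 public:
  CopyOnWriteBufferSketch() = default;
  explicit CopyOnWriteBufferSketch(size_t size)
      : buffer_(std::make_shared<std::vector<uint8_t>>(size)) {}
  CopyOnWriteBufferSketch(const uint8_t* data, size_t size)
      : buffer_(std::make_shared<std::vector<uint8_t>>(data, data + size)) {}

  // Copies are cheap: only the shared_ptr is duplicated, the bytes are shared.
  CopyOnWriteBufferSketch(const CopyOnWriteBufferSketch&) = default;
  CopyOnWriteBufferSketch& operator=(const CopyOnWriteBufferSketch&) = default;

  const uint8_t* data() const { return buffer_ ? buffer_->data() : nullptr; }
  size_t size() const { return buffer_ ? buffer_->size() : 0; }

  // Writable access detaches (deep-copies) first if the storage is shared.
  uint8_t* MutableData() {
    if (!buffer_) return nullptr;
    if (buffer_.use_count() > 1) {
      buffer_ = std::make_shared<std::vector<uint8_t>>(*buffer_);
    }
    return buffer_->data();
  }

 private:
  std::shared_ptr<std::vector<uint8_t>> buffer_;
};

With a buffer like this behind VideoFrame, the pattern used in IceTransportController::SendVideo, building a RawFrame and capturing it by value in task_queue_encode_->PostTask([this, raw_frame]() mutable { ... }), copies a pointer and a few integers rather than the NV12 payload, and on_receive_decoded_frame / on_encoded_image can take frames by value or const reference without the deep copies the old raw-pointer VideoFrame required.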