Mirror of https://github.com/kunkundi/crossdesk.git (synced 2025-10-26 20:25:34 +08:00)
[feat] complete h264 frame assembler
@@ -1,5 +1,7 @@
 #include "h264_frame_assember.h"
 
+#include "log.h"
+
 H264FrameAssembler::H264FrameAssembler() {}
 
 H264FrameAssembler::~H264FrameAssembler() {}
@@ -30,21 +32,22 @@ int64_t H264FrameAssembler::Unwrap(uint16_t seq_num) {
   return (int64_t)seq_num;
 }
 
-std::vector<std::unique_ptr<RtpPacketH264>>& H264FrameAssembler::InsertPacket(
+std::vector<std::unique_ptr<RtpPacketH264>> H264FrameAssembler::InsertPacket(
     std::unique_ptr<RtpPacketH264> rtp_packet) {
   std::vector<std::unique_ptr<RtpPacketH264>> result;
 
-  int64_t unwrapped_seq_num = Unwrap(rtp_packet->SequenceNumber());
+  int64_t unwrapped_seq_num =
+      rtp_seq_num_unwrapper_.Unwrap(rtp_packet->SequenceNumber());
   auto& packet_slotted = GetPacketFromBuffer(unwrapped_seq_num);
   if (packet_slotted != nullptr &&
       AheadOrAt(packet_slotted->Timestamp(), rtp_packet->Timestamp())) {
     // The incoming `packet` is old or a duplicate.
-    return std::move(result);
+    return result;
   } else {
     packet_slotted = std::move(rtp_packet);
  }
 
-  return std::move(FindFrames(unwrapped_seq_num));
+  return FindFrames(unwrapped_seq_num);
 }
 
 std::unique_ptr<RtpPacketH264>& H264FrameAssembler::GetPacketFromBuffer(
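Reviewer note on the hunk above: InsertPacket now indexes the packet buffer with a 64-bit unwrapped sequence number from webrtc::SeqNumUnwrapper instead of the local Unwrap() stub, which simply casts the 16-bit value. That way the index presumably keeps growing monotonically across 16-bit wrap-around when GetPacketFromBuffer() maps it onto the fixed-size buffer. The sketch below only illustrates the unwrapping idea; it is not the WebRTC implementation, and the class name is invented for the example.

    #include <cstdint>
    #include <optional>

    // Illustration of 16-bit RTP sequence-number unwrapping (hypothetical
    // SeqNumUnwrapperSketch, not webrtc::SeqNumUnwrapper).
    class SeqNumUnwrapperSketch {
     public:
      int64_t Unwrap(uint16_t seq) {
        if (!last_seq_) {
          last_unwrapped_ = seq;
        } else {
          // Interpreting the 16-bit difference as signed keeps small backward
          // jumps backward while forward wrap-around (65535 -> 0) still
          // advances the 64-bit counter.
          last_unwrapped_ += static_cast<int16_t>(seq - *last_seq_);
        }
        last_seq_ = seq;
        return last_unwrapped_;
      }

     private:
      std::optional<uint16_t> last_seq_;
      int64_t last_unwrapped_ = 0;
    };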
@@ -98,13 +101,14 @@ std::vector<std::unique_ptr<RtpPacketH264>> H264FrameAssembler::FindFrames(
     if (prev_packet == nullptr ||
         prev_packet->Timestamp() != rtp_timestamp) {
       const auto& current_packet = GetPacketFromBuffer(seq_num_start);
+
       if (!current_packet->FuAStart()) {
         // First packet of the frame is missing.
         return result;
       }
 
-      for (int64_t seq_num = seq_num_start; seq_num <= seq_num; ++seq_num) {
-        auto& packet = GetPacketFromBuffer(seq_num);
+      for (int64_t seq = seq_num_start; seq <= seq_num; ++seq) {
+        auto& packet = GetPacketFromBuffer(seq);
         result.push_back(std::move(packet));
       }
       break;
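Reviewer note on the hunk above: the old loop declared a fresh seq_num that shadowed the outer seq_num bound, so its condition compared the counter with itself and was always true. Renaming the loop variable to seq restores the intended upper bound. A standalone illustration of the pitfall, with all names hypothetical:

    #include <cstdio>

    int main() {
      long seq_num = 10;       // outer bound (last packet of the frame)
      long seq_num_start = 5;

      // Buggy form: the inner seq_num shadows the bound, so
      // "seq_num <= seq_num" is always true and the loop has no real limit.
      // for (long seq_num = seq_num_start; seq_num <= seq_num; ++seq_num) {}

      // Fixed form: a distinct loop variable compared against the bound.
      for (long seq = seq_num_start; seq <= seq_num; ++seq) {
        printf("%ld\n", seq);
      }
      return 0;
    }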
@@ -9,6 +9,7 @@
 
 #include <array>
 
+#include "rtc_base/numerics/sequence_number_unwrapper.h"
 #include "rtp_packet_h264.h"
 
 #define MAX_PACKET_BUFFER_SIZE 2048
@@ -20,7 +21,7 @@ class H264FrameAssembler {
   ~H264FrameAssembler();
 
  public:
-  std::vector<std::unique_ptr<RtpPacketH264>>& InsertPacket(
+  std::vector<std::unique_ptr<RtpPacketH264>> InsertPacket(
       std::unique_ptr<RtpPacketH264> rtp_packet);
 
  private:
@@ -37,6 +38,8 @@ class H264FrameAssembler {
       packet_buffer_;
   std::array<int64_t, MAX_TRACKED_SEQUENCE_SIZE> last_continuous_in_sequence_;
   int64_t last_continuous_in_sequence_index_ = 0;
+
+  webrtc::SeqNumUnwrapper<uint16_t> rtp_seq_num_unwrapper_;
 };
 
 #endif
@@ -7,7 +7,7 @@
 #include "nack.h"
 #include "rtcp_sender.h"
 
-// #define SAVE_RTP_RECV_STREAM
+#define SAVE_RTP_RECV_STREAM
 
 #define NV12_BUFFER_SIZE (1280 * 720 * 3 / 2)
 #define RTCP_RR_INTERVAL 1000
@@ -195,39 +195,52 @@ void RtpVideoReceiver::InsertRtpPacket(RtpPacket& rtp_packet) {
     ProcessAv1RtpPacket(rtp_packet_av1);
   } else if (rtp_packet.PayloadType() == rtp::PAYLOAD_TYPE::H264 ||
              rtp_packet.PayloadType() == rtp::PAYLOAD_TYPE::H264 - 1) {
-    // std::unique_ptr<RtpPacketH264> rtp_packet_h264 =
-    //     std::make_unique<RtpPacketH264>();
-    // if (rtp_packet_h264->Build(rtp_packet.Buffer().data(),
-    //                            rtp_packet.Size())) {
-    //   std::vector<std::unique_ptr<RtpPacketH264>> complete_frame = std::move(
-    //       h264_frame_assembler_.InsertPacket(std::move(rtp_packet_h264)));
-    //   if (!complete_frame.empty()) {
-    //     for (auto& frame : complete_frame) {
-    //       ReceivedFrame received_frame(frame->Payload(),
-    //                                    frame->PayloadSize());
-    //       received_frame.SetReceivedTimestamp(clock_->CurrentTime().us());
-    //       received_frame.SetCapturedTimestamp(
-    //           (static_cast<int64_t>(frame->Timestamp()) /
-    //                rtp::kMsToRtpTimestamp -
-    //            delta_ntp_internal_ms_) *
-    //           1000);
-    //       compelete_video_frame_queue_.push(received_frame);
-    //     }
-    //   }
-    // }
+    std::unique_ptr<RtpPacketH264> rtp_packet_h264 =
+        std::make_unique<RtpPacketH264>();
+    if (rtp_packet.Buffer().data() != nullptr && rtp_packet.Size() > 0 &&
+        rtp_packet_h264->Build(rtp_packet.Buffer().data(), rtp_packet.Size())) {
+      rtp_packet_h264->GetFrameHeaderInfo();
+      std::vector<std::unique_ptr<RtpPacketH264>> complete_frame =
+          h264_frame_assembler_.InsertPacket(std::move(rtp_packet_h264));
+      if (!complete_frame.empty()) {
+        uint8_t* nv12_data_ = new uint8_t[NV12_BUFFER_SIZE];
+        uint8_t* dest = nv12_data_;
+        size_t complete_frame_size = 0;
+        for (auto& frame : complete_frame) {
+          memcpy(dest, frame->Payload(), frame->PayloadSize());
+          dest += frame->PayloadSize();
+          complete_frame_size += frame->PayloadSize();
+        }
 
-    RtpPacketH264 rtp_packet_h264;
-    if (rtp_packet_h264.Build(rtp_packet.Buffer().data(), rtp_packet.Size())) {
-      rtp_packet_h264.GetFrameHeaderInfo();
-      bool is_missing_packet = ProcessH264RtpPacket(rtp_packet_h264);
-      if (!is_missing_packet) {
-        receive_side_congestion_controller_.OnReceivedPacket(
-            rtp_packet_received, MediaType::VIDEO);
-        nack_->OnReceivedPacket(rtp_packet.SequenceNumber(), true);
-      } else {
-        nack_->OnReceivedPacket(rtp_packet.SequenceNumber(), false);
+        ReceivedFrame received_frame(nv12_data_, complete_frame_size);
+        received_frame.SetReceivedTimestamp(clock_->CurrentTime().us());
+        received_frame.SetCapturedTimestamp(
+            (static_cast<int64_t>(complete_frame[0]->Timestamp()) /
+                 rtp::kMsToRtpTimestamp -
+             delta_ntp_internal_ms_) *
+            1000);
+        compelete_video_frame_queue_.push(received_frame);
+
+        fwrite((unsigned char*)received_frame.Buffer(), 1,
+               received_frame.Size(), file_rtp_recv_);
+
+        delete[] nv12_data_;
       }
     }
+
+    // RtpPacketH264 rtp_packet_h264;
+    // if (rtp_packet_h264.Build(rtp_packet.Buffer().data(), rtp_packet.Size()))
+    // {
+    //   rtp_packet_h264.GetFrameHeaderInfo();
+    //   bool is_missing_packet = ProcessH264RtpPacket(rtp_packet_h264);
+    //   if (!is_missing_packet) {
+    //     receive_side_congestion_controller_.OnReceivedPacket(
+    //         rtp_packet_received, MediaType::VIDEO);
+    //     nack_->OnReceivedPacket(rtp_packet.SequenceNumber(), true);
+    //   } else {
+    //     nack_->OnReceivedPacket(rtp_packet.SequenceNumber(), false);
+    //   }
+    // }
   }
 }
 
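Reviewer note on the hunk above: the assembled frame is concatenated into a raw new[] buffer of fixed NV12_BUFFER_SIZE bytes, with no check that the summed payload sizes fit, and the fwrite() to file_rtp_recv_ is not wrapped in the SAVE_RTP_RECV_STREAM guard within this hunk. A possible hardening is to size the buffer from the payloads themselves and let a std::vector own the memory. The sketch below assumes only that packets expose Payload()/PayloadSize() like RtpPacketH264; the PayloadView stand-in and function name are invented for the example.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for anything exposing Payload()/PayloadSize().
    struct PayloadView {
      const uint8_t* data;
      size_t size;
      const uint8_t* Payload() const { return data; }
      size_t PayloadSize() const { return size; }
    };

    // Concatenate a complete frame into a right-sized, self-owning buffer,
    // avoiding the fixed NV12_BUFFER_SIZE cap and the manual new[]/delete[].
    std::vector<uint8_t> AssemblePayload(
        const std::vector<PayloadView>& frame_packets) {
      size_t total = 0;
      for (const auto& p : frame_packets) total += p.PayloadSize();

      std::vector<uint8_t> buffer;
      buffer.reserve(total);
      for (const auto& p : frame_packets) {
        buffer.insert(buffer.end(), p.Payload(), p.Payload() + p.PayloadSize());
      }
      return buffer;
    }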
@@ -3,6 +3,8 @@
 #include "log.h"
 #include "rtc_base/network/sent_packet.h"
 
+#define SAVE_RTP_SENT_STREAM
+
 VideoChannelSend::VideoChannelSend(
     std::shared_ptr<SystemClock> clock, std::shared_ptr<IceAgent> ice_agent,
     std::shared_ptr<PacketSender> packet_sender,
@@ -16,9 +18,24 @@ VideoChannelSend::VideoChannelSend(
       delta_ntp_internal_ms_(clock->CurrentNtpInMilliseconds() -
                              clock->CurrentTimeMs()),
       rtp_packet_history_(clock),
-      clock_(clock){};
+      clock_(clock) {
+#ifdef SAVE_RTP_SENT_STREAM
+  file_rtp_sent_ = fopen("rtp_sent_stream.h264", "w+b");
+  if (!file_rtp_sent_) {
+    LOG_WARN("Fail to open rtp_sent_stream.h264");
+  }
+#endif
+};
 
-VideoChannelSend::~VideoChannelSend() {}
+VideoChannelSend::~VideoChannelSend() {
+#ifdef SAVE_RTP_SENT_STREAM
+  if (file_rtp_sent_) {
+    fflush(file_rtp_sent_);
+    fclose(file_rtp_sent_);
+    file_rtp_sent_ = nullptr;
+  }
+#endif
+}
 
 void VideoChannelSend::Initialize(rtp::PAYLOAD_TYPE payload_type) {
   rtp_video_sender_ =
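Reviewer note on the hunk above: the SAVE_RTP_SENT_STREAM dump file is opened in the constructor and flushed/closed by hand in the destructor. A RAII wrapper can collapse that pairing; the sketch below is a generic pattern, not code from this repo, and the type names are made up.

    #include <cstdio>
    #include <memory>

    // Custom deleter: flush and close the dump file when the owner goes away.
    struct FileCloser {
      void operator()(FILE* f) const {
        if (f) {
          fflush(f);
          fclose(f);
        }
      }
    };
    using FilePtr = std::unique_ptr<FILE, FileCloser>;

    // Usage sketch: the destructor side of the #ifdef block becomes unnecessary.
    // FilePtr file_rtp_sent{fopen("rtp_sent_stream.h264", "w+b")};
    // if (!file_rtp_sent) { /* warn, as the constructor above does */ }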
@@ -114,6 +131,12 @@ int VideoChannelSend::SendVideo(const EncodedFrame& encoded_frame) {
   rtp_packetizer_->Build((uint8_t*)encoded_frame.Buffer(),
                          (uint32_t)encoded_frame.Size(), rtp_timestamp,
                          true);
+
+#ifdef SAVE_RTP_SENT_STREAM
+  fwrite((unsigned char*)encoded_frame.Buffer(), 1, encoded_frame.Size(),
+         file_rtp_sent_);
+#endif
+
   packet_sender_->EnqueueRtpPacket(std::move(rtp_packets), rtp_timestamp);
 }
 
@@ -78,6 +78,9 @@ class VideoChannelSend {
   std::shared_ptr<SystemClock> clock_;
   RtpPacketHistory rtp_packet_history_;
   int64_t delta_ntp_internal_ms_;
+
+ private:
+  FILE* file_rtp_sent_ = nullptr;
 };
 
 #endif