[feat] support receiving and parsing congestion control feedback

dijunkun
2025-01-13 17:12:28 +08:00
parent 63ed77e43a
commit ba268016e4
15 changed files with 1112 additions and 7 deletions

@@ -1,5 +1,201 @@
#include "congestion_control.h"
#include <algorithm>
#include <cstdint>
#include <limits>
#include <numeric>
#include <vector>
#include "log.h"
constexpr int64_t kLossUpdateInterval = 1000;
// Pacing-rate relative to our target send rate.
// Multiplicative factor that is applied to the target bitrate to calculate
// the number of bytes that can be transmitted per interval.
// Increasing this factor will result in lower delays in cases of bitrate
// overshoots from the encoder.
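// Illustrative arithmetic (hypothetical numbers, not from this change): at a
// 1 Mbps target and a 5 ms pacing interval, 1'000'000 / 8 * 0.005 = 625 bytes
// match the target exactly, so with this multiplier roughly
// 625 * 2.5 ~= 1562 bytes may be sent per interval.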
constexpr float kDefaultPaceMultiplier = 2.5f;
// If the probe result is far below the current throughput estimate
// it's unlikely that the probe is accurate, so we don't want to drop too far.
// However, if we actually are overusing, we want to drop to something slightly
// below the current throughput estimate to drain the network queues.
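// E.g. with an acknowledged throughput of 800 kbps, a much lower probe result
// would not pull the estimate below roughly 0.85 * 800 = 680 kbps
// (illustrative figures; the clamp that uses this constant is still
// commented out further down).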
constexpr double kProbeDropThroughputFraction = 0.85;
CongestionControl::CongestionControl() {}
CongestionControl::~CongestionControl() {}
NetworkControlUpdate CongestionControl::OnTransportPacketsFeedback(
TransportPacketsFeedback report) {
if (report.packet_feedbacks.empty()) {
// TODO(bugs.webrtc.org/10125): Design a better mechanism to safe-guard
// against building very large network queues.
return NetworkControlUpdate();
}
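// data_in_flight is the amount of data sent but not yet acknowledged; the
// pushback controller tracks it against the congestion window so the target
// rate can be scaled down as the window fills up.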
if (congestion_window_pushback_controller_) {
congestion_window_pushback_controller_->UpdateOutstandingData(
report.data_in_flight);
}
int64_t max_feedback_rtt = std::numeric_limits<int64_t>::min();
int64_t min_propagation_rtt = std::numeric_limits<int64_t>::max();
int64_t max_recv_time = std::numeric_limits<int64_t>::min();
std::vector<PacketResult> feedbacks = report.ReceivedWithSendInfo();
for (const auto& feedback : feedbacks)
max_recv_time = std::max(max_recv_time, feedback.receive_time);
for (const auto& feedback : feedbacks) {
int64_t feedback_rtt =
report.feedback_time - feedback.sent_packet.send_time;
int64_t min_pending_time = max_recv_time - feedback.receive_time;
int64_t propagation_rtt = feedback_rtt - min_pending_time;
max_feedback_rtt = std::max(max_feedback_rtt, feedback_rtt);
min_propagation_rtt = std::min(min_propagation_rtt, propagation_rtt);
}
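// Worked example for the loop above (hypothetical numbers, assuming
// millisecond timestamps): feedback_rtt = 80 for a packet received 30 before
// the newest packet in this report (min_pending_time = 30) means ~30 of that
// RTT was spent waiting for the feedback to be assembled, leaving a
// propagation_rtt of about 50.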
if (max_feedback_rtt != std::numeric_limits<int64_t>::min() &&
min_propagation_rtt != std::numeric_limits<int64_t>::max()) {
feedback_max_rtts_.push_back(max_feedback_rtt);
const size_t kMaxFeedbackRttWindow = 32;
if (feedback_max_rtts_.size() > kMaxFeedbackRttWindow)
feedback_max_rtts_.pop_front();
// TODO(srte): Use time since last unacknowledged packet.
// bandwidth_estimation_->UpdatePropagationRtt(report.feedback_time,
// min_propagation_rtt);
}
if (packet_feedback_only_) {
if (!feedback_max_rtts_.empty()) {
int64_t sum_rtt_ms =
std::accumulate(feedback_max_rtts_.begin(), feedback_max_rtts_.end(),
static_cast<int64_t>(0));
// int64_t mean_rtt_ms = sum_rtt_ms / feedback_max_rtts_.size();
// if (delay_based_bwe_) delay_based_bwe_->OnRttUpdate(mean_rtt_ms);
}
int64_t feedback_min_rtt = std::numeric_limits<int64_t>::max();
for (const auto& packet_feedback : feedbacks) {
int64_t pending_time = max_recv_time - packet_feedback.receive_time;
int64_t rtt = report.feedback_time -
packet_feedback.sent_packet.send_time - pending_time;
// Value used for predicting NACK round trip time in FEC controller.
feedback_min_rtt = std::min(rtt, feedback_min_rtt);
}
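// E.g. per-packet RTT samples of 60 and 45 within one report yield
// feedback_min_rtt = 45, a conservative value for predicting how long a
// NACK-triggered retransmission would take (illustrative numbers).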
if (feedback_min_rtt != std::numeric_limits<int64_t>::max() &&
feedback_min_rtt != std::numeric_limits<int64_t>::min()) {
// bandwidth_estimation_->UpdateRtt(feedback_min_rtt,
// report.feedback_time);
}
expected_packets_since_last_loss_update_ +=
report.PacketsWithFeedback().size();
for (const auto& packet_feedback : report.PacketsWithFeedback()) {
if (!packet_feedback.IsReceived())
lost_packets_since_last_loss_update_ += 1;
}
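// Once per kLossUpdateInterval the accumulated counters would feed the
// (currently disabled) loss-based estimator; e.g. 6 packets lost out of 200
// expected corresponds to a 3% loss fraction (illustrative numbers).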
if (report.feedback_time > next_loss_update_) {
next_loss_update_ = report.feedback_time + kLossUpdateInterval;
// bandwidth_estimation_->UpdatePacketsLost(
// lost_packets_since_last_loss_update_,
// expected_packets_since_last_loss_update_, report.feedback_time);
expected_packets_since_last_loss_update_ = 0;
lost_packets_since_last_loss_update_ = 0;
}
}
// std::optional<int64_t> alr_start_time =
// alr_detector_->GetApplicationLimitedRegionStartTime();
// if (previously_in_alr_ && !alr_start_time.has_value()) {
// int64_t now_ms = report.feedback_time;
// acknowledged_bitrate_estimator_->SetAlrEndedTime(report.feedback_time);
// probe_controller_->SetAlrEndedTimeMs(now_ms);
// }
// previously_in_alr_ = alr_start_time.has_value();
// acknowledged_bitrate_estimator_->IncomingPacketFeedbackVector(
// report.SortedByReceiveTime());
// auto acknowledged_bitrate = acknowledged_bitrate_estimator_->bitrate();
// bandwidth_estimation_->SetAcknowledgedRate(acknowledged_bitrate,
// report.feedback_time);
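// Packets sent as part of a pacer-driven bitrate probe carry a probe cluster
// id in their pacing info; ordinary media packets use
// PacedPacketInfo::kNotAProbe, so only probe packets would reach the (still
// disabled) probe bitrate estimator.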
for (const auto& feedback : report.SortedByReceiveTime()) {
if (feedback.sent_packet.pacing_info.probe_cluster_id !=
PacedPacketInfo::kNotAProbe) {
// probe_bitrate_estimator_->HandleProbeAndEstimateBitrate(feedback);
}
}
// if (network_estimator_) {
// network_estimator_->OnTransportPacketsFeedback(report);
// // SetNetworkStateEstimate(network_estimator_->GetCurrentEstimate());
// }
// std::optional<int64_t> probe_bitrate =
// probe_bitrate_estimator_->FetchAndResetLastEstimatedBitrate();
// if (ignore_probes_lower_than_network_estimate_ && probe_bitrate &&
// estimate_ && *probe_bitrate < delay_based_bwe_->last_estimate() &&
// *probe_bitrate < estimate_->link_capacity_lower) {
// probe_bitrate.reset();
// }
// if (limit_probes_lower_than_throughput_estimate_ && probe_bitrate &&
// acknowledged_bitrate) {
// Limit the backoff to something slightly below the acknowledged
// bitrate. ("Slightly below" because we want to drain the queues
// if we are actually overusing.)
// The acknowledged bitrate shouldn't normally be higher than the delay
// based estimate, but it could happen e.g. due to packet bursts or
// encoder overshoot. We use std::min to ensure that a probe result
// below the current BWE never causes an increase.
// int64_t limit =
// std::min(delay_based_bwe_->last_estimate(),
// *acknowledged_bitrate * kProbeDropThroughputFraction);
// probe_bitrate = std::max(*probe_bitrate, limit);
// }
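// Illustrative clamp (hypothetical numbers): a 600 kbps probe result with a
// 1.2 Mbps delay-based estimate and 1.0 Mbps acknowledged bitrate would be
// raised to min(1.2 Mbps, 0.85 * 1.0 Mbps) = 850 kbps once this path is
// enabled.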
NetworkControlUpdate update;
bool recovered_from_overuse = false;
// DelayBasedBwe::Result result;
// result = delay_based_bwe_->IncomingPacketFeedbackVector(
// report, acknowledged_bitrate, probe_bitrate, estimate_,
// alr_start_time.has_value());
// if (result.updated) {
// if (result.probe) {
// bandwidth_estimation_->SetSendBitrate(result.target_bitrate,
// report.feedback_time);
// }
// Since SetSendBitrate now resets the delay-based estimate, we have to
// call UpdateDelayBasedEstimate after SetSendBitrate.
// bandwidth_estimation_->UpdateDelayBasedEstimate(report.feedback_time,
// result.target_bitrate);
// }
// bandwidth_estimation_->UpdateLossBasedEstimator(
// report, result.delay_detector_state, probe_bitrate,
// alr_start_time.has_value());
// if (result.updated) {
// // Update the estimate in the ProbeController, in case we want to probe.
// MaybeTriggerOnNetworkChanged(&update, report.feedback_time);
// }
// recovered_from_overuse = result.recovered_from_overuse;
// if (recovered_from_overuse) {
// probe_controller_->SetAlrStartTimeMs(alr_start_time);
// auto probes = probe_controller_->RequestProbe(report.feedback_time);
// update.probe_cluster_configs.insert(update.probe_cluster_configs.end(),
// probes.begin(), probes.end());
// }
// No valid RTT could be because send-side BWE isn't used, in which case
// we don't try to limit the outstanding packets.
// if (rate_control_settings_.UseCongestionWindow() &&
// max_feedback_rtt.IsFinite()) {
// UpdateCongestionWindowSize();
// }
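// If a pushback controller is present, the window is handed to it and applied
// indirectly by scaling the target rate; otherwise the window (if any) is
// exported in the update for the caller to enforce.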
if (congestion_window_pushback_controller_ && current_data_window_) {
congestion_window_pushback_controller_->SetDataWindow(
*current_data_window_);
} else {
update.congestion_window = current_data_window_;
}
return update;
}