diff --git a/Cargo.lock b/Cargo.lock index 46fce5c0..e184fce1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2643,15 +2643,23 @@ dependencies = [ "atomic_refcell", "bitstream-io", "chrono", + "futures", + "gio", "gst-plugin-version-helper", "gstreamer", "gstreamer-app", + "gstreamer-base", "gstreamer-check", + "gstreamer-net", "gstreamer-rtp", + "gstreamer-video", + "log", "once_cell", "rand", + "rtcp-types", "rtp-types", "smallvec", + "tokio", ] [[package]] @@ -5397,6 +5405,14 @@ dependencies = [ "winapi", ] +[[package]] +name = "rtcp-types" +version = "0.0.1" +source = "git+https://github.com/ystreet/rtcp-types#c1da8a1a193a0c02d798fea5f16863b69abd9000" +dependencies = [ + "thiserror", +] + [[package]] name = "rtp-types" version = "0.1.1" diff --git a/docs/plugins/gst_plugins_cache.json b/docs/plugins/gst_plugins_cache.json index 755ba1c2..97e9c703 100644 --- a/docs/plugins/gst_plugins_cache.json +++ b/docs/plugins/gst_plugins_cache.json @@ -6304,6 +6304,141 @@ }, "rank": "marginal" }, + "rtpbin2": { + "author": "Matthew Waters ", + "description": "RTP sessions management", + "hierarchy": [ + "GstRtpBin2", + "GstElement", + "GstObject", + "GInitiallyUnowned", + "GObject" + ], + "klass": "Network/RTP/Filter", + "pad-templates": { + "rtcp_recv_sink_%%u": { + "caps": "application/x-rtcp:\n", + "direction": "sink", + "presence": "request" + }, + "rtcp_send_src_%%u": { + "caps": "application/x-rtcp:\n", + "direction": "src", + "presence": "request" + }, + "rtp_recv_sink_%%u": { + "caps": "application/x-rtp:\n", + "direction": "sink", + "presence": "request" + }, + "rtp_recv_src_%%u_%%u_%%u": { + "caps": "application/x-rtp:\n", + "direction": "src", + "presence": "sometimes" + }, + "rtp_send_sink_%%u": { + "caps": "application/x-rtp:\n", + "direction": "sink", + "presence": "request" + }, + "rtp_send_src_%%u": { + "caps": "application/x-rtp:\n", + "direction": "src", + "presence": "sometimes" + } + }, + "properties": { + "latency": { + "blurb": "Amount of ms to buffer", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "default": "200", + "max": "-1", + "min": "0", + "mutable": "ready", + "readable": true, + "type": "guint", + "writable": true + }, + "min-rtcp-interval": { + "blurb": "Minimum time (in ms) between RTCP reports", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "default": "5000", + "max": "-1", + "min": "0", + "mutable": "ready", + "readable": true, + "type": "guint", + "writable": true + }, + "reduced-size-rtcp": { + "blurb": "Use reduced size RTCP. 
Only has an effect if rtp-profile=avpf", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "default": "false", + "mutable": "ready", + "readable": true, + "type": "gboolean", + "writable": true + }, + "rtp-profile": { + "blurb": "RTP Profile to use", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "default": "avp (0)", + "mutable": "ready", + "readable": true, + "type": "GstRtpBin2Profile", + "writable": true + }, + "stats": { + "blurb": "Statistics about the session", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "mutable": "null", + "readable": true, + "type": "guint", + "writable": false + }, + "timestamping-mode": { + "blurb": "Govern how to pick presentation timestamps for packets", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "default": "skew (2)", + "mutable": "ready", + "readable": true, + "type": "GstRtpBin2TimestampingMode", + "writable": true + } + }, + "rank": "none", + "signals": { + "get-session": { + "action": true, + "args": [ + { + "name": "arg0", + "type": "guint" + } + ], + "return-type": "GstRtpBin2Session", + "when": "last" + } + } + }, "rtpgccbwe": { "author": "Thibault Saunier ", "description": "Estimates current network bandwidth using the Google Congestion Control algorithm notifying about it through the 'bitrate' property", diff --git a/net/rtp/Cargo.toml b/net/rtp/Cargo.toml index 7361a4ea..681a69a9 100644 --- a/net/rtp/Cargo.toml +++ b/net/rtp/Cargo.toml @@ -12,12 +12,21 @@ rust-version.workspace = true atomic_refcell = "0.1" bitstream-io = "2.1" chrono = { version = "0.4", default-features = false } -gst = { workspace = true, features = ["v1_20"] } -gst-rtp = { workspace = true, features = ["v1_20"] } +futures = "0.3" +gio.workspace = true +gst = { workspace = true, features = ["v1_20"] } +gst-base = { workspace = true, features = ["v1_20"] } +gst-net = { workspace = true, features = ["v1_20"] } +gst-rtp = { workspace = true, features = ["v1_20"] } +gst-video = { workspace = true, features = ["v1_20"] } +log = "0.4" once_cell.workspace = true rand = { version = "0.8", default-features = false, features = ["std", "std_rng" ] } rtp-types = { version = "0.1" } +rtcp-types = { git = "https://github.com/ystreet/rtcp-types", version = "0.0" } smallvec = { version = "1.11", features = ["union", "write", "const_generics", "const_new"] } +# TODO: experiment with other async executors (mio, async-std, etc) +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "time", "sync"] } [dev-dependencies] gst-check = { workspace = true, features = ["v1_20"] } @@ -48,4 +57,4 @@ versioning = false import_library = false [package.metadata.capi.pkg_config] -requires_private = "gstreamer-1.0, gstreamer-base-1.0, gstreamer-rtp-1.0, gobject-2.0, glib-2.0, gmodule-2.0" +requires_private = "gstreamer-1.0, gstreamer-base-1.0, gstreamer-rtp-1.0, gstreamer-net-1.0, gstreamer-video-1.0 gobject-2.0, glib-2.0, gmodule-2.0, gio-2.0" diff --git a/net/rtp/src/lib.rs b/net/rtp/src/lib.rs index 7d263ae3..9c2d3fea 100644 --- a/net/rtp/src/lib.rs +++ b/net/rtp/src/lib.rs @@ -14,9 +14,15 @@ * * Since: plugins-rs-0.9.0 */ + +#[macro_use] +extern crate log; + use gst::glib; mod gcc; +mod rtpbin2; +mod utils; mod audio_discont; mod baseaudiopay; @@ -32,6 +38,7 @@ mod tests; fn plugin_init(plugin: &gst::Plugin) -> Result<(), 
glib::BoolError> { gcc::register(plugin)?; + rtpbin2::register(plugin)?; #[cfg(feature = "doc")] { @@ -68,3 +75,14 @@ gst::plugin_define!( env!("CARGO_PKG_REPOSITORY"), env!("BUILD_REL_DATE") ); + +#[cfg(test)] +pub(crate) fn test_init() { + use std::sync::Once; + static INIT: Once = Once::new(); + + INIT.call_once(|| { + gst::init().unwrap(); + plugin_register_static().expect("rtp plugin test"); + }); +} diff --git a/net/rtp/src/rtpbin2/config.rs b/net/rtp/src/rtpbin2/config.rs new file mode 100644 index 00000000..f8579bba --- /dev/null +++ b/net/rtp/src/rtpbin2/config.rs @@ -0,0 +1,318 @@ +// SPDX-License-Identifier: MPL-2.0 + +use gst::glib; +use gst::prelude::*; +use gst::subclass::prelude::*; +use once_cell::sync::Lazy; +use std::sync::{Mutex, Weak}; + +use crate::rtpbin2::internal::SharedSessionInner; + +static CAT: Lazy = Lazy::new(|| { + gst::DebugCategory::new( + "rtp2-config", + gst::DebugColorFlags::empty(), + Some("Rtp2 config"), + ) +}); + +glib::wrapper! { + pub struct Rtp2Session(ObjectSubclass); +} + +impl Rtp2Session { + pub(crate) fn new(weak_session: Weak>) -> Self { + let ret = glib::Object::new::(); + let imp = ret.imp(); + imp.set_session(weak_session); + ret + } +} + +mod imp { + use std::sync::Arc; + + use super::*; + + #[derive(Debug, Default)] + struct State { + pub(super) weak_session: Option>>, + } + + #[derive(Debug, Default)] + pub struct Rtp2Session { + state: Mutex, + } + + impl Rtp2Session { + pub(super) fn set_session(&self, weak_session: Weak>) { + let mut state = self.state.lock().unwrap(); + state.weak_session = Some(weak_session); + } + + fn session(&self) -> Option>> { + self.state + .lock() + .unwrap() + .weak_session + .as_ref() + .unwrap() + .upgrade() + } + + pub fn set_pt_map(&self, pt_map: Option) { + let Some(session) = self.session() else { + return; + }; + let mut session = session.lock().unwrap(); + session.clear_pt_map(); + let Some(pt_map) = pt_map else { + return; + }; + + for (key, value) in pt_map.iter() { + let Ok(pt) = key.parse::() else { + gst::warning!(CAT, "failed to parse key as a pt"); + continue; + }; + if let Ok(caps) = value.get::() { + session.add_caps(caps); + } else { + gst::warning!(CAT, "{pt} does not contain a caps value"); + continue; + } + } + } + + pub fn pt_map(&self) -> gst::Structure { + let mut ret = gst::Structure::builder("application/x-rtp2-pt-map"); + let Some(session) = self.session() else { + return ret.build(); + }; + let session = session.lock().unwrap(); + + for (pt, caps) in session.pt_map() { + ret = ret.field(pt.to_string(), caps); + } + + ret.build() + } + + pub fn stats(&self) -> Option { + let Some(session) = self.session() else { + return None; + }; + let session = session.lock().unwrap(); + Some(session.stats()) + } + } + + #[glib::object_subclass] + impl ObjectSubclass for Rtp2Session { + const NAME: &'static str = "GstRtp2Session"; + type Type = super::Rtp2Session; + type ParentType = glib::Object; + } + + impl ObjectImpl for Rtp2Session { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + vec![glib::ParamSpecBoxed::builder::("pt-map") + .nick("RTP Payload Type Map") + .blurb("Mapping of RTP payload type to caps") + .build()] + }); + + PROPERTIES.as_ref() + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + match pspec.name() { + "pt-map" => self.pt_map().to_value(), + "stats" => self.stats().to_value(), + _ => unreachable!(), + } + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { 
+ match pspec.name() { + "pt-map" => self.set_pt_map( + value + .get::>() + .expect("Type checked upstream"), + ), + _ => unreachable!(), + } + } + + fn signals() -> &'static [glib::subclass::Signal] { + static SIGNALS: Lazy> = Lazy::new(|| { + vec![ + glib::subclass::Signal::builder("new-ssrc") + .param_types([u32::static_type()]) + .build(), + glib::subclass::Signal::builder("bye-ssrc") + .param_types([u32::static_type()]) + .build(), + ] + }); + + SIGNALS.as_ref() + } + } +} + +#[cfg(test)] +mod tests { + use std::sync::{ + atomic::{AtomicBool, AtomicUsize}, + Arc, + }; + + use crate::{rtpbin2::session::tests::generate_rtp_packet, test_init}; + + use super::*; + + static ELEMENT_COUNTER: AtomicUsize = AtomicUsize::new(0); + + fn next_element_counter() -> usize { + ELEMENT_COUNTER.fetch_add(1, std::sync::atomic::Ordering::SeqCst) + } + + #[test] + fn pt_map_get_empty() { + test_init(); + let id = next_element_counter(); + let rtpbin2 = gst::ElementFactory::make("rtpsend") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let _pad = rtpbin2.request_pad_simple("rtp_sink_0").unwrap(); + let session = rtpbin2.emit_by_name::("get-session", &[&0u32]); + let pt_map = session.property::("pt-map"); + assert!(pt_map.has_name("application/x-rtp2-pt-map")); + assert_eq!(pt_map.fields().len(), 0); + } + + #[test] + fn pt_map_set() { + test_init(); + let id = next_element_counter(); + let rtpbin2 = gst::ElementFactory::make("rtpsend") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let _pad = rtpbin2.request_pad_simple("rtp_sink_0").unwrap(); + let session = rtpbin2.emit_by_name::("get-session", &[&0u32]); + let pt = 96i32; + let pt_caps = gst::Caps::builder("application/x-rtp") + .field("payload", pt) + .field("clock-rate", 90000i32) + .build(); + let pt_map = gst::Structure::builder("application/x-rtp2-pt-map") + .field(pt.to_string(), pt_caps.clone()) + .build(); + session.set_property("pt-map", pt_map); + let prop = session.property::("pt-map"); + assert!(prop.has_name("application/x-rtp2-pt-map")); + assert_eq!(prop.fields().len(), 1); + let caps = prop.get::(pt.to_string()).unwrap(); + assert_eq!(pt_caps, caps); + } + + #[test] + fn pt_map_set_none() { + test_init(); + let id = next_element_counter(); + let rtpbin2 = gst::ElementFactory::make("rtpsend") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let _pad = rtpbin2.request_pad_simple("rtp_sink_0").unwrap(); + let session = rtpbin2.emit_by_name::("get-session", &[&0u32]); + session.set_property("pt-map", None::); + let prop = session.property::("pt-map"); + assert!(prop.has_name("application/x-rtp2-pt-map")); + } + + #[test] + fn new_send_ssrc() { + test_init(); + let ssrc = 0x12345678; + let new_ssrc_hit = Arc::new(AtomicBool::new(false)); + let id = next_element_counter(); + let rtpbin2 = gst::ElementFactory::make("rtpsend") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let mut h = + gst_check::Harness::with_element(&rtpbin2, Some("rtp_sink_0"), Some("rtp_src_0")); + let session = h + .element() + .unwrap() + .emit_by_name::("get-session", &[&0u32]); + let ssrc_hit = new_ssrc_hit.clone(); + session.connect("new-ssrc", false, move |args| { + let new_ssrc = args[1].get::().unwrap(); + ssrc_hit.store(true, std::sync::atomic::Ordering::SeqCst); + assert_eq!(new_ssrc, ssrc); + None + }); + h.set_src_caps_str("application/x-rtp,payload=96,clock-rate=90000"); + let mut segment = gst::Segment::new(); + segment.set_format(gst::Format::Time); + 
h.push_event(gst::event::Segment::builder(&segment).build()); + let buf1 = gst::Buffer::from_mut_slice(generate_rtp_packet(ssrc, 0x34, 0x10, 16)); + h.push(buf1.clone()).unwrap(); + assert!(new_ssrc_hit.load(std::sync::atomic::Ordering::SeqCst)); + let buf2 = gst::Buffer::from_mut_slice(generate_rtp_packet(ssrc, 0x35, 0x10, 16)); + h.push(buf2.clone()).unwrap(); + + let buf3 = h.pull().unwrap(); + assert_eq!(buf3, buf1); + let buf4 = h.pull().unwrap(); + assert_eq!(buf4, buf2); + } + + #[test] + fn bye_send_ssrc() { + test_init(); + let ssrc = 0x12345678; + let (bye_ssrc_sender, bye_ssrc_receiver) = std::sync::mpsc::sync_channel(16); + let id = next_element_counter(); + let rtpbin2 = gst::ElementFactory::make("rtpsend") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let mut h = + gst_check::Harness::with_element(&rtpbin2, Some("rtp_sink_0"), Some("rtp_src_0")); + let mut h_rtcp = gst_check::Harness::with_element(&rtpbin2, None, Some("rtcp_src_0")); + let session = h + .element() + .unwrap() + .emit_by_name::("get-session", &[&0u32]); + session.connect("bye-ssrc", false, move |args| { + let bye_ssrc = args[1].get::().unwrap(); + assert_eq!(bye_ssrc, ssrc); + bye_ssrc_sender.send(ssrc).unwrap(); + None + }); + h.set_src_caps_str("application/x-rtp,payload=96,clock-rate=90000"); + let mut segment = gst::Segment::new(); + segment.set_format(gst::Format::Time); + h.push_event(gst::event::Segment::builder(&segment).build()); + let buf1 = gst::Buffer::from_mut_slice(generate_rtp_packet(ssrc, 0x34, 0x10, 16)); + h.push(buf1.clone()).unwrap(); + let buf2 = gst::Buffer::from_mut_slice(generate_rtp_packet(ssrc, 0x35, 0x10, 16)); + h.push(buf2.clone()).unwrap(); + + let buf3 = h.pull().unwrap(); + assert_eq!(buf3, buf1); + let buf4 = h.pull().unwrap(); + assert_eq!(buf4, buf2); + + h.push_event(gst::event::Eos::builder().build()); + let _rtcp = h_rtcp.pull().unwrap(); + assert_eq!(bye_ssrc_receiver.recv().unwrap(), ssrc); + } +} diff --git a/net/rtp/src/rtpbin2/internal.rs b/net/rtp/src/rtpbin2/internal.rs new file mode 100644 index 00000000..033311a8 --- /dev/null +++ b/net/rtp/src/rtpbin2/internal.rs @@ -0,0 +1,451 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, + task::Waker, + time::Duration, +}; + +use gst::glib; +use once_cell::sync::{Lazy, OnceCell}; + +use super::config::Rtp2Session; +use super::session::{RtpProfile, Session}; +use super::source::ReceivedRb; + +static CAT: Lazy = Lazy::new(|| { + gst::DebugCategory::new( + "rtpinternalsession", + gst::DebugColorFlags::empty(), + Some("RTP Session (internal)"), + ) +}); + +static SHARED_RTP_STATE: OnceCell>> = OnceCell::new(); + +#[derive(Debug, Clone)] +pub struct SharedRtpState { + name: String, + inner: Arc>, +} + +#[derive(Debug)] +struct SharedRtpStateInner { + sessions: HashMap, + send_outstanding: bool, + recv_outstanding: bool, +} + +impl SharedRtpState { + pub fn recv_get_or_init(name: String) -> Self { + SHARED_RTP_STATE + .get_or_init(|| Mutex::new(HashMap::new())) + .lock() + .unwrap() + .entry(name) + .and_modify(|v| { + v.inner.lock().unwrap().recv_outstanding = true; + }) + .or_insert_with_key(|name| SharedRtpState { + name: name.to_owned(), + inner: Arc::new(Mutex::new(SharedRtpStateInner { + sessions: HashMap::new(), + send_outstanding: false, + recv_outstanding: true, + })), + }) + .clone() + } + + pub fn send_get_or_init(name: String) -> Self { + SHARED_RTP_STATE + .get_or_init(|| Mutex::new(HashMap::new())) + .lock() + .unwrap() + .entry(name) + 
.and_modify(|v| { + v.inner.lock().unwrap().send_outstanding = true; + }) + .or_insert_with_key(|name| SharedRtpState { + name: name.to_owned(), + inner: Arc::new(Mutex::new(SharedRtpStateInner { + sessions: HashMap::new(), + send_outstanding: true, + recv_outstanding: false, + })), + }) + .clone() + } + + pub fn name(&self) -> &str { + &self.name + } + + pub fn unmark_send_outstanding(&self) { + let mut inner = self.inner.lock().unwrap(); + inner.send_outstanding = false; + if !inner.recv_outstanding { + Self::remove_from_global(&self.name); + } + } + + pub fn unmark_recv_outstanding(&self) { + let mut inner = self.inner.lock().unwrap(); + inner.recv_outstanding = false; + if !inner.send_outstanding { + Self::remove_from_global(&self.name); + } + } + + fn remove_from_global(name: &str) { + let _shared = SHARED_RTP_STATE.get().unwrap().lock().unwrap().remove(name); + } + + pub fn session_get_or_init(&self, id: usize, f: F) -> SharedSession + where + F: FnOnce() -> SharedSession, + { + self.inner + .lock() + .unwrap() + .sessions + .entry(id) + .or_insert_with(f) + .clone() + } +} + +#[derive(Debug, Clone)] +pub struct SharedSession { + pub(crate) id: usize, + pub(crate) inner: Arc>, + pub(crate) config: Rtp2Session, +} + +impl SharedSession { + pub fn new( + id: usize, + profile: RtpProfile, + min_rtcp_interval: Duration, + reduced_size_rtcp: bool, + ) -> Self { + let mut inner = SharedSessionInner::new(id); + inner.session.set_min_rtcp_interval(min_rtcp_interval); + inner.session.set_profile(profile); + inner.session.set_reduced_size_rtcp(reduced_size_rtcp); + let inner = Arc::new(Mutex::new(inner)); + let weak_inner = Arc::downgrade(&inner); + Self { + id, + inner, + config: Rtp2Session::new(weak_inner), + } + } +} + +#[derive(Debug)] +pub(crate) struct SharedSessionInner { + id: usize, + + pub(crate) session: Session, + + pub(crate) pt_map: HashMap, + + pub(crate) rtcp_waker: Option, + pub(crate) rtp_send_sinkpad: Option, +} + +impl SharedSessionInner { + fn new(id: usize) -> Self { + Self { + id, + + session: Session::new(), + + pt_map: HashMap::default(), + rtcp_waker: None, + rtp_send_sinkpad: None, + } + } + + pub fn clear_pt_map(&mut self) { + self.pt_map.clear(); + } + + pub fn add_caps(&mut self, caps: gst::Caps) { + let Some((pt, clock_rate)) = pt_clock_rate_from_caps(&caps) else { + return; + }; + let caps_clone = caps.clone(); + self.pt_map + .entry(pt) + .and_modify(move |entry| *entry = caps) + .or_insert_with(move || caps_clone); + self.session.set_pt_clock_rate(pt, clock_rate); + } + + pub(crate) fn caps_from_pt(&self, pt: u8) -> gst::Caps { + self.pt_map.get(&pt).cloned().unwrap_or( + gst::Caps::builder("application/x-rtp") + .field("payload", pt as i32) + .build(), + ) + } + + pub fn pt_map(&self) -> impl Iterator + '_ { + self.pt_map.iter().map(|(&k, v)| (k, v)) + } + + pub fn stats(&self) -> gst::Structure { + let mut session_stats = gst::Structure::builder("application/x-rtpbin2-session-stats") + .field("id", self.id as u64); + for ssrc in self.session.ssrcs() { + if let Some(ls) = self.session.local_send_source_by_ssrc(ssrc) { + let mut source_stats = + gst::Structure::builder("application/x-rtpbin2-source-stats") + .field("ssrc", ls.ssrc()) + .field("sender", true) + .field("local", true) + .field("packets-sent", ls.packet_count()) + .field("octets-sent", ls.octet_count()) + .field("bitrate", ls.bitrate() as u64); + if let Some(pt) = ls.payload_type() { + if let Some(clock_rate) = self.session.clock_rate_from_pt(pt) { + source_stats = 
source_stats.field("clock-rate", clock_rate); + } + } + if let Some(sr) = ls.last_sent_sr() { + source_stats = source_stats + .field("sr-ntptime", sr.ntp_timestamp().as_u64()) + .field("sr-rtptime", sr.rtp_timestamp()) + .field("sr-octet-count", sr.octet_count()) + .field("sr-packet-count", sr.packet_count()); + } + let rbs = gst::List::new(ls.received_report_blocks().map( + |(sender_ssrc, ReceivedRb { rb, .. })| { + gst::Structure::builder("application/x-rtcp-report-block") + .field("sender-ssrc", sender_ssrc) + .field("rb-fraction-lost", rb.fraction_lost()) + .field("rb-packets-lost", rb.cumulative_lost()) + .field("rb-extended_sequence_number", rb.extended_sequence_number()) + .field("rb-jitter", rb.jitter()) + .field("rb-last-sr-ntp-time", rb.last_sr_ntp_time()) + .field("rb-delay_since_last-sr-ntp-time", rb.delay_since_last_sr()) + .build() + }, + )); + match rbs.len() { + 0 => (), + 1 => { + source_stats = + source_stats.field("report-blocks", rbs.first().unwrap().clone()); + } + _ => { + source_stats = source_stats.field("report-blocks", rbs); + } + } + + // TODO: add jitter, packets-lost + session_stats = session_stats.field(ls.ssrc().to_string(), source_stats.build()); + } else if let Some(lr) = self.session.local_receive_source_by_ssrc(ssrc) { + let mut source_stats = + gst::Structure::builder("application/x-rtpbin2-source-stats") + .field("ssrc", lr.ssrc()) + .field("sender", false) + .field("local", true); + if let Some(pt) = lr.payload_type() { + if let Some(clock_rate) = self.session.clock_rate_from_pt(pt) { + source_stats = source_stats.field("clock-rate", clock_rate); + } + } + // TODO: add rb stats + session_stats = session_stats.field(lr.ssrc().to_string(), source_stats.build()); + } else if let Some(rs) = self.session.remote_send_source_by_ssrc(ssrc) { + let mut source_stats = + gst::Structure::builder("application/x-rtpbin2-source-stats") + .field("ssrc", rs.ssrc()) + .field("sender", true) + .field("local", false) + .field("octets-received", rs.octet_count()) + .field("packets-received", rs.packet_count()) + .field("bitrate", rs.bitrate() as u64) + .field("jitter", rs.jitter()) + .field("packets-lost", rs.packets_lost()); + if let Some(pt) = rs.payload_type() { + if let Some(clock_rate) = self.session.clock_rate_from_pt(pt) { + source_stats = source_stats.field("clock-rate", clock_rate); + } + } + if let Some(rtp_from) = rs.rtp_from() { + source_stats = source_stats.field("rtp-from", rtp_from.to_string()); + } + if let Some(rtcp_from) = rs.rtcp_from() { + source_stats = source_stats.field("rtcp-from", rtcp_from.to_string()); + } + if let Some(sr) = rs.last_received_sr() { + source_stats = source_stats + .field("sr-ntptime", sr.ntp_timestamp().as_u64()) + .field("sr-rtptime", sr.rtp_timestamp()) + .field("sr-octet-count", sr.octet_count()) + .field("sr-packet-count", sr.packet_count()); + } + if let Some(rb) = rs.last_sent_rb() { + source_stats = source_stats + .field("sent-rb-fraction-lost", rb.fraction_lost()) + .field("sent-rb-packets-lost", rb.cumulative_lost()) + .field( + "sent-rb-extended-sequence-number", + rb.extended_sequence_number(), + ) + .field("sent-rb-jitter", rb.jitter()) + .field("sent-rb-last-sr-ntp-time", rb.last_sr_ntp_time()) + .field( + "sent-rb-delay-since-last-sr-ntp-time", + rb.delay_since_last_sr(), + ); + } + let rbs = gst::List::new(rs.received_report_blocks().map( + |(sender_ssrc, ReceivedRb { rb, .. 
})| { + gst::Structure::builder("application/x-rtcp-report-block") + .field("sender-ssrc", sender_ssrc) + .field("rb-fraction-lost", rb.fraction_lost()) + .field("rb-packets-lost", rb.cumulative_lost()) + .field("rb-extended_sequence_number", rb.extended_sequence_number()) + .field("rb-jitter", rb.jitter()) + .field("rb-last-sr-ntp-time", rb.last_sr_ntp_time()) + .field("rb-delay_since_last-sr-ntp-time", rb.delay_since_last_sr()) + .build() + }, + )); + match rbs.len() { + 0 => (), + 1 => { + source_stats = + source_stats.field("report-blocks", rbs.first().unwrap().clone()); + } + _ => { + source_stats = source_stats.field("report-blocks", rbs); + } + } + session_stats = session_stats.field(rs.ssrc().to_string(), source_stats.build()); + } else if let Some(rr) = self.session.remote_receive_source_by_ssrc(ssrc) { + let source_stats = gst::Structure::builder("application/x-rtpbin2-source-stats") + .field("ssrc", rr.ssrc()) + .field("sender", false) + .field("local", false) + .build(); + session_stats = session_stats.field(rr.ssrc().to_string(), source_stats); + } + } + + session_stats.build() + } +} + +pub fn pt_clock_rate_from_caps(caps: &gst::CapsRef) -> Option<(u8, u32)> { + let Some(s) = caps.structure(0) else { + gst::debug!(CAT, "no structure!"); + return None; + }; + let Some((clock_rate, pt)) = Option::zip( + s.get::("clock-rate").ok(), + s.get::("payload").ok(), + ) else { + gst::debug!( + CAT, + "could not retrieve clock-rate and/or payload from structure" + ); + return None; + }; + if (0..=127).contains(&pt) && clock_rate > 0 { + Some((pt as u8, clock_rate as u32)) + } else { + gst::debug!( + CAT, + "payload value {pt} out of bounds or clock-rate {clock_rate} out of bounds" + ); + None + } +} + +static RUST_CAT: Lazy = Lazy::new(|| { + gst::DebugCategory::new( + "rust-log", + gst::DebugColorFlags::empty(), + Some("Logs from rust crates"), + ) +}); + +static GST_RUST_LOGGER_ONCE: once_cell::sync::OnceCell<()> = once_cell::sync::OnceCell::new(); +static GST_RUST_LOGGER: GstRustLogger = GstRustLogger {}; + +pub(crate) struct GstRustLogger {} + +impl GstRustLogger { + pub fn install() { + GST_RUST_LOGGER_ONCE.get_or_init(|| { + if log::set_logger(&GST_RUST_LOGGER).is_err() { + gst::warning!( + RUST_CAT, + "Cannot install log->gst logger, already installed?" 
+ ); + } else { + log::set_max_level(GstRustLogger::debug_level_to_log_level_filter( + RUST_CAT.threshold(), + )); + gst::info!(RUST_CAT, "installed log->gst logger"); + } + }); + } + + fn debug_level_to_log_level_filter(level: gst::DebugLevel) -> log::LevelFilter { + match level { + gst::DebugLevel::None => log::LevelFilter::Off, + gst::DebugLevel::Error => log::LevelFilter::Error, + gst::DebugLevel::Warning => log::LevelFilter::Warn, + gst::DebugLevel::Fixme | gst::DebugLevel::Info => log::LevelFilter::Info, + gst::DebugLevel::Debug => log::LevelFilter::Debug, + gst::DebugLevel::Log | gst::DebugLevel::Trace | gst::DebugLevel::Memdump => { + log::LevelFilter::Trace + } + _ => log::LevelFilter::Trace, + } + } + + fn log_level_to_debug_level(level: log::Level) -> gst::DebugLevel { + match level { + log::Level::Error => gst::DebugLevel::Error, + log::Level::Warn => gst::DebugLevel::Warning, + log::Level::Info => gst::DebugLevel::Info, + log::Level::Debug => gst::DebugLevel::Debug, + log::Level::Trace => gst::DebugLevel::Trace, + } + } +} + +impl log::Log for GstRustLogger { + fn enabled(&self, metadata: &log::Metadata) -> bool { + RUST_CAT.above_threshold(GstRustLogger::log_level_to_debug_level(metadata.level())) + } + + fn log(&self, record: &log::Record) { + let gst_level = GstRustLogger::log_level_to_debug_level(record.metadata().level()); + let file = record + .file() + .map(glib::GString::from) + .unwrap_or_else(|| glib::GString::from("rust-log")); + let function = record.target(); + let line = record.line().unwrap_or(0); + RUST_CAT.log( + None::<&glib::Object>, + gst_level, + file.as_gstr(), + function, + line, + *record.args(), + ); + } + + fn flush(&self) {} +} diff --git a/net/rtp/src/rtpbin2/jitterbuffer.rs b/net/rtp/src/rtpbin2/jitterbuffer.rs new file mode 100644 index 00000000..d0a61a42 --- /dev/null +++ b/net/rtp/src/rtpbin2/jitterbuffer.rs @@ -0,0 +1,669 @@ +use crate::utils::ExtendedSeqnum; +use rtp_types::RtpPacket; +use std::cmp::Ordering; +use std::collections::BTreeSet; +use std::time::{Duration, Instant}; + +#[derive(Debug, Clone, Copy)] +struct Stats { + num_late: u64, + num_lost: u64, + num_duplicates: u64, + num_pushed: u64, +} + +impl From for gst::Structure { + fn from(stats: Stats) -> gst::Structure { + gst::Structure::builder("application/x-rtp-jitterbuffer-stats") + .field("num-late", stats.num_late) + .field("num-duplicates", stats.num_duplicates) + .field("num-lost", stats.num_lost) + .field("num-pushed", stats.num_pushed) + .build() + } +} + +#[derive(Debug)] +pub struct JitterBuffer { + packet_counter: usize, + // A set of extended seqnums that we've already seen through, + // intentionally trimmed separately from the items list so that + // we can detect duplicates after the first copy has exited the + // queue + seqnums: BTreeSet, + items: BTreeSet, + latency: Duration, + // Arrival time, PTS + base_times: Option<(Instant, u64)>, + last_output_seqnum: Option, + extended_seqnum: ExtendedSeqnum, + last_input_ts: Option, + stats: Stats, + flushing: bool, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum PollResult { + Forward { id: usize, discont: bool }, + Drop(usize), + Timeout(Instant), + Empty, + Flushing, +} + +#[derive(Debug, PartialEq, Eq)] +pub enum QueueResult { + Queued(usize), + Late, + Duplicate, + Flushing, +} + +#[derive(Eq, Debug)] +struct Item { + id: usize, + // If not set, this is an event / query + pts: Option, + seqnum: u64, +} + +impl Ord for Item { + fn cmp(&self, other: &Self) -> Ordering { + self.seqnum + .cmp(&other.seqnum) + .then(match 
(self.pts, other.pts) { + (None, Some(_)) => Ordering::Greater, + (Some(_), None) => Ordering::Less, + _ => Ordering::Equal, + }) + } +} + +impl PartialOrd for Item { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl PartialEq for Item { + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } +} + +impl JitterBuffer { + pub fn new(latency: Duration) -> Self { + Self { + packet_counter: 0, + seqnums: BTreeSet::new(), + items: BTreeSet::new(), + latency, + base_times: None, + last_input_ts: None, + last_output_seqnum: None, + extended_seqnum: ExtendedSeqnum::default(), + stats: Stats { + num_late: 0, + num_lost: 0, + num_duplicates: 0, + num_pushed: 0, + }, + flushing: true, + } + } + + pub fn queue_serialized_item(&mut self) -> QueueResult { + let id = self.packet_counter; + self.packet_counter += 1; + let item = Item { + id, + pts: None, + seqnum: (*self.seqnums.last().unwrap_or(&0)), + }; + self.items.insert(item); + trace!("Queued serialized item and assigned ID {id}"); + + QueueResult::Queued(id) + } + + pub fn set_flushing(&mut self, flushing: bool) { + trace!("Flush changed from {} to {flushing}", self.flushing); + self.flushing = flushing; + self.last_output_seqnum = None; + } + + pub fn queue_packet(&mut self, rtp: &RtpPacket, mut pts: u64, now: Instant) -> QueueResult { + if self.flushing { + return QueueResult::Flushing; + } + + // From this point on we always work with extended sequence numbers + let seqnum = self.extended_seqnum.next(rtp.sequence_number()); + + if let Some(ts) = self.last_input_ts { + pts = pts.max(ts); + } + + self.last_input_ts = Some(pts); + + self.base_times.get_or_insert_with(|| { + debug!("Selected base times {:?} {}", now, pts); + + (now, pts) + }); + + // Maintain (and trim) our seqnum list for duplicate detection + while self.seqnums.len() >= std::u16::MAX as usize { + debug!("Trimming"); + self.seqnums.pop_first(); + } + + if self.seqnums.contains(&seqnum) { + trace!( + "Duplicated packet {} (extended {})", + rtp.sequence_number(), + seqnum, + ); + self.stats.num_duplicates += 1; + return QueueResult::Duplicate; + } + + self.seqnums.insert(seqnum); + + if let Some(last_output_seqnum) = self.last_output_seqnum { + if last_output_seqnum >= seqnum { + debug!( + "Late packet {} (extended {})", + rtp.sequence_number(), + seqnum + ); + self.stats.num_late += 1; + return QueueResult::Late; + } + } + + let id = self.packet_counter; + self.packet_counter += 1; + let item = Item { + id, + pts: Some(pts), + seqnum, + }; + + if !self.items.insert(item) { + unreachable!() + } + + trace!("Queued RTP packet with ts {pts}, assigned ID {id}"); + + QueueResult::Queued(id) + } + + pub fn poll(&mut self, now: Instant) -> PollResult { + if self.flushing { + if let Some(item) = self.items.pop_first() { + return PollResult::Drop(item.id); + } else { + return PollResult::Flushing; + } + } + + trace!("Polling at {:?}", now); + + let Some((base_instant, base_ts)) = self.base_times else { + return PollResult::Empty; + }; + + let duration_since_base_instant = now - base_instant; + + trace!( + "Duration since base instant {:?}", + duration_since_base_instant + ); + + let Some(item) = self.items.first() else { + return PollResult::Empty; + }; + + // If an event / query is at the top of the queue, it can be forwarded immediately + let Some(pts) = item.pts else { + let item = self.items.pop_first().unwrap(); + return PollResult::Forward { + id: item.id, + discont: false, + }; + }; + + let ts = 
pts.checked_sub(base_ts).unwrap(); + let deadline = Duration::from_nanos(ts) + self.latency; + + trace!( + "Considering packet {} with ts {ts}, deadline is {deadline:?}", + item.id + ); + + if deadline <= duration_since_base_instant { + debug!("Packet with id {} is ready", item.id); + + let discont = match self.last_output_seqnum { + None => true, + Some(last_output_seq_ext) => { + let gap = item.seqnum - last_output_seq_ext; + + self.stats.num_lost += gap - 1; + + gap != 1 + } + }; + + self.last_output_seqnum = Some(item.seqnum); + // Safe unwrap, we know the queue isn't empty at this point + let packet = self.items.pop_first().unwrap(); + + self.stats.num_pushed += 1; + + PollResult::Forward { + id: packet.id, + discont, + } + } else { + trace!("Packet with id {} is not ready", item.id); + PollResult::Timeout(base_instant + deadline) + } + } + + pub fn stats(&self) -> gst::Structure { + self.stats.into() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::rtpbin2::session::tests::generate_rtp_packet; + + #[test] + fn empty() { + let mut jb = JitterBuffer::new(Duration::from_secs(1)); + jb.set_flushing(false); + + let now = Instant::now(); + + assert_eq!(jb.poll(now), PollResult::Empty); + } + + #[test] + fn receive_one_packet_no_latency() { + let mut jb = JitterBuffer::new(Duration::from_secs(0)); + jb.set_flushing(false); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + + let now = Instant::now(); + + let QueueResult::Queued(id) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!(jb.poll(now), PollResult::Forward { id, discont: true }); + } + + #[test] + fn receive_one_packet_with_latency() { + let mut jb = JitterBuffer::new(Duration::from_secs(1)); + jb.set_flushing(false); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + + let mut now = Instant::now(); + + let QueueResult::Queued(id) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!( + jb.poll(now), + PollResult::Timeout(now + Duration::from_secs(1)) + ); + + now += Duration::from_secs(1); + now -= Duration::from_nanos(1); + + assert_eq!( + jb.poll(now), + PollResult::Timeout(now + Duration::from_nanos(1)) + ); + + now += Duration::from_nanos(1); + + assert_eq!(jb.poll(now), PollResult::Forward { id, discont: true }); + } + + #[test] + fn ordered_packets_no_latency() { + let mut jb = JitterBuffer::new(Duration::from_secs(0)); + jb.set_flushing(false); + + let now = Instant::now(); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + + let QueueResult::Queued(id_first) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + let rtp_data = generate_rtp_packet(0x12345678, 1, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id_second) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_first, + discont: true + } + ); + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_second, + discont: false + } + ); + } + + #[test] + fn ordered_packets_no_latency_with_gap() { + let mut jb = JitterBuffer::new(Duration::from_secs(0)); + jb.set_flushing(false); + + let now = Instant::now(); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id_first) = 
jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + let rtp_data = generate_rtp_packet(0x12345678, 2, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id_second) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_first, + discont: true + } + ); + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_second, + discont: true + } + ); + } + + #[test] + fn misordered_packets_no_latency() { + let mut jb = JitterBuffer::new(Duration::from_secs(0)); + jb.set_flushing(false); + + let now = Instant::now(); + + let rtp_data = generate_rtp_packet(0x12345678, 1, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!(jb.poll(now), PollResult::Forward { id, discont: true }); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!(jb.queue_packet(&packet, 0, now), QueueResult::Late); + + // Try and push a duplicate + let rtp_data = generate_rtp_packet(0x12345678, 1, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!(jb.queue_packet(&packet, 0, now), QueueResult::Duplicate); + + // We do accept future sequence numbers up to a distance of at least std::i16::MAX + let rtp_data = generate_rtp_packet(0x12345678, std::i16::MAX as u16 + 1, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!(jb.poll(now), PollResult::Forward { id, discont: true }); + + // But no further + let rtp_data = generate_rtp_packet(0x12345678, 2, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!(jb.queue_packet(&packet, 0, now), QueueResult::Late); + } + + #[test] + fn ordered_packets_with_latency() { + let mut jb = JitterBuffer::new(Duration::from_secs(1)); + jb.set_flushing(false); + + let mut now = Instant::now(); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id_first) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!( + jb.poll(now), + PollResult::Timeout(now + Duration::from_secs(1)) + ); + + let rtp_data = generate_rtp_packet(0x12345678, 1, 180000, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id_second) = jb.queue_packet(&packet, 2_000_000_000, now) else { + unreachable!() + }; + + assert_eq!( + jb.poll(now), + PollResult::Timeout(now + Duration::from_secs(1)) + ); + + now += Duration::from_secs(1); + + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_first, + discont: true + } + ); + + assert_eq!( + jb.poll(now), + PollResult::Timeout(now + Duration::from_secs(2)) + ); + + now += Duration::from_secs(2); + + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_second, + discont: false + } + ); + } + + fn assert_stats( + jb: &JitterBuffer, + num_late: u64, + num_lost: u64, + num_duplicates: u64, + num_pushed: u64, + ) { + let stats = jb.stats(); + + assert_eq!(stats.get::("num-late").unwrap(), num_late); + assert_eq!(stats.get::("num-lost").unwrap(), num_lost); + assert_eq!(stats.get::("num-duplicates").unwrap(), num_duplicates); + assert_eq!(stats.get::("num-pushed").unwrap(), num_pushed); + } + + #[test] + fn stats() { + let mut jb = JitterBuffer::new(Duration::from_secs(1)); + 
jb.set_flushing(false); + + let mut now = Instant::now(); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + jb.queue_packet(&packet, 0, now); + + assert_stats(&jb, 0, 0, 0, 0); + + // At this point pushing the same packet in before it gets output + // results in an increment of the duplicate stat + jb.queue_packet(&packet, 0, now); + assert_stats(&jb, 0, 0, 1, 0); + + now += Duration::from_secs(1); + let _ = jb.poll(now); + + assert_stats(&jb, 0, 0, 1, 1); + + // Pushing it after the first version got output also results in + // an increment of the duplicate stat + jb.queue_packet(&packet, 0, now); + assert_stats(&jb, 0, 0, 2, 1); + + // Then after a packet with seqnum 2 goes through, the lost + // stat must be incremented by 1 (as packet with seqnum 1 went missing) + let rtp_data = generate_rtp_packet(0x12345678, 2, 9000, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + jb.queue_packet(&packet, 100_000_000, now); + + now += Duration::from_millis(100); + let _ = jb.poll(now); + assert_stats(&jb, 0, 1, 2, 2); + + // If the packet with seqnum 1 does arrive after that, it should be + // considered both late and lost + let rtp_data = generate_rtp_packet(0x12345678, 1, 4500, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + jb.queue_packet(&packet, 50_000_000, now); + + let _ = jb.poll(now); + assert_stats(&jb, 1, 1, 2, 2); + + // Finally if it arrives again it should be considered a duplicate, + // and will have achieved the dubious honor of simultaneously being + // lost, late and duplicated + jb.queue_packet(&packet, 50_000_000, now); + + let _ = jb.poll(now); + assert_stats(&jb, 1, 1, 3, 2); + } + + #[test] + fn serialized_items() { + let mut jb = JitterBuffer::new(Duration::from_secs(0)); + jb.set_flushing(false); + + let now = Instant::now(); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + + let QueueResult::Queued(id_first_serialized_item) = jb.queue_serialized_item() else { + unreachable!() + }; + + let QueueResult::Queued(id_first) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + let QueueResult::Queued(id_second_serialized_item) = jb.queue_serialized_item() else { + unreachable!() + }; + + let rtp_data = generate_rtp_packet(0x12345678, 1, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + let QueueResult::Queued(id_second) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_first_serialized_item, + discont: false + } + ); + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_first, + discont: true + } + ); + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_second_serialized_item, + discont: false + } + ); + assert_eq!( + jb.poll(now), + PollResult::Forward { + id: id_second, + discont: false + } + ); + } + + #[test] + fn flushing_queue() { + let mut jb = JitterBuffer::new(Duration::from_secs(0)); + jb.set_flushing(false); + + let now = Instant::now(); + + let rtp_data = generate_rtp_packet(0x12345678, 0, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + + let QueueResult::Queued(id_first_serialized_item) = jb.queue_serialized_item() else { + unreachable!() + }; + + let QueueResult::Queued(id_first) = jb.queue_packet(&packet, 0, now) else { + unreachable!() + }; + + // Everything after this should eventually return flushing, poll() will instruct to drop + // everything stored and then return 
flushing indefinitely. + jb.set_flushing(true); + assert_eq!(jb.queue_packet(&packet, 0, now), QueueResult::Flushing); + + assert_eq!(jb.poll(now), PollResult::Drop(id_first_serialized_item)); + assert_eq!(jb.poll(now), PollResult::Drop(id_first)); + assert_eq!(jb.poll(now), PollResult::Flushing); + assert_eq!(jb.poll(now), PollResult::Flushing); + + jb.set_flushing(false); + assert_eq!(jb.poll(now), PollResult::Empty); + } +} diff --git a/net/rtp/src/rtpbin2/mod.rs b/net/rtp/src/rtpbin2/mod.rs new file mode 100644 index 00000000..2dc81ca1 --- /dev/null +++ b/net/rtp/src/rtpbin2/mod.rs @@ -0,0 +1,44 @@ +// SPDX-License-Identifier: MPL-2.0 + +use gst::glib; +use gst::prelude::*; +use once_cell::sync::Lazy; +mod config; +mod internal; +mod jitterbuffer; +mod rtprecv; +mod rtpsend; +mod session; +mod source; +mod sync; +mod time; + +glib::wrapper! { + pub struct RtpSend(ObjectSubclass) @extends gst::Element, gst::Object; +} +glib::wrapper! { + pub struct RtpRecv(ObjectSubclass) @extends gst::Element, gst::Object; +} + +pub fn register(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { + gst::Element::register( + Some(plugin), + "rtpsend", + gst::Rank::NONE, + RtpSend::static_type(), + )?; + gst::Element::register( + Some(plugin), + "rtprecv", + gst::Rank::NONE, + RtpRecv::static_type(), + ) +} + +pub static RUNTIME: Lazy = Lazy::new(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_time() + .worker_threads(1) + .build() + .unwrap() +}); diff --git a/net/rtp/src/rtpbin2/rtprecv.rs b/net/rtp/src/rtpbin2/rtprecv.rs new file mode 100644 index 00000000..5f96018a --- /dev/null +++ b/net/rtp/src/rtpbin2/rtprecv.rs @@ -0,0 +1,1641 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::{BTreeMap, HashMap}; +use std::net::SocketAddr; +use std::pin::Pin; +use std::sync::{Arc, Mutex, MutexGuard}; +use std::task::{Poll, Waker}; +use std::time::{Duration, Instant, SystemTime}; + +use futures::StreamExt; +use gst::{glib, prelude::*, subclass::prelude::*}; +use once_cell::sync::Lazy; + +use super::internal::{pt_clock_rate_from_caps, GstRustLogger, SharedRtpState, SharedSession}; +use super::jitterbuffer::{self, JitterBuffer}; +use super::session::{ + KeyUnitRequestType, RecvReply, RequestRemoteKeyUnitReply, RtcpRecvReply, RtpProfile, + RTCP_MIN_REPORT_INTERVAL, +}; +use super::source::SourceState; +use super::sync; + +use crate::rtpbin2::RUNTIME; + +const DEFAULT_LATENCY: gst::ClockTime = gst::ClockTime::from_mseconds(200); + +static CAT: Lazy = Lazy::new(|| { + gst::DebugCategory::new( + "rtprecv", + gst::DebugColorFlags::empty(), + Some("RTP session receiver"), + ) +}); + +#[derive(Debug, Clone)] +struct Settings { + rtp_id: String, + latency: gst::ClockTime, + timestamping_mode: sync::TimestampingMode, +} + +impl Default for Settings { + fn default() -> Self { + Settings { + rtp_id: String::from("rtp-id"), + latency: DEFAULT_LATENCY, + timestamping_mode: sync::TimestampingMode::default(), + } + } +} + +#[derive(Debug)] +#[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] +struct JitterBufferStream { + store: Arc>, + sleep: Pin>, +} + +impl JitterBufferStream { + fn new(store: Arc>) -> Self { + Self { + store, + sleep: Box::pin(tokio::time::sleep(Duration::from_secs(1))), + } + } +} + +impl futures::stream::Stream for JitterBufferStream { + type Item = JitterBufferItem; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let now = Instant::now(); + let mut lowest_wait = None; + + let mut 
jitterbuffer_store = self.store.lock().unwrap(); + let ret = jitterbuffer_store.jitterbuffer.poll(now); + gst::trace!(CAT, "jitterbuffer poll ret: {ret:?}"); + match ret { + jitterbuffer::PollResult::Flushing => { + return Poll::Ready(None); + } + jitterbuffer::PollResult::Drop(id) => { + jitterbuffer_store + .store + .remove(&id) + .unwrap_or_else(|| panic!("Buffer with id {id} not in store!")); + cx.waker().wake_by_ref(); + } + jitterbuffer::PollResult::Forward { id, discont } => { + let mut item = jitterbuffer_store + .store + .remove(&id) + .unwrap_or_else(|| panic!("Buffer with id {id} not in store!")); + if let JitterBufferItem::Packet(ref mut packet) = item { + if discont { + gst::debug!(CAT, "Forwarding discont buffer"); + let packet_mut = packet.make_mut(); + packet_mut.set_flags(gst::BufferFlags::DISCONT); + } + } + return Poll::Ready(Some(item)); + } + jitterbuffer::PollResult::Timeout(timeout) => { + if lowest_wait.map_or(true, |lowest_wait| timeout < lowest_wait) { + lowest_wait = Some(timeout); + } + } + // Will be woken up when necessary + jitterbuffer::PollResult::Empty => (), + } + + jitterbuffer_store.waker = Some(cx.waker().clone()); + drop(jitterbuffer_store); + + if let Some(timeout) = lowest_wait { + let this = self.get_mut(); + this.sleep.as_mut().reset(timeout.into()); + if !std::future::Future::poll(this.sleep.as_mut(), cx).is_pending() { + cx.waker().wake_by_ref(); + } + } + + Poll::Pending + } +} + +#[derive(Debug)] +enum JitterBufferItem { + Packet(gst::Buffer), + Event(gst::Event), + Query( + std::ptr::NonNull, + std::sync::mpsc::SyncSender, + ), +} + +// SAFETY: Need to be able to pass *mut gst::QueryRef +unsafe impl Send for JitterBufferItem {} + +#[derive(Debug)] +struct JitterBufferStore { + store: BTreeMap, + waker: Option, + jitterbuffer: JitterBuffer, +} + +#[derive(Debug, Clone)] +struct RtpRecvSrcPad { + pt: u8, + ssrc: u32, + pad: gst::Pad, + jitter_buffer_store: Arc>, +} + +impl PartialEq for RtpRecvSrcPad { + fn eq(&self, other: &Self) -> bool { + self.pt == other.pt && self.ssrc == other.ssrc && self.pad == other.pad + } +} + +impl Eq for RtpRecvSrcPad {} + +impl RtpRecvSrcPad { + fn activate(&mut self, state: MutexGuard, session_id: usize) { + let session = state.session_by_id(session_id).unwrap(); + let seqnum = session.rtp_recv_sink_seqnum.unwrap(); + let stream_id = format!("{}/{}", self.pt, self.ssrc); + let stream_start = gst::event::StreamStart::builder(&stream_id) + .group_id(session.rtp_recv_sink_group_id.unwrap()) + .seqnum(seqnum) + .build(); + + let session_inner = session.internal_session.inner.lock().unwrap(); + let caps = session_inner.caps_from_pt(self.pt); + let caps = gst::event::Caps::builder(&caps).seqnum(seqnum).build(); + drop(session_inner); + + let segment = gst::event::Segment::builder(session.rtp_recv_sink_segment.as_ref().unwrap()) + .seqnum(seqnum) + .build(); + drop(state); + + self.pad.set_active(true).unwrap(); + let _ = self.pad.store_sticky_event(&stream_start); + let _ = self.pad.store_sticky_event(&caps); + let _ = self.pad.store_sticky_event(&segment); + } +} + +#[derive(Debug)] +struct HeldRecvBuffer { + hold_id: Option, + buffer: gst::Buffer, + pad: RtpRecvSrcPad, + new_pad: bool, +} + +#[derive(Debug)] +struct RecvSession { + internal_session: SharedSession, + + // State for received RTP streams + rtp_recv_sinkpad: Option, + rtp_recv_sink_group_id: Option, + rtp_recv_sink_caps: Option, + rtp_recv_sink_segment: Option>, + rtp_recv_sink_seqnum: Option, + + recv_store: Vec, + + rtp_recv_srcpads: Vec, + 
recv_flow_combiner: Arc>, + + rtcp_recv_sinkpad: Option, +} + +impl RecvSession { + fn new(shared_state: &SharedRtpState, id: usize) -> Self { + let internal_session = shared_state.session_get_or_init(id, || { + SharedSession::new(id, RtpProfile::Avp, RTCP_MIN_REPORT_INTERVAL, false) + }); + Self { + internal_session, + rtp_recv_sinkpad: None, + rtp_recv_sink_group_id: None, + rtp_recv_sink_caps: None, + rtp_recv_sink_segment: None, + rtp_recv_sink_seqnum: None, + + recv_store: vec![], + + rtp_recv_srcpads: vec![], + recv_flow_combiner: Arc::new(Mutex::new(gst_base::UniqueFlowCombiner::new())), + + rtcp_recv_sinkpad: None, + } + } + + fn start_rtp_recv_task(&mut self, pad: &gst::Pad) -> Result<(), glib::BoolError> { + gst::debug!(CAT, obj: pad, "Starting rtp recv src task"); + + let recv_pad = self + .rtp_recv_srcpads + .iter_mut() + .find(|recv| &recv.pad == pad) + .unwrap(); + + let pad_weak = pad.downgrade(); + let recv_flow_combiner = self.recv_flow_combiner.clone(); + let store = recv_pad.jitter_buffer_store.clone(); + + { + let mut store = store.lock().unwrap(); + store.jitterbuffer.set_flushing(false); + store.waker.take(); + } + + pad.start_task(move || { + let Some(pad) = pad_weak.upgrade() else { + return; + }; + + let recv_flow_combiner = recv_flow_combiner.clone(); + let store = store.clone(); + + RUNTIME.block_on(async move { + let mut stream = JitterBufferStream::new(store); + while let Some(item) = stream.next().await { + match item { + JitterBufferItem::Packet(buffer) => { + let flow = pad.push(buffer); + gst::trace!(CAT, obj: pad, "Pushed buffer, flow ret {:?}", flow); + let mut recv_flow_combiner = recv_flow_combiner.lock().unwrap(); + let _combined_flow = recv_flow_combiner.update_pad_flow(&pad, flow); + // TODO: store flow, return only on session pads? 
+ } + JitterBufferItem::Event(event) => { + let res = pad.push_event(event); + gst::trace!(CAT, obj: pad, "Pushed serialized event, result: {}", res); + } + JitterBufferItem::Query(mut query, tx) => { + // This is safe because the thread holding the original reference is waiting + // for us exclusively + let res = pad.peer_query(unsafe { query.as_mut() }); + let _ = tx.send(res); + } + } + } + }) + })?; + + gst::debug!(CAT, obj: pad, "Task started"); + + Ok(()) + } + + fn stop_rtp_recv_task(&mut self, pad: &gst::Pad) -> Result<(), glib::BoolError> { + gst::debug!(CAT, obj: pad, "Stopping rtp recv src task"); + let recv_pad = self + .rtp_recv_srcpads + .iter_mut() + .find(|recv| &recv.pad == pad) + .unwrap(); + + let mut store = recv_pad.jitter_buffer_store.lock().unwrap(); + store.jitterbuffer.set_flushing(true); + if let Some(waker) = store.waker.take() { + waker.wake(); + } + + Ok(()) + } + + fn get_or_create_rtp_recv_src( + &mut self, + rtpbin: &RtpRecv, + pt: u8, + ssrc: u32, + ) -> (RtpRecvSrcPad, bool) { + if let Some(pad) = self + .rtp_recv_srcpads + .iter() + .find(|&r| r.ssrc == ssrc && r.pt == pt) + { + (pad.clone(), false) + } else { + let src_templ = rtpbin.obj().pad_template("rtp_src_%u_%u_%u").unwrap(); + let id = self.internal_session.id; + let srcpad = gst::Pad::builder_from_template(&src_templ) + .iterate_internal_links_function(|pad, parent| { + RtpRecv::catch_panic_pad_function( + parent, + || gst::Iterator::from_vec(vec![]), + |this| this.iterate_internal_links(pad), + ) + }) + .query_function(|pad, parent, query| { + RtpRecv::catch_panic_pad_function( + parent, + || false, + |this| this.src_query(pad, query), + ) + }) + .event_function(move |pad, parent, event| { + RtpRecv::catch_panic_pad_function( + parent, + || false, + |this| this.rtp_recv_src_event(pad, event, id, pt, ssrc), + ) + }) + .activatemode_function({ + let this = rtpbin.downgrade(); + move |pad, _parent, mode, active| { + let Some(this) = this.upgrade() else { + return Err(gst::LoggableError::new( + *CAT, + glib::bool_error!("rtprecv does not exist anymore"), + )); + }; + this.rtp_recv_src_activatemode(pad, mode, active, id) + } + }) + .name(format!("rtp_src_{}_{}_{}", id, pt, ssrc)) + .build(); + + srcpad.use_fixed_caps(); + + let settings = rtpbin.settings.lock().unwrap(); + + let recv_pad = RtpRecvSrcPad { + pt, + ssrc, + pad: srcpad.clone(), + jitter_buffer_store: Arc::new(Mutex::new(JitterBufferStore { + waker: None, + store: BTreeMap::new(), + jitterbuffer: JitterBuffer::new(settings.latency.into()), + })), + }; + + self.recv_flow_combiner + .lock() + .unwrap() + .add_pad(&recv_pad.pad); + self.rtp_recv_srcpads.push(recv_pad.clone()); + (recv_pad, true) + } + } +} + +#[derive(Debug, Default)] +struct State { + shared_state: Option, + sessions: Vec, + max_session_id: usize, + pads_session_id_map: HashMap, + sync_context: Option, +} + +impl State { + fn session_by_id(&self, id: usize) -> Option<&RecvSession> { + self.sessions + .iter() + .find(|session| session.internal_session.id == id) + } + + fn mut_session_by_id(&mut self, id: usize) -> Option<&mut RecvSession> { + self.sessions + .iter_mut() + .find(|session| session.internal_session.id == id) + } + + fn stats(&self) -> gst::Structure { + let mut ret = gst::Structure::builder("application/x-rtp2-stats"); + for session in self.sessions.iter() { + let sess_id = session.internal_session.id; + let session_inner = session.internal_session.inner.lock().unwrap(); + + let mut session_stats = session_inner.stats(); + let jb_stats = 
gst::List::new(session.rtp_recv_srcpads.iter().map(|pad| { + let mut jb_stats = pad.jitter_buffer_store.lock().unwrap().jitterbuffer.stats(); + jb_stats.set_value("ssrc", (pad.ssrc as i32).to_send_value()); + jb_stats.set_value("pt", (pad.pt as i32).to_send_value()); + jb_stats + })); + + session_stats.set("jitterbuffer-stats", jb_stats); + ret = ret.field(sess_id.to_string(), session_stats); + } + ret.build() + } +} + +pub struct RtpRecv { + settings: Mutex, + state: Arc>, +} + +impl RtpRecv { + fn rtp_recv_src_activatemode( + &self, + pad: &gst::Pad, + mode: gst::PadMode, + active: bool, + id: usize, + ) -> Result<(), gst::LoggableError> { + if let gst::PadMode::Push = mode { + let mut state = self.state.lock().unwrap(); + let Some(session) = state.mut_session_by_id(id) else { + if active { + return Err(gst::LoggableError::new( + *CAT, + glib::bool_error!("Can't activate pad of unknown session {id}"), + )); + } else { + return Ok(()); + } + }; + + if active { + session.start_rtp_recv_task(pad)?; + } else { + session.stop_rtp_recv_task(pad)?; + + gst::debug!(CAT, obj: pad, "Stopping task"); + + let _ = pad.stop_task(); + } + + Ok(()) + } else { + Err(gst::LoggableError::new( + *CAT, + glib::bool_error!("Unsupported pad mode {mode:?}"), + )) + } + } + + pub fn src_query(&self, pad: &gst::Pad, query: &mut gst::QueryRef) -> bool { + gst::log!(CAT, obj: pad, "Handling query {query:?}"); + + use gst::QueryViewMut::*; + match query.view_mut() { + Latency(q) => { + let mut peer_query = gst::query::Latency::new(); + + let ret = gst::Pad::query_default(pad, Some(&*self.obj()), &mut peer_query); + let our_latency = self.settings.lock().unwrap().latency; + + let min = if ret { + let (_, min, _) = peer_query.result(); + + our_latency + min + } else { + our_latency + }; + + gst::info!(CAT, obj: pad, "Handled latency query, our latency {our_latency}, minimum latency: {min}"); + q.set(true, min, gst::ClockTime::NONE); + + ret + } + _ => gst::Pad::query_default(pad, Some(pad), query), + } + } + + fn iterate_internal_links(&self, pad: &gst::Pad) -> gst::Iterator { + let state = self.state.lock().unwrap(); + if let Some(&id) = state.pads_session_id_map.get(pad) { + if let Some(session) = state.session_by_id(id) { + if let Some(ref sinkpad) = session.rtp_recv_sinkpad { + if sinkpad == pad { + let pads = session + .rtp_recv_srcpads + .iter() + // Only include pads that are already part of the element + .filter(|r| state.pads_session_id_map.contains_key(&r.pad)) + .map(|r| r.pad.clone()) + .collect(); + return gst::Iterator::from_vec(pads); + } else if session.rtp_recv_srcpads.iter().any(|r| &r.pad == pad) { + return gst::Iterator::from_vec(vec![sinkpad.clone()]); + } + } + // nothing to do for rtcp pads + } + } + gst::Iterator::from_vec(vec![]) + } + + fn rtp_recv_sink_chain( + &self, + pad: &gst::Pad, + id: usize, + mut buffer: gst::Buffer, + ) -> Result { + let mut state = self.state.lock().unwrap(); + let Some(session) = state.mut_session_by_id(id) else { + return Err(gst::FlowError::Error); + }; + + // TODO: this is different from the old C implementation, where we + // simply used the RTP timestamps as they were instead of doing any + // sort of skew calculations. + // + // Check if this makes sense or if this leads to issue with eg interleaved + // TCP. 
+ let arrival_time = match buffer.dts() { + Some(dts) => { + let segment = session.rtp_recv_sink_segment.as_ref().unwrap(); + // TODO: use running_time_full if we care to support that + match segment.to_running_time(dts) { + Some(time) => time, + None => { + gst::error!(CAT, obj: pad, "out of segment DTS are not supported"); + return Err(gst::FlowError::Error); + } + } + } + None => match self.obj().current_running_time() { + Some(time) => time, + None => { + gst::error!(CAT, obj: pad, "Failed to get current time"); + return Err(gst::FlowError::Error); + } + }, + }; + + gst::trace!(CAT, obj: pad, "using arrival time {}", arrival_time); + + let addr: Option = + buffer + .meta::() + .and_then(|net_meta| { + net_meta + .addr() + .dynamic_cast::() + .map(|a| a.into()) + .ok() + }); + let mapped = buffer.map_readable().map_err(|e| { + gst::error!(CAT, imp: self, "Failed to map input buffer {e:?}"); + gst::FlowError::Error + })?; + let rtp = match rtp_types::RtpPacket::parse(&mapped) { + Ok(rtp) => rtp, + Err(e) => { + // If this is a valid RTCP packet then it was muxed with the RTP stream and can be + // handled just fine. + if rtcp_types::Compound::parse(&mapped).map_or(false, |mut rtcp| { + rtcp.next().map_or(false, |rtcp| rtcp.is_ok()) + }) { + drop(mapped); + return Self::rtcp_recv_sink_chain(self, id, buffer); + } + + gst::error!(CAT, imp: self, "Failed to parse input as valid rtp packet: {e:?}"); + return Ok(gst::FlowSuccess::Ok); + } + }; + + let internal_session = session.internal_session.clone(); + + let mut session_inner = internal_session.inner.lock().unwrap(); + + if state + .sync_context + .as_ref() + .unwrap() + .clock_rate(rtp.ssrc()) + .is_none() + { + let clock_rate = session_inner + .session + .clock_rate_from_pt(rtp.payload_type()) + .unwrap(); + state + .sync_context + .as_mut() + .unwrap() + .set_clock_rate(rtp.ssrc(), clock_rate); + } + + // TODO: Put NTP time as `gst::ReferenceTimeStampMeta` on the buffers if selected via property + let (pts, _ntp_time) = state.sync_context.as_mut().unwrap().calculate_pts( + rtp.ssrc(), + rtp.timestamp(), + arrival_time.nseconds(), + ); + let session = state.mut_session_by_id(id).unwrap(); + let segment = session.rtp_recv_sink_segment.as_ref().unwrap(); + let pts = segment + .position_from_running_time(gst::ClockTime::from_nseconds(pts)) + .unwrap(); + gst::debug!(CAT, "Calculated PTS: {}", pts); + + let now = Instant::now(); + let mut buffers_to_push = vec![]; + loop { + match session_inner.session.handle_recv(&rtp, addr, now) { + RecvReply::SsrcCollision(_ssrc) => (), // TODO: handle ssrc collision + RecvReply::NewSsrc(ssrc, _pt) => { + drop(session_inner); + internal_session + .config + .emit_by_name::<()>("new-ssrc", &[&ssrc]); + session_inner = internal_session.inner.lock().unwrap(); + } + RecvReply::Hold(hold_id) => { + let pt = rtp.payload_type(); + let ssrc = rtp.ssrc(); + drop(mapped); + { + let buf_mut = buffer.make_mut(); + buf_mut.set_pts(pts); + } + let (pad, new_pad) = session.get_or_create_rtp_recv_src(self, pt, ssrc); + session.recv_store.push(HeldRecvBuffer { + hold_id: Some(hold_id), + buffer, + pad, + new_pad, + }); + break; + } + RecvReply::Drop(hold_id) => { + if let Some(pos) = session + .recv_store + .iter() + .position(|b| b.hold_id.unwrap() == hold_id) + { + session.recv_store.remove(pos); + } + } + RecvReply::Forward(hold_id) => { + if let Some(pos) = session + .recv_store + .iter() + .position(|b| b.hold_id.unwrap() == hold_id) + { + buffers_to_push.push(session.recv_store.remove(pos)); + } else { + 
unreachable!(); + } + } + RecvReply::Ignore => break, + RecvReply::Passthrough => { + let pt = rtp.payload_type(); + let ssrc = rtp.ssrc(); + drop(mapped); + { + let buf_mut = buffer.make_mut(); + buf_mut.set_pts(pts); + } + let (pad, new_pad) = session.get_or_create_rtp_recv_src(self, pt, ssrc); + buffers_to_push.push(HeldRecvBuffer { + hold_id: None, + buffer, + pad, + new_pad, + }); + break; + } + } + } + + drop(session_inner); + + for mut held in buffers_to_push { + // TODO: handle other processing + if held.new_pad { + state.pads_session_id_map.insert(held.pad.pad.clone(), id); + // drops the state lock + held.pad.activate(state, id); + self.obj().add_pad(&held.pad.pad).unwrap(); + state = self.state.lock().unwrap(); + } + + let mapped = held.buffer.map_readable().map_err(|e| { + gst::error!(CAT, imp: self, "Failed to map input buffer {e:?}"); + gst::FlowError::Error + })?; + let rtp = match rtp_types::RtpPacket::parse(&mapped) { + Ok(rtp) => rtp, + Err(e) => { + gst::error!(CAT, imp: self, "Failed to parse input as valid rtp packet: {e:?}"); + return Ok(gst::FlowSuccess::Ok); + } + }; + + // FIXME: Should block if too many packets are stored here because the source pad task + // is blocked + let mut jitterbuffer_store = held.pad.jitter_buffer_store.lock().unwrap(); + + match jitterbuffer_store.jitterbuffer.queue_packet( + &rtp, + held.buffer.pts().unwrap().nseconds(), + now, + ) { + jitterbuffer::QueueResult::Flushing => { + // TODO: return flushing result upstream + } + jitterbuffer::QueueResult::Queued(id) => { + drop(mapped); + + jitterbuffer_store + .store + .insert(id, JitterBufferItem::Packet(held.buffer)); + if let Some(waker) = jitterbuffer_store.waker.take() { + waker.wake() + } + } + jitterbuffer::QueueResult::Late => { + gst::warning!(CAT, "Late buffer was dropped"); + } + jitterbuffer::QueueResult::Duplicate => { + gst::warning!(CAT, "Duplicate buffer was dropped"); + } + } + } + + Ok(gst::FlowSuccess::Ok) + } + + fn rtcp_recv_sink_chain( + &self, + id: usize, + buffer: gst::Buffer, + ) -> Result { + let state = self.state.lock().unwrap(); + let Some(session) = state.session_by_id(id) else { + return Err(gst::FlowError::Error); + }; + + let addr: Option = + buffer + .meta::() + .and_then(|net_meta| { + net_meta + .addr() + .dynamic_cast::() + .map(|a| a.into()) + .ok() + }); + let mapped = buffer.map_readable().map_err(|e| { + gst::error!(CAT, imp: self, "Failed to map input buffer {e:?}"); + gst::FlowError::Error + })?; + let rtcp = match rtcp_types::Compound::parse(&mapped) { + Ok(rtcp) => rtcp, + Err(e) => { + gst::error!(CAT, imp: self, "Failed to parse input as valid rtcp packet: {e:?}"); + return Ok(gst::FlowSuccess::Ok); + } + }; + + let internal_session = session.internal_session.clone(); + let mut session_inner = internal_session.inner.lock().unwrap(); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let replies = + session_inner + .session + .handle_rtcp_recv(rtcp, mapped.len(), addr, now, ntp_now); + let rtp_send_sinkpad = session_inner.rtp_send_sinkpad.clone(); + drop(session_inner); + drop(state); + + for reply in replies { + match reply { + RtcpRecvReply::NewSsrc(ssrc) => { + internal_session + .config + .emit_by_name::<()>("new-ssrc", &[&ssrc]); + } + RtcpRecvReply::SsrcCollision(_ssrc) => (), // TODO: handle ssrc collision + RtcpRecvReply::TimerReconsideration => { + let state = self.state.lock().unwrap(); + let session = state.session_by_id(id).unwrap(); + let mut session_inner = session.internal_session.inner.lock().unwrap(); + if let 
Some(waker) = session_inner.rtcp_waker.take() { + // reconsider timers means that we wake the rtcp task to get a new timeout + waker.wake(); + } + } + RtcpRecvReply::RequestKeyUnit { ssrcs, fir } => { + if let Some(ref rtp_send_sinkpad) = rtp_send_sinkpad { + gst::debug!(CAT, imp: self, "Sending force-keyunit event for ssrcs {ssrcs:?} (all headers: {fir})"); + // TODO what to do with the ssrc? + let event = gst_video::UpstreamForceKeyUnitEvent::builder() + .all_headers(fir) + .other_field("ssrcs", &gst::Array::new(ssrcs)) + .build(); + + let _ = rtp_send_sinkpad.push_event(event); + } else { + gst::debug!(CAT, imp: self, "Can't send force-keyunit event because of missing sinkpad"); + } + } + RtcpRecvReply::NewCName((cname, ssrc)) => { + let mut state = self.state.lock().unwrap(); + + state.sync_context.as_mut().unwrap().associate(ssrc, &cname); + } + RtcpRecvReply::NewRtpNtp((ssrc, rtp, ntp)) => { + let mut state = self.state.lock().unwrap(); + + state + .sync_context + .as_mut() + .unwrap() + .add_sender_report(ssrc, rtp, ntp); + } + RtcpRecvReply::SsrcBye(ssrc) => internal_session + .config + .emit_by_name::<()>("bye-ssrc", &[&ssrc]), + } + } + drop(mapped); + + Ok(gst::FlowSuccess::Ok) + } + + pub fn rtp_recv_sink_query( + &self, + pad: &gst::Pad, + query: &mut gst::QueryRef, + id: usize, + ) -> bool { + gst::log!(CAT, obj: pad, "Handling query {query:?}"); + + if query.is_serialized() { + let state = self.state.lock().unwrap(); + let mut ret = true; + + if let Some(session) = state.session_by_id(id) { + let jb_stores: Vec>> = session + .rtp_recv_srcpads + .iter() + .filter(|r| state.pads_session_id_map.contains_key(&r.pad)) + .map(|p| p.jitter_buffer_store.clone()) + .collect(); + + drop(state); + + let query = std::ptr::NonNull::from(query); + + // The idea here is to reproduce the default behavior of GstPad, where + // queries will run sequentially on each internally linked source pad + // until one succeeds. + // + // We however jump through hoops here in order to keep the query + // reasonably synchronized with the data flow. + // + // While the GstPad behavior makes complete sense for allocation + // queries (can't have it succeed for two downstream branches as they + // need to modify the query), we could in the future decide to have + // the drain query run on all relevant source pads no matter what. + // + // Also note that if there were no internally linked pads, GstPad's + // behavior is to return TRUE, we do this here too. 
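+            // Concretely: the query is wrapped in a NonNull pointer and queued into
+            // each candidate jitterbuffer store as a JitterBufferItem::Query together
+            // with the sender half of a bounded std::sync::mpsc channel. The per-pad
+            // src task pops the item, runs peer_query() downstream and sends the
+            // result back, while this sink thread blocks on the receiver. That keeps
+            // the serialized query ordered with the packets already queued for the pad.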
+ for jb_store in jb_stores { + let mut jitterbuffer_store = jb_store.lock().unwrap(); + + let jitterbuffer::QueueResult::Queued(id) = + jitterbuffer_store.jitterbuffer.queue_serialized_item() + else { + unreachable!() + }; + + let (query_tx, query_rx) = std::sync::mpsc::sync_channel(1); + + jitterbuffer_store + .store + .insert(id, JitterBufferItem::Query(query, query_tx)); + + drop(jitterbuffer_store); + + // Now block until the jitterbuffer has processed the query + match query_rx.recv() { + Ok(res) => { + ret |= res; + if ret { + break; + } + } + _ => { + // The sender was closed because of a state change + break; + } + } + } + } + + ret + } else { + gst::Pad::query_default(pad, Some(pad), query) + } + } + + // Serialized events received on our sink pads have to navigate + // through the relevant jitterbuffers in order to remain (reasonably) + // consistently ordered with the RTP packets once output on our source + // pads + fn rtp_recv_sink_queue_serialized_event(&self, id: usize, event: gst::Event) -> bool { + let state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + for srcpad in session + .rtp_recv_srcpads + .iter() + .filter(|r| state.pads_session_id_map.contains_key(&r.pad)) + { + let mut jitterbuffer_store = srcpad.jitter_buffer_store.lock().unwrap(); + + let jitterbuffer::QueueResult::Queued(id) = + jitterbuffer_store.jitterbuffer.queue_serialized_item() + else { + unreachable!() + }; + + jitterbuffer_store + .store + .insert(id, JitterBufferItem::Event(event.clone())); + if let Some(waker) = jitterbuffer_store.waker.take() { + waker.wake(); + } + } + } + + true + } + + fn rtp_recv_sink_event(&self, pad: &gst::Pad, mut event: gst::Event, id: usize) -> bool { + match event.view() { + gst::EventView::StreamStart(stream_start) => { + let mut state = self.state.lock().unwrap(); + + if let Some(session) = state.mut_session_by_id(id) { + let group_id = stream_start.group_id(); + session.rtp_recv_sink_group_id = + Some(group_id.unwrap_or_else(gst::GroupId::next)); + } + + true + } + gst::EventView::Caps(caps) => { + let mut state = self.state.lock().unwrap(); + + if let Some((pt, clock_rate)) = pt_clock_rate_from_caps(caps.caps()) { + if let Some(session) = state.mut_session_by_id(id) { + let caps = caps.caps_owned(); + session.rtp_recv_sink_caps = Some(caps.clone()); + + let mut session_inner = session.internal_session.inner.lock().unwrap(); + session_inner.session.set_pt_clock_rate(pt, clock_rate); + session_inner.add_caps(caps); + } + } else { + gst::warning!(CAT, obj: pad, "input caps are missing payload or clock-rate fields"); + } + true + } + gst::EventView::Segment(segment) => { + let mut state = self.state.lock().unwrap(); + + if let Some(session) = state.mut_session_by_id(id) { + let segment = segment.segment(); + let segment = match segment.downcast_ref::() { + Some(segment) => segment.clone(), + None => { + gst::warning!(CAT, obj: pad, "Only TIME segments are supported"); + + let segment = gst::FormattedSegment::new(); + let seqnum = event.seqnum(); + + event = gst::event::Segment::builder(&segment) + .seqnum(seqnum) + .build(); + + segment + } + }; + + session.rtp_recv_sink_segment = Some(segment); + session.rtp_recv_sink_seqnum = Some(event.seqnum()); + } + + drop(state); + + self.rtp_recv_sink_queue_serialized_event(id, event) + } + gst::EventView::Eos(_eos) => { + let now = Instant::now(); + let state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let mut session = 
session.internal_session.inner.lock().unwrap(); + let ssrcs = session.session.ssrcs().collect::>(); + // we can only Bye the entire session if we do not have any local send sources + // currently sending data + let mut all_remote = true; + let internal_ssrc = session.session.internal_ssrc(); + for ssrc in ssrcs { + let Some(_local_recv) = session.session.local_receive_source_by_ssrc(ssrc) + else { + if let Some(local_send) = + session.session.local_send_source_by_ssrc(ssrc) + { + if local_send.state() != SourceState::Bye + && Some(ssrc) != internal_ssrc + { + all_remote = false; + break; + } + } + continue; + }; + } + if all_remote { + session.session.schedule_bye("End of stream", now); + } + drop(session); + } + drop(state); + // FIXME: may need to delay sending eos under some circumstances + self.rtp_recv_sink_queue_serialized_event(id, event); + true + } + gst::EventView::FlushStart(_fs) => { + let state = self.state.lock().unwrap(); + let mut pause_tasks = vec![]; + if let Some(session) = state.session_by_id(id) { + for recv_pad in session.rtp_recv_srcpads.iter() { + let mut store = recv_pad.jitter_buffer_store.lock().unwrap(); + store.jitterbuffer.set_flushing(true); + if let Some(waker) = store.waker.take() { + waker.wake(); + } + pause_tasks.push(recv_pad.pad.clone()); + } + } + drop(state); + for pad in pause_tasks { + let _ = pad.pause_task(); + } + gst::Pad::event_default(pad, Some(&*self.obj()), event) + } + gst::EventView::FlushStop(_fs) => { + let mut state = self.state.lock().unwrap(); + if let Some(session) = state.mut_session_by_id(id) { + let pads = session + .rtp_recv_srcpads + .iter() + .map(|r| r.pad.clone()) + .collect::>(); + for pad in pads { + // Will reset flushing to false and ensure task is woken up + let _ = session.start_rtp_recv_task(&pad); + } + } + drop(state); + self.rtp_recv_sink_queue_serialized_event(id, event) + } + _ => { + if event.is_serialized() { + self.rtp_recv_sink_queue_serialized_event(id, event) + } else { + gst::Pad::event_default(pad, Some(&*self.obj()), event) + } + } + } + } + + fn rtp_recv_src_event( + &self, + pad: &gst::Pad, + event: gst::Event, + id: usize, + pt: u8, + ssrc: u32, + ) -> bool { + match event.view() { + gst::EventView::CustomUpstream(custom) => { + if let Ok(fku) = gst_video::UpstreamForceKeyUnitEvent::parse(custom) { + let all_headers = fku.all_headers; + let count = fku.count; + + let state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let now = Instant::now(); + let mut session = session.internal_session.inner.lock().unwrap(); + let caps = session.caps_from_pt(pt); + let s = caps.structure(0).unwrap(); + + let pli = s.has_field("rtcp-fb-nack-pli"); + let fir = s.has_field("rtcp-fb-ccm-fir") && all_headers; + + let typ = if fir { + KeyUnitRequestType::Fir(count) + } else { + KeyUnitRequestType::Pli + }; + + if pli || fir { + let replies = session.session.request_remote_key_unit(now, typ, ssrc); + + for reply in replies { + match reply { + RequestRemoteKeyUnitReply::TimerReconsideration => { + if let Some(waker) = session.rtcp_waker.take() { + // reconsider timers means that we wake the rtcp task to get a new timeout + waker.wake(); + } + } + } + } + } + } + + // Don't forward + return true; + } + gst::Pad::event_default(pad, Some(&*self.obj()), event) + } + _ => gst::Pad::event_default(pad, Some(&*self.obj()), event), + } + } +} + +#[glib::object_subclass] +impl ObjectSubclass for RtpRecv { + const NAME: &'static str = "GstRtpRecv"; + type Type = super::RtpRecv; + type ParentType = 
gst::Element; + + fn new() -> Self { + GstRustLogger::install(); + Self { + settings: Default::default(), + state: Default::default(), + } + } +} + +impl ObjectImpl for RtpRecv { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + vec![ + glib::ParamSpecString::builder("rtp-id") + .nick("The RTP Connection ID") + .blurb("A connection ID shared with a rtpsend element for implementing both sending and receiving using the same RTP context") + .default_value("rtp-id") + .build(), + glib::ParamSpecUInt::builder("latency") + .nick("Buffer latency in ms") + .blurb("Amount of ms to buffer") + .default_value(DEFAULT_LATENCY.mseconds() as u32) + .mutable_ready() + .build(), + glib::ParamSpecUInt::builder("stats") + .nick("Statistics") + .blurb("Statistics about the session") + .read_only() + .build(), + glib::ParamSpecEnum::builder::("timestamping-mode") + .nick("Timestamping Mode") + .blurb("Govern how to pick presentation timestamps for packets") + .default_value(sync::TimestampingMode::default()) + .mutable_ready() + .build(), + ] + }); + + PROPERTIES.as_ref() + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { + match pspec.name() { + "rtp-id" => { + let mut settings = self.settings.lock().unwrap(); + settings.rtp_id = value.get::().expect("type checked upstream"); + } + "latency" => { + let _latency = { + let mut settings = self.settings.lock().unwrap(); + settings.latency = gst::ClockTime::from_mseconds( + value.get::().expect("type checked upstream").into(), + ); + settings.latency + }; + + let _ = self + .obj() + .post_message(gst::message::Latency::builder().src(&*self.obj()).build()); + } + "timestamping-mode" => { + let mut settings = self.settings.lock().unwrap(); + settings.timestamping_mode = value + .get::() + .expect("Type checked upstream"); + } + _ => unimplemented!(), + } + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + match pspec.name() { + "rtp-id" => { + let settings = self.settings.lock().unwrap(); + settings.rtp_id.to_value() + } + "latency" => { + let settings = self.settings.lock().unwrap(); + (settings.latency.mseconds() as u32).to_value() + } + "stats" => { + let state = self.state.lock().unwrap(); + state.stats().to_value() + } + "timestamping-mode" => { + let settings = self.settings.lock().unwrap(); + settings.timestamping_mode.to_value() + } + _ => unimplemented!(), + } + } + + fn signals() -> &'static [glib::subclass::Signal] { + static SIGNALS: Lazy> = Lazy::new(|| { + vec![glib::subclass::Signal::builder("get-session") + .param_types([u32::static_type()]) + .return_type::() + .action() + .class_handler(|_token, args| { + let element = args[0].get::().expect("signal arg"); + let id = args[1].get::().expect("signal arg"); + let bin = element.imp(); + let state = bin.state.lock().unwrap(); + state + .session_by_id(id as usize) + .map(|sess| sess.internal_session.config.to_value()) + }) + .build()] + }); + + SIGNALS.as_ref() + } +} + +impl GstObjectImpl for RtpRecv {} + +impl ElementImpl for RtpRecv { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "RTP Session receiver", + "Network/RTP/Filter", + "RTP sessions management (receiver)", + "Matthew Waters ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let rtp_caps = gst::Caps::builder_full() + 
.structure(gst::Structure::builder("application/x-rtp").build()) + .build(); + let rtcp_caps = gst::Caps::builder_full() + .structure(gst::Structure::builder("application/x-rtcp").build()) + .build(); + + vec![ + gst::PadTemplate::new( + "rtp_sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &rtp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtcp_sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &rtcp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtp_src_%u_%u_%u", + gst::PadDirection::Src, + gst::PadPresence::Sometimes, + &rtp_caps, + ) + .unwrap(), + ] + }); + + PAD_TEMPLATES.as_ref() + } + + fn request_new_pad( + &self, + templ: &gst::PadTemplate, + name: Option<&str>, + _caps: Option<&gst::Caps>, // XXX: do something with caps? + ) -> Option { + let settings = self.settings.lock().unwrap().clone(); + let rtp_id = settings.rtp_id.clone(); + let mut state = self.state.lock().unwrap(); + let max_session_id = state.max_session_id; + + // parse the possibly provided name into a session id or use the default + let sess_parse = move |name: Option<&str>, prefix, default_id| -> Option { + if let Some(name) = name { + name.strip_prefix(prefix).and_then(|suffix| { + if suffix.starts_with("%u") { + Some(default_id) + } else { + suffix.parse::().ok() + } + }) + } else { + Some(default_id) + } + }; + + match templ.name_template() { + "rtp_sink_%u" => sess_parse(name, "rtp_sink_", max_session_id).and_then(|id| { + let new_pad = move |session: &mut RecvSession| -> Option<( + gst::Pad, + Option, + usize, + Vec, + )> { + let sinkpad = gst::Pad::builder_from_template(templ) + .chain_function(move |pad, parent, buffer| { + RtpRecv::catch_panic_pad_function( + parent, + || Err(gst::FlowError::Error), + |this| this.rtp_recv_sink_chain(pad, id, buffer), + ) + }) + .iterate_internal_links_function(|pad, parent| { + RtpRecv::catch_panic_pad_function( + parent, + || gst::Iterator::from_vec(vec![]), + |this| this.iterate_internal_links(pad), + ) + }) + .event_function(move |pad, parent, event| { + RtpRecv::catch_panic_pad_function( + parent, + || false, + |this| this.rtp_recv_sink_event(pad, event, id), + ) + }) + .query_function(move |pad, parent, query| { + RtpRecv::catch_panic_pad_function( + parent, + || false, + |this| this.rtp_recv_sink_query(pad, query, id), + ) + }) + .name(format!("rtp_sink_{}", id)) + .build(); + session.rtp_recv_sinkpad = Some(sinkpad.clone()); + Some((sinkpad, None, id, vec![])) + }; + + let session = state.mut_session_by_id(id); + if let Some(session) = session { + if session.rtp_recv_sinkpad.is_some() { + None + } else { + new_pad(session) + } + } else { + let shared_state = state + .shared_state + .get_or_insert_with(|| SharedRtpState::recv_get_or_init(rtp_id)); + let mut session = RecvSession::new(shared_state, id); + let ret = new_pad(&mut session); + state.sessions.push(session); + ret + } + }), + "rtcp_sink_%u" => sess_parse(name, "rtcp_sink_", max_session_id).and_then(|id| { + let new_pad = move |session: &mut RecvSession| -> Option<( + gst::Pad, + Option, + usize, + Vec, + )> { + let sinkpad = gst::Pad::builder_from_template(templ) + .chain_function(move |_pad, parent, buffer| { + RtpRecv::catch_panic_pad_function( + parent, + || Err(gst::FlowError::Error), + |this| this.rtcp_recv_sink_chain(id, buffer), + ) + }) + .iterate_internal_links_function(|pad, parent| { + RtpRecv::catch_panic_pad_function( + parent, + || gst::Iterator::from_vec(vec![]), + |this| this.iterate_internal_links(pad), + ) + }) + .name(format!("rtcp_sink_{}", 
id)) + .build(); + session.rtcp_recv_sinkpad = Some(sinkpad.clone()); + Some((sinkpad, None, id, vec![])) + }; + + let session = state.mut_session_by_id(id); + if let Some(session) = session { + if session.rtcp_recv_sinkpad.is_some() { + None + } else { + new_pad(session) + } + } else { + let shared_state = state + .shared_state + .get_or_insert_with(|| SharedRtpState::recv_get_or_init(rtp_id)); + let mut session = RecvSession::new(shared_state, id); + let ret = new_pad(&mut session); + state.sessions.push(session); + ret + } + }), + _ => None, + } + .map(|(pad, otherpad, id, sticky_events)| { + state.max_session_id = (id + 1).max(state.max_session_id); + state.pads_session_id_map.insert(pad.clone(), id); + if let Some(ref pad) = otherpad { + state.pads_session_id_map.insert(pad.clone(), id); + } + + drop(state); + + pad.set_active(true).unwrap(); + for event in sticky_events { + let _ = pad.store_sticky_event(&event); + } + self.obj().add_pad(&pad).unwrap(); + + if let Some(pad) = otherpad { + pad.set_active(true).unwrap(); + self.obj().add_pad(&pad).unwrap(); + } + + pad + }) + } + + fn release_pad(&self, pad: &gst::Pad) { + let mut state = self.state.lock().unwrap(); + let mut removed_pads = vec![]; + let mut removed_session_ids = vec![]; + if let Some(&id) = state.pads_session_id_map.get(pad) { + removed_pads.push(pad.clone()); + if let Some(session) = state.mut_session_by_id(id) { + if Some(pad) == session.rtp_recv_sinkpad.as_ref() { + session.rtp_recv_sinkpad = None; + removed_pads.extend(session.rtp_recv_srcpads.iter().map(|r| r.pad.clone())); + session.recv_flow_combiner.lock().unwrap().clear(); + session.rtp_recv_srcpads.clear(); + session.recv_store.clear(); + } + + if Some(pad) == session.rtcp_recv_sinkpad.as_ref() { + session.rtcp_recv_sinkpad = None; + } + + if session.rtp_recv_sinkpad.is_none() && session.rtcp_recv_sinkpad.is_none() { + removed_session_ids.push(session.internal_session.id); + } + } + } + drop(state); + + for pad in removed_pads.iter() { + let _ = pad.set_active(false); + // Pad might not have been added yet if it's a RTP recv srcpad + if pad.has_as_parent(&*self.obj()) { + let _ = self.obj().remove_pad(pad); + } + } + + { + let mut state = self.state.lock().unwrap(); + + for pad in removed_pads.iter() { + state.pads_session_id_map.remove(pad); + } + for id in removed_session_ids { + if let Some(session) = state.mut_session_by_id(id) { + if session.rtp_recv_sinkpad.is_none() && session.rtcp_recv_sinkpad.is_none() { + let id = session.internal_session.id; + state.sessions.retain(|s| s.internal_session.id != id); + } + } + } + } + + self.parent_release_pad(pad) + } + + #[allow(clippy::single_match)] + fn change_state( + &self, + transition: gst::StateChange, + ) -> Result { + match transition { + gst::StateChange::NullToReady => { + let settings = self.settings.lock().unwrap(); + let mut state = self.state.lock().unwrap(); + let rtp_id = settings.rtp_id.clone(); + let empty_sessions = state.sessions.is_empty(); + match state.shared_state.as_mut() { + Some(shared) => { + if !empty_sessions && shared.name() != rtp_id { + let other_name = shared.name().to_owned(); + drop(state); + self.post_error_message(gst::error_msg!(gst::LibraryError::Settings, ["rtp-id {rtp_id} does not match the currently set value {other_name}"])); + return Err(gst::StateChangeError); + } + } + None => { + state.shared_state = Some(SharedRtpState::send_get_or_init(rtp_id.clone())); + } + } + } + gst::StateChange::ReadyToPaused => { + let settings = self.settings.lock().unwrap(); + let mut 
state = self.state.lock().unwrap(); + + state.sync_context = Some(sync::Context::new(settings.timestamping_mode)); + } + _ => (), + } + + let mut success = self.parent_change_state(transition)?; + + match transition { + gst::StateChange::ReadyToPaused | gst::StateChange::PlayingToPaused => { + success = gst::StateChangeSuccess::NoPreroll; + } + gst::StateChange::PausedToReady => { + let mut state = self.state.lock().unwrap(); + let mut removed_pads = vec![]; + for session in &mut state.sessions { + removed_pads.extend(session.rtp_recv_srcpads.iter().map(|r| r.pad.clone())); + + session.recv_flow_combiner.lock().unwrap().clear(); + session.rtp_recv_srcpads.clear(); + session.recv_store.clear(); + + session.rtp_recv_sink_caps = None; + session.rtp_recv_sink_segment = None; + session.rtp_recv_sink_seqnum = None; + session.rtp_recv_sink_group_id = None; + } + state.sync_context = None; + drop(state); + + for pad in removed_pads.iter() { + let _ = pad.set_active(false); + // Pad might not have been added yet if it's a RTP recv srcpad + if pad.has_as_parent(&*self.obj()) { + let _ = self.obj().remove_pad(pad); + } + } + + let mut state = self.state.lock().unwrap(); + for pad in removed_pads { + state.pads_session_id_map.remove(&pad); + } + drop(state); + } + _ => (), + } + + Ok(success) + } +} + +impl Drop for RtpRecv { + fn drop(&mut self) { + if let Some(ref shared_state) = self.state.lock().unwrap().shared_state { + shared_state.unmark_recv_outstanding(); + } + } +} diff --git a/net/rtp/src/rtpbin2/rtpsend.rs b/net/rtp/src/rtpbin2/rtpsend.rs new file mode 100644 index 00000000..305f9803 --- /dev/null +++ b/net/rtp/src/rtpbin2/rtpsend.rs @@ -0,0 +1,878 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::HashMap; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::Poll; +use std::time::{Duration, Instant, SystemTime}; + +use futures::future::{AbortHandle, Abortable}; +use futures::StreamExt; +use gst::{glib, prelude::*, subclass::prelude::*}; +use once_cell::sync::Lazy; + +use super::internal::{pt_clock_rate_from_caps, GstRustLogger, SharedRtpState, SharedSession}; +use super::session::{RtcpSendReply, RtpProfile, SendReply, RTCP_MIN_REPORT_INTERVAL}; +use super::source::SourceState; + +use crate::rtpbin2::RUNTIME; + +const DEFAULT_MIN_RTCP_INTERVAL: Duration = RTCP_MIN_REPORT_INTERVAL; +const DEFAULT_REDUCED_SIZE_RTCP: bool = false; + +static CAT: Lazy = Lazy::new(|| { + gst::DebugCategory::new( + "rtpsend", + gst::DebugColorFlags::empty(), + Some("RTP Sending"), + ) +}); + +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, glib::Enum)] +#[repr(u32)] +#[enum_type(name = "GstRtpSendProfile")] +enum Profile { + #[default] + #[enum_value(name = "AVP profile as specified in RFC 3550", nick = "avp")] + Avp, + #[enum_value(name = "AVPF profile as specified in RFC 4585", nick = "avpf")] + Avpf, +} + +impl From for Profile { + fn from(value: RtpProfile) -> Self { + match value { + RtpProfile::Avp => Self::Avp, + RtpProfile::Avpf => Self::Avpf, + } + } +} + +impl From for RtpProfile { + fn from(value: Profile) -> Self { + match value { + Profile::Avp => Self::Avp, + Profile::Avpf => Self::Avpf, + } + } +} + +#[derive(Debug, Clone)] +struct Settings { + rtp_id: String, + min_rtcp_interval: Duration, + profile: Profile, + reduced_size_rtcp: bool, +} + +impl Default for Settings { + fn default() -> Self { + Settings { + rtp_id: String::from("rtp-id"), + min_rtcp_interval: DEFAULT_MIN_RTCP_INTERVAL, + profile: Profile::default(), + reduced_size_rtcp: DEFAULT_REDUCED_SIZE_RTCP, 
+ } + } +} + +#[derive(Debug)] +#[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] +struct RtcpSendStream { + state: Arc>, + session_id: usize, + sleep: Pin>, +} + +impl RtcpSendStream { + fn new(state: Arc>, session_id: usize) -> Self { + Self { + state, + session_id, + sleep: Box::pin(tokio::time::sleep(Duration::from_secs(1))), + } + } +} + +impl futures::stream::Stream for RtcpSendStream { + type Item = RtcpSendReply; + + fn poll_next( + self: Pin<&mut Self>, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let mut state = self.state.lock().unwrap(); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let mut lowest_wait = None; + if let Some(session) = state.mut_session_by_id(self.session_id) { + let mut session_inner = session.internal_session.inner.lock().unwrap(); + if let Some(reply) = session_inner.session.poll_rtcp_send(now, ntp_now) { + return Poll::Ready(Some(reply)); + } + if let Some(wait) = session_inner.session.poll_rtcp_send_timeout(now) { + if lowest_wait.map_or(true, |lowest_wait| wait < lowest_wait) { + lowest_wait = Some(wait); + } + } + session_inner.rtcp_waker = Some(cx.waker().clone()); + } + drop(state); + + // default to the minimum initial rtcp delay so we don't busy loop if there are no sessions or no + // timeouts available + let lowest_wait = + lowest_wait.unwrap_or(now + crate::rtpbin2::session::RTCP_MIN_REPORT_INTERVAL / 2); + let this = self.get_mut(); + this.sleep.as_mut().reset(lowest_wait.into()); + if !std::future::Future::poll(this.sleep.as_mut(), cx).is_pending() { + // wake us again if the delay is not pending for another go at finding the next timeout + // value + cx.waker().wake_by_ref(); + } + Poll::Pending + } +} + +#[derive(Debug)] +struct SendSession { + internal_session: SharedSession, + + rtcp_task: Mutex>, + + // State for sending RTP streams + rtp_send_sinkpad: Option, + rtp_send_srcpad: Option, + + rtcp_send_srcpad: Option, +} + +impl SendSession { + fn new(shared_state: &SharedRtpState, id: usize, settings: &Settings) -> Self { + let internal_session = shared_state.session_get_or_init(id, || { + SharedSession::new( + id, + settings.profile.into(), + settings.min_rtcp_interval, + settings.reduced_size_rtcp, + ) + }); + let mut inner = internal_session.inner.lock().unwrap(); + inner.session.set_profile(settings.profile.into()); + inner + .session + .set_min_rtcp_interval(settings.min_rtcp_interval); + inner + .session + .set_reduced_size_rtcp(settings.reduced_size_rtcp); + drop(inner); + + Self { + internal_session, + + rtcp_task: Mutex::new(None), + rtp_send_sinkpad: None, + rtp_send_srcpad: None, + rtcp_send_srcpad: None, + } + } + + fn start_rtcp_task(&self, state: Arc>) { + let mut rtcp_task = self.rtcp_task.lock().unwrap(); + + if rtcp_task.is_some() { + return; + } + + // run the runtime from another task to prevent the "start a runtime from within a runtime" panic + // when the plugin is statically linked. 
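+        // The future is wrapped in Abortable so that stop_rtcp_task() can cancel it
+        // via the stored AbortHandle once the session's last pad is released or the
+        // element goes back from READY to NULL.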
+ let (abort_handle, abort_registration) = AbortHandle::new_pair(); + let session_id = self.internal_session.id; + RUNTIME.spawn(async move { + let future = Abortable::new(Self::rtcp_task(state, session_id), abort_registration); + future.await + }); + + rtcp_task.replace(RtcpTask { abort_handle }); + } + + async fn rtcp_task(state: Arc>, session_id: usize) { + let mut stream = RtcpSendStream::new(state.clone(), session_id); + while let Some(reply) = stream.next().await { + let state = state.lock().unwrap(); + let Some(session) = state.session_by_id(session_id) else { + continue; + }; + match reply { + RtcpSendReply::Data(data) => { + let Some(rtcp_srcpad) = session.rtcp_send_srcpad.clone() else { + continue; + }; + drop(state); + RUNTIME.spawn_blocking(move || { + let buffer = gst::Buffer::from_mut_slice(data); + if let Err(e) = rtcp_srcpad.push(buffer) { + gst::warning!(CAT, obj: rtcp_srcpad, "Failed to send rtcp data: flow return {e:?}"); + } + }); + } + RtcpSendReply::SsrcBye(ssrc) => session + .internal_session + .config + .emit_by_name::<()>("bye-ssrc", &[&ssrc]), + } + } + } + + fn stop_rtcp_task(&self) { + let mut rtcp_task = self.rtcp_task.lock().unwrap(); + + if let Some(rtcp) = rtcp_task.take() { + rtcp.abort_handle.abort(); + } + } +} + +#[derive(Debug, Default)] +struct State { + shared_state: Option, + sessions: Vec, + max_session_id: usize, + pads_session_id_map: HashMap, +} + +impl State { + fn session_by_id(&self, id: usize) -> Option<&SendSession> { + self.sessions + .iter() + .find(|session| session.internal_session.id == id) + } + + fn mut_session_by_id(&mut self, id: usize) -> Option<&mut SendSession> { + self.sessions + .iter_mut() + .find(|session| session.internal_session.id == id) + } + + fn stats(&self) -> gst::Structure { + let mut ret = gst::Structure::builder("application/x-rtp2-stats"); + for session in self.sessions.iter() { + let sess_id = session.internal_session.id; + let session = session.internal_session.inner.lock().unwrap(); + + ret = ret.field(sess_id.to_string(), session.stats()); + } + ret.build() + } +} + +pub struct RtpSend { + settings: Mutex, + state: Arc>, +} + +#[derive(Debug)] +struct RtcpTask { + abort_handle: AbortHandle, +} + +impl RtpSend { + fn iterate_internal_links(&self, pad: &gst::Pad) -> gst::Iterator { + let state = self.state.lock().unwrap(); + if let Some(&id) = state.pads_session_id_map.get(pad) { + if let Some(session) = state.session_by_id(id) { + if let Some(ref sinkpad) = session.rtp_send_sinkpad { + if let Some(ref srcpad) = session.rtp_send_srcpad { + if sinkpad == pad { + return gst::Iterator::from_vec(vec![srcpad.clone()]); + } else if srcpad == pad { + return gst::Iterator::from_vec(vec![sinkpad.clone()]); + } + } + } + // nothing to do for rtcp pads + } + } + gst::Iterator::from_vec(vec![]) + } + + fn rtp_send_sink_chain( + &self, + id: usize, + buffer: gst::Buffer, + ) -> Result { + let state = self.state.lock().unwrap(); + let Some(session) = state.session_by_id(id) else { + gst::error!(CAT, "No session?"); + return Err(gst::FlowError::Error); + }; + + let mapped = buffer.map_readable().map_err(|e| { + gst::error!(CAT, imp: self, "Failed to map input buffer {e:?}"); + gst::FlowError::Error + })?; + let rtp = match rtp_types::RtpPacket::parse(&mapped) { + Ok(rtp) => rtp, + Err(e) => { + gst::error!(CAT, imp: self, "Failed to parse input as valid rtp packet: {e:?}"); + return Ok(gst::FlowSuccess::Ok); + } + }; + + let srcpad = session.rtp_send_srcpad.clone().unwrap(); + let session = session.internal_session.clone(); + 
let mut session_inner = session.inner.lock().unwrap(); + drop(state); + + let now = Instant::now(); + loop { + match session_inner.session.handle_send(&rtp, now) { + SendReply::SsrcCollision(_ssrc) => (), // TODO: handle ssrc collision + SendReply::NewSsrc(ssrc, _pt) => { + drop(session_inner); + session.config.emit_by_name::<()>("new-ssrc", &[&ssrc]); + session_inner = session.inner.lock().unwrap(); + } + SendReply::Passthrough => break, + SendReply::Drop => return Ok(gst::FlowSuccess::Ok), + } + } + // TODO: handle other processing + drop(mapped); + drop(session_inner); + srcpad.push(buffer) + } + + fn rtp_send_sink_event(&self, pad: &gst::Pad, event: gst::Event, id: usize) -> bool { + match event.view() { + gst::EventView::Caps(caps) => { + if let Some((pt, clock_rate)) = pt_clock_rate_from_caps(caps.caps()) { + let state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let mut session = session.internal_session.inner.lock().unwrap(); + session.session.set_pt_clock_rate(pt, clock_rate); + session.add_caps(caps.caps_owned()); + } + } else { + gst::warning!(CAT, obj: pad, "input caps are missing payload or clock-rate fields"); + } + gst::Pad::event_default(pad, Some(&*self.obj()), event) + } + gst::EventView::Eos(_eos) => { + let now = Instant::now(); + let state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let mut session = session.internal_session.inner.lock().unwrap(); + let ssrcs = session.session.ssrcs().collect::>(); + // We want to bye all relevant ssrc's here. + // Relevant means they will not be used by something else which means that any + // local send ssrc that is not being used for Sr/Rr reports (internal_ssrc) can + // have the Bye state applied. + let mut all_local = true; + let internal_ssrc = session.session.internal_ssrc(); + for ssrc in ssrcs { + let Some(local_send) = session.session.mut_local_send_source_by_ssrc(ssrc) + else { + if let Some(local_recv) = + session.session.local_receive_source_by_ssrc(ssrc) + { + if local_recv.state() != SourceState::Bye + && Some(ssrc) != internal_ssrc + { + all_local = false; + } + } + continue; + }; + if Some(ssrc) != internal_ssrc { + local_send.mark_bye("End of Stream") + } + } + if all_local { + // if there are no non-local send ssrc's, then we can Bye the entire + // session. 
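+                    // schedule_bye() only queues the BYE; waking the RTCP waker just
+                    // below makes the RTCP send task reconsider its timeout so the
+                    // BYE is actually sent out promptly.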
+ session.session.schedule_bye("End of Stream", now); + } + if let Some(waker) = session.rtcp_waker.take() { + waker.wake(); + } + drop(session); + } + drop(state); + gst::Pad::event_default(pad, Some(&*self.obj()), event) + } + _ => gst::Pad::event_default(pad, Some(&*self.obj()), event), + } + } +} + +#[glib::object_subclass] +impl ObjectSubclass for RtpSend { + const NAME: &'static str = "GstRtpSend"; + type Type = super::RtpSend; + type ParentType = gst::Element; + + fn new() -> Self { + GstRustLogger::install(); + Self { + settings: Default::default(), + state: Default::default(), + } + } +} + +impl ObjectImpl for RtpSend { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + vec![ + glib::ParamSpecString::builder("rtp-id") + .nick("The RTP Connection ID") + .blurb("A connection ID shared with a rtprecv element for implementing both sending and receiving using the same RTP context") + .default_value("rtp-id") + .build(), + glib::ParamSpecUInt::builder("min-rtcp-interval") + .nick("Minimum RTCP interval in ms") + .blurb("Minimum time (in ms) between RTCP reports") + .default_value(DEFAULT_MIN_RTCP_INTERVAL.as_millis() as u32) + .mutable_ready() + .build(), + glib::ParamSpecUInt::builder("stats") + .nick("Statistics") + .blurb("Statistics about the session") + .read_only() + .build(), + glib::ParamSpecEnum::builder::("rtp-profile") + .nick("RTP Profile") + .blurb("RTP Profile to use") + .default_value(Profile::default()) + .mutable_ready() + .build(), + glib::ParamSpecBoolean::builder("reduced-size-rtcp") + .nick("Reduced Size RTCP") + .blurb("Use reduced size RTCP. Only has an effect if rtp-profile=avpf") + .default_value(DEFAULT_REDUCED_SIZE_RTCP) + .mutable_ready() + .build(), + ] + }); + + PROPERTIES.as_ref() + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { + match pspec.name() { + "rtp-id" => { + let mut settings = self.settings.lock().unwrap(); + settings.rtp_id = value.get::().expect("type checked upstream"); + } + "min-rtcp-interval" => { + let mut settings = self.settings.lock().unwrap(); + settings.min_rtcp_interval = Duration::from_millis( + value.get::().expect("type checked upstream").into(), + ); + } + "rtp-profile" => { + let mut settings = self.settings.lock().unwrap(); + settings.profile = value.get::().expect("Type checked upstream"); + } + "reduced-size-rtcp" => { + let mut settings = self.settings.lock().unwrap(); + settings.reduced_size_rtcp = value.get::().expect("Type checked upstream"); + } + _ => unimplemented!(), + } + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + match pspec.name() { + "rtp-id" => { + let settings = self.settings.lock().unwrap(); + settings.rtp_id.to_value() + } + "min-rtcp-interval" => { + let settings = self.settings.lock().unwrap(); + (settings.min_rtcp_interval.as_millis() as u32).to_value() + } + "stats" => { + let state = self.state.lock().unwrap(); + state.stats().to_value() + } + "rtp-profile" => { + let settings = self.settings.lock().unwrap(); + settings.profile.to_value() + } + "reduced-size-rtcp" => { + let settings = self.settings.lock().unwrap(); + settings.reduced_size_rtcp.to_value() + } + _ => unimplemented!(), + } + } + + fn signals() -> &'static [glib::subclass::Signal] { + static SIGNALS: Lazy> = Lazy::new(|| { + vec![glib::subclass::Signal::builder("get-session") + .param_types([u32::static_type()]) + .return_type::() + .action() + .class_handler(|_token, args| { + let element = 
args[0].get::().expect("signal arg"); + let id = args[1].get::().expect("signal arg"); + let send = element.imp(); + let state = send.state.lock().unwrap(); + state + .session_by_id(id as usize) + .map(|sess| sess.internal_session.config.to_value()) + }) + .build()] + }); + + SIGNALS.as_ref() + } +} + +impl GstObjectImpl for RtpSend {} + +impl ElementImpl for RtpSend { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "RTP Session Sender", + "Network/RTP/Filter", + "RTP session management (sender)", + "Matthew Waters ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let rtp_caps = gst::Caps::builder_full() + .structure(gst::Structure::builder("application/x-rtp").build()) + .build(); + let rtcp_caps = gst::Caps::builder_full() + .structure(gst::Structure::builder("application/x-rtcp").build()) + .build(); + + vec![ + gst::PadTemplate::new( + "rtp_sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &rtp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtp_src_%u", + gst::PadDirection::Src, + gst::PadPresence::Sometimes, + &rtp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtcp_src_%u", + gst::PadDirection::Src, + gst::PadPresence::Request, + &rtcp_caps, + ) + .unwrap(), + ] + }); + + PAD_TEMPLATES.as_ref() + } + + fn request_new_pad( + &self, + templ: &gst::PadTemplate, + name: Option<&str>, + _caps: Option<&gst::Caps>, // XXX: do something with caps? + ) -> Option { + let settings = self.settings.lock().unwrap().clone(); + let state_clone = self.state.clone(); + let mut state = self.state.lock().unwrap(); + let max_session_id = state.max_session_id; + let rtp_id = settings.rtp_id.clone(); + + // parse the possibly provided name into a session id or use the default + let sess_parse = move |name: Option<&str>, prefix, default_id| -> Option { + if let Some(name) = name { + name.strip_prefix(prefix).and_then(|suffix| { + if suffix.starts_with("%u") { + Some(default_id) + } else { + suffix.parse::().ok() + } + }) + } else { + Some(default_id) + } + }; + + match templ.name_template() { + "rtp_sink_%u" => sess_parse(name, "rtp_sink_", max_session_id).and_then(|id| { + let new_pad = move |session: &mut SendSession| -> Option<( + gst::Pad, + Option, + usize, + Vec, + )> { + let sinkpad = gst::Pad::builder_from_template(templ) + .chain_function(move |_pad, parent, buffer| { + RtpSend::catch_panic_pad_function( + parent, + || Err(gst::FlowError::Error), + |this| this.rtp_send_sink_chain(id, buffer), + ) + }) + .iterate_internal_links_function(|pad, parent| { + RtpSend::catch_panic_pad_function( + parent, + || gst::Iterator::from_vec(vec![]), + |this| this.iterate_internal_links(pad), + ) + }) + .event_function(move |pad, parent, event| { + RtpSend::catch_panic_pad_function( + parent, + || false, + |this| this.rtp_send_sink_event(pad, event, id), + ) + }) + .flags(gst::PadFlags::PROXY_CAPS) + .name(format!("rtp_sink_{}", id)) + .build(); + let src_templ = self.obj().pad_template("rtp_src_%u").unwrap(); + let srcpad = gst::Pad::builder_from_template(&src_templ) + .iterate_internal_links_function(|pad, parent| { + RtpSend::catch_panic_pad_function( + parent, + || gst::Iterator::from_vec(vec![]), + |this| this.iterate_internal_links(pad), + ) + }) + .name(format!("rtp_src_{}", id)) + .build(); + session.rtp_send_sinkpad = Some(sinkpad.clone()); + session.rtp_send_srcpad = 
Some(srcpad.clone()); + session + .internal_session + .inner + .lock() + .unwrap() + .rtp_send_sinkpad = Some(sinkpad.clone()); + Some((sinkpad, Some(srcpad), id, vec![])) + }; + + let session = state.mut_session_by_id(id); + if let Some(session) = session { + if session.rtp_send_sinkpad.is_some() { + None + } else { + new_pad(session) + } + } else { + let shared_state = state + .shared_state + .get_or_insert_with(|| SharedRtpState::send_get_or_init(rtp_id)); + let mut session = SendSession::new(shared_state, id, &settings); + let ret = new_pad(&mut session); + state.sessions.push(session); + ret + } + }), + "rtcp_src_%u" => sess_parse(name, "rtcp_src_", max_session_id).and_then(|id| { + let new_pad = move |session: &mut SendSession| -> Option<( + gst::Pad, + Option, + usize, + Vec, + )> { + let srcpad = gst::Pad::builder_from_template(templ) + .iterate_internal_links_function(|pad, parent| { + RtpSend::catch_panic_pad_function( + parent, + || gst::Iterator::from_vec(vec![]), + |this| this.iterate_internal_links(pad), + ) + }) + .name(format!("rtcp_src_{}", id)) + .build(); + + let stream_id = format!("{}/rtcp", id); + let stream_start = gst::event::StreamStart::builder(&stream_id).build(); + let seqnum = stream_start.seqnum(); + + let caps = gst::Caps::new_empty_simple("application/x-rtcp"); + let caps = gst::event::Caps::builder(&caps).seqnum(seqnum).build(); + + let segment = gst::FormattedSegment::::new(); + let segment = gst::event::Segment::new(&segment); + + session.rtcp_send_srcpad = Some(srcpad.clone()); + session.start_rtcp_task(state_clone); + Some((srcpad, None, id, vec![stream_start, caps, segment])) + }; + + let session = state.mut_session_by_id(id); + if let Some(session) = session { + if session.rtcp_send_srcpad.is_some() { + None + } else { + new_pad(session) + } + } else { + let shared_state = state + .shared_state + .get_or_insert_with(|| SharedRtpState::send_get_or_init(rtp_id)); + let mut session = SendSession::new(shared_state, id, &settings); + let ret = new_pad(&mut session); + state.sessions.push(session); + ret + } + }), + _ => None, + } + .map(|(pad, otherpad, id, sticky_events)| { + state.max_session_id = (id + 1).max(state.max_session_id); + state.pads_session_id_map.insert(pad.clone(), id); + if let Some(ref pad) = otherpad { + state.pads_session_id_map.insert(pad.clone(), id); + } + + drop(state); + + pad.set_active(true).unwrap(); + for event in sticky_events { + let _ = pad.store_sticky_event(&event); + } + self.obj().add_pad(&pad).unwrap(); + + if let Some(pad) = otherpad { + pad.set_active(true).unwrap(); + self.obj().add_pad(&pad).unwrap(); + } + + pad + }) + } + + fn release_pad(&self, pad: &gst::Pad) { + let mut state = self.state.lock().unwrap(); + let mut removed_pads = vec![]; + let mut removed_session_ids = vec![]; + if let Some(&id) = state.pads_session_id_map.get(pad) { + removed_pads.push(pad.clone()); + if let Some(session) = state.mut_session_by_id(id) { + if Some(pad) == session.rtp_send_sinkpad.as_ref() { + session.rtp_send_sinkpad = None; + session + .internal_session + .inner + .lock() + .unwrap() + .rtp_send_sinkpad = None; + + if let Some(srcpad) = session.rtp_send_srcpad.take() { + removed_pads.push(srcpad); + } + } + + if Some(pad) == session.rtcp_send_srcpad.as_ref() { + session.rtcp_send_srcpad = None; + } + + if session.rtp_send_sinkpad.is_none() && session.rtcp_send_srcpad.is_none() { + removed_session_ids.push(session.internal_session.id); + } + } + } + drop(state); + + for pad in removed_pads.iter() { + let _ = 
pad.set_active(false); + // Pad might not have been added yet if it's a RTP recv srcpad + if pad.has_as_parent(&*self.obj()) { + let _ = self.obj().remove_pad(pad); + } + } + + { + let mut state = self.state.lock().unwrap(); + + for pad in removed_pads.iter() { + state.pads_session_id_map.remove(pad); + } + for id in removed_session_ids { + if let Some(session) = state.session_by_id(id) { + if session.rtp_send_sinkpad.is_none() && session.rtcp_send_srcpad.is_none() { + session.stop_rtcp_task(); + state.sessions.retain(|s| s.internal_session.id != id); + } + } + } + } + + self.parent_release_pad(pad) + } + + #[allow(clippy::single_match)] + fn change_state( + &self, + transition: gst::StateChange, + ) -> Result { + match transition { + gst::StateChange::NullToReady => { + let settings = self.settings.lock().unwrap(); + let rtp_id = settings.rtp_id.clone(); + drop(settings); + + let state_clone = self.state.clone(); + let mut state = self.state.lock().unwrap(); + let empty_sessions = state.sessions.is_empty(); + match state.shared_state.as_mut() { + Some(shared) => { + if !empty_sessions && shared.name() != rtp_id { + let other_name = shared.name().to_owned(); + drop(state); + self.post_error_message(gst::error_msg!(gst::LibraryError::Settings, ["rtp-id {rtp_id} does not match the currently set value {other_name}"])); + return Err(gst::StateChangeError); + } + } + None => { + state.shared_state = Some(SharedRtpState::send_get_or_init(rtp_id.clone())); + } + } + for session in state.sessions.iter_mut() { + if session.rtcp_send_srcpad.is_some() { + session.start_rtcp_task(state_clone.clone()); + } + } + } + _ => (), + } + let success = self.parent_change_state(transition)?; + + match transition { + gst::StateChange::ReadyToNull => { + let mut state = self.state.lock().unwrap(); + for session in state.sessions.iter_mut() { + session.stop_rtcp_task(); + } + } + _ => (), + } + + Ok(success) + } +} + +impl Drop for RtpSend { + fn drop(&mut self) { + if let Some(ref shared_state) = self.state.lock().unwrap().shared_state { + shared_state.unmark_send_outstanding(); + } + } +} diff --git a/net/rtp/src/rtpbin2/session.rs b/net/rtp/src/rtpbin2/session.rs new file mode 100644 index 00000000..32d07e18 --- /dev/null +++ b/net/rtp/src/rtpbin2/session.rs @@ -0,0 +1,2657 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::{HashMap, VecDeque}; +use std::net::SocketAddr; +use std::time::{Duration, Instant, SystemTime}; + +use rtcp_types::*; +use rtp_types::RtpPacket; + +use rand::prelude::*; + +use crate::rtpbin2::source::SourceRecvReply; + +use super::source::{ + LocalReceiveSource, LocalSendSource, RemoteReceiveSource, RemoteSendSource, SourceState, +}; +use super::time::system_time_to_ntp_time_u64; + +use gst::prelude::MulDiv; + +// TODO: make configurable +pub const RTCP_MIN_REPORT_INTERVAL: Duration = Duration::from_secs(5); + +const RTCP_SOURCE_TIMEOUT_N_INTERVALS: u32 = 5; +const RTCP_ADDRESS_CONFLICT_TIMEOUT: Duration = RTCP_MIN_REPORT_INTERVAL.saturating_mul(12); +// 5% of 8kB/s +const RTCP_MIN_BANDWIDTH: usize = 400; +const RTCP_MTU: usize = 1200; + +const UDP_IP_OVERHEAD_BYTES: usize = 28; + +#[derive(Debug, Default)] +struct RtcpTimeMembers { + time: Option, + p_members: usize, +} + +#[derive(Debug)] +struct ByeState { + members: usize, + pmembers: usize, +} + +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub enum RtpProfile { + #[default] + Avp, + Avpf, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum KeyUnitRequestType { + Pli, + Fir(u32), +} + +impl RtpProfile 
{ + fn is_feedback(&self) -> bool { + self == &Self::Avpf + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum RequestEarlyRtcpResult { + NotScheduled, + Scheduled, + TimerReconsideration, +} + +#[derive(Debug)] +pub struct Session { + // settings + min_rtcp_interval: Duration, + profile: RtpProfile, + reduced_size_rtcp: bool, + // state + local_senders: HashMap, + local_receivers: HashMap, + remote_receivers: HashMap, + remote_senders: HashMap, + average_rtcp_size: usize, + last_sent_data: Option, + hold_buffer_counter: usize, + sdes: HashMap, + pt_map: HashMap, + conflicting_addresses: HashMap, + // used when we have not sent anything but need a ssrc for Rr + internal_rtcp_sender_src: Option, + bye_state: Option, + is_point_to_point: bool, + + // rtcp scheduling state + // T_rr: holds the interval used to calculate the current `next_rtcp_send` + rtcp_interval: Option, + // tp: last time that any rtcp was handled + last_rtcp_handle_time: Option, + // tn: holds the next regular rtcp send time and the number of members at the + // time when the time was calculated + next_rtcp_send: RtcpTimeMembers, + // T_rr_last: last two times a regular rtcp packet was sent + last_rtcp_sent_times: VecDeque, + // time for the next early rtcp to be sent + next_early_rtcp_time: Option, + pending_rtcp_send: VecDeque, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RecvReply { + /// A new ssrc was discovered. If you want to change things about the new ssrc, then do it now. + /// Call recv() again. + NewSsrc(u32, u8), + /// hold this buffer for later and give it the relevant id. The id will be used in a Drop, or + /// Forward return value + Hold(usize), + /// Drop a buffer by id. Should continue calling with the same input until not Drop or Forward + Drop(usize), + /// Forward a held buffer by id. Should continue calling with the same input until not Drop or Forward. + Forward(usize), + /// Forward the input buffer + Passthrough, + /// Ignore this buffer and do not passthrough + Ignore, + /// A ssrc collision has been detected for the provided ssrc. Sender (us) should change ssrc. + SsrcCollision(u32), +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum SendReply { + /// A new ssrc was discovered. If you want to change things about the new ssrc, then do it now. + /// Call send() again. + NewSsrc(u32, u8), + /// Forward the input buffer + Passthrough, + /// Drop this buffer + Drop, + /// SSRC collision detected, Sender (us) should change our SSRC and this packet must be dropped + SsrcCollision(u32), +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum RtcpRecvReply { + /// A new ssrc was discovered. If you want to change things about the new ssrc, then do it now + /// before pushing the buffer again + NewSsrc(u32), + /// SSRC collision detected, Sender (us) should change our SSRC and this packet must be dropped + SsrcCollision(u32), + /// RTCP timer needs to be reconsidered. 
Call poll_rtcp_send_timeout() to get the new time + TimerReconsideration, + /// Request a key unit for the given SSRC of ours + RequestKeyUnit { ssrcs: Vec, fir: bool }, + /// A new cname to ssrc mapping was found in a sdes: (cname, ssrc) + NewCName((String, u32)), + /// A new RTP to NTP mapping was received for an ssrc: (ssrc, RTP, NTP) + NewRtpNtp((u32, u32, u64)), + /// A ssrc has byed + SsrcBye(u32), +} + +#[derive(Debug)] +pub enum RtcpSendReply { + /// Data needs to be sent + Data(Vec), + /// A ssrc has byed + SsrcBye(u32), +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum RequestRemoteKeyUnitReply { + /// RTCP timer needs to be reconsidered. Call poll_rtcp_send_timeout() to get the new time + TimerReconsideration, +} + +impl Session { + pub fn new() -> Self { + let cname = generate_cname(); + let mut sdes = HashMap::new(); + sdes.insert(SdesItem::CNAME, cname); + Self { + min_rtcp_interval: RTCP_MIN_REPORT_INTERVAL, + profile: RtpProfile::default(), + reduced_size_rtcp: false, + local_senders: HashMap::new(), + // also known as remote_senders + local_receivers: HashMap::new(), + remote_receivers: HashMap::new(), + remote_senders: HashMap::new(), + last_rtcp_sent_times: VecDeque::new(), + rtcp_interval: None, + next_rtcp_send: RtcpTimeMembers { + time: None, + p_members: 0, + }, + average_rtcp_size: 100, + last_sent_data: None, + hold_buffer_counter: 0, + sdes, + pt_map: HashMap::new(), + conflicting_addresses: HashMap::new(), + internal_rtcp_sender_src: None, + bye_state: None, + next_early_rtcp_time: None, + last_rtcp_handle_time: None, + is_point_to_point: true, + pending_rtcp_send: VecDeque::new(), + } + } + + /// Set the minimum (regular) RTCP interval to use for this session + pub fn set_min_rtcp_interval(&mut self, min_rtcp_interval: Duration) { + self.min_rtcp_interval = min_rtcp_interval; + } + + /// Set the RTP profile to use. + pub fn set_profile(&mut self, profile: RtpProfile) { + self.profile = profile; + } + + /// Set usage of reduced size RTCP + pub fn set_reduced_size_rtcp(&mut self, reduced_size_rtcp: bool) { + self.reduced_size_rtcp = reduced_size_rtcp; + } + + fn n_members(&self) -> usize { + self.bye_state + .as_ref() + .map(|state| state.members) + .unwrap_or_else(|| { + self.local_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .local_receivers + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .remote_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .remote_receivers + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + }) + } + + fn n_senders(&self) -> usize { + self.bye_state.as_ref().map(|_state| 0).unwrap_or_else(|| { + self.local_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .remote_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + }) + } + + fn p_members(&self) -> usize { + self.bye_state + .as_ref() + .map(|state| state.pmembers) + .unwrap_or(self.next_rtcp_send.p_members) + } + + fn update_point_to_point(&mut self) { + // This definition of point to point is from RFC 8108 + // i.e. If there are multiple remote CNAMEs, then we are not point to point. 
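+        // Concretely: the flag stays true as long as every remote sender or receiver
+        // that has reported a CNAME reports the same one; sources without a CNAME yet
+        // do not count against it.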
+ // + // XXX: We currently don't provide external overriding for this + let mut cname = None; + self.is_point_to_point = self + .remote_senders + .values() + .filter_map(|source| source.sdes().get(&SdesItem::CNAME)) + .chain( + self.remote_receivers + .values() + .filter_map(|source| source.sdes().get(&SdesItem::CNAME)), + ) + .all(|cn| { + let ret = cname.is_none() || cname == Some(cn); + cname = cname.or(Some(cn)); + ret + }); + } + + /// Set the RTP clock rate for a particular payload type + pub fn set_pt_clock_rate(&mut self, pt: u8, clock_rate: u32) { + self.pt_map.insert(pt, clock_rate); + } + + /// Retrieve the RTP clock rate for a particular payload type + pub fn clock_rate_from_pt(&self, pt: u8) -> Option { + self.pt_map.get(&pt).copied() + } + + fn handle_ssrc_conflict(&mut self, addr: SocketAddr, now: Instant) -> bool { + if let Some(time) = self.conflicting_addresses.get_mut(&addr) { + trace!("ignoring looped packet from known collision address {addr:?}"); + *time = now; + false + } else { + trace!("New collision address {addr:?}"); + self.conflicting_addresses.insert(addr, now); + true + } + } + + /// Handle receiving an RTP packet. The [`RecvRecply`] return value outlines what the caller + /// must do with the packet. + pub fn handle_recv( + &mut self, + rtp: &RtpPacket, + from: Option, + now: Instant, + ) -> RecvReply { + trace!( + "receive rtp from:{from:?} at {now:?}, ssrc:{}, pt:{}, seqno:{}, rtp ts:{}, bytes:{}", + rtp.ssrc(), + rtp.payload_type(), + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload().len() + ); + if let Some(addr) = from { + // handle possible collisions + if let Some(_source) = self.local_senders.get(&rtp.ssrc()) { + if self.handle_ssrc_conflict(addr, now) { + return RecvReply::SsrcCollision(rtp.ssrc()); + } else { + return RecvReply::Ignore; + } + } else if let Some(recv) = self.remote_receivers.remove(&rtp.ssrc()) { + let mut sender = recv.into_send(); + sender.set_rtp_from(from); + self.remote_senders.insert(rtp.ssrc(), sender); + } else if let Some(recv) = self.remote_senders.get_mut(&rtp.ssrc()) { + if let Some(from_addr) = recv.rtp_from() { + if addr != from_addr { + // this is favour old source behaviour + return RecvReply::Ignore; + } + } else { + recv.set_rtp_from(from); + } + } + } + + // TODO: handle CSRCs + + let clock_rate = self.clock_rate_from_pt(rtp.payload_type()); + + if let Some(source) = self.remote_senders.get_mut(&rtp.ssrc()) { + match source.recv_packet( + rtp.payload().len() as u32, + now, + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload_type(), + clock_rate, + self.hold_buffer_counter, + ) { + SourceRecvReply::Hold(id) => { + self.hold_buffer_counter += 1; + RecvReply::Hold(id) + } + SourceRecvReply::Drop(id) => RecvReply::Drop(id), + SourceRecvReply::Ignore => RecvReply::Ignore, + SourceRecvReply::Forward(id) => RecvReply::Forward(id), + SourceRecvReply::Passthrough => RecvReply::Passthrough, + } + } else { + let mut source = RemoteSendSource::new(rtp.ssrc()); + source.set_rtp_from(from); + self.remote_senders.insert(rtp.ssrc(), source); + trace!("new receive ssrc:{}, pt:{}", rtp.ssrc(), rtp.payload_type()); + RecvReply::NewSsrc(rtp.ssrc(), rtp.payload_type()) + } + } + + /// Handle sending a RTP packet. The [`SendReply`] return value indicates what the caller + /// must do with this packet. 
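// Illustrative sketch (editorial, not part of this patch): one way a caller could
// drive handle_recv() and honour the RecvReply contract documented above. The
// `held` map and the returned list of buffers to forward are assumptions made for
// the example; errors are unwrapped for brevity. The send path is symmetric:
// handle_send() returns a SendReply, and set_pt_clock_rate() must have been called
// for the packet's payload type or the packet is dropped.
fn example_handle_recv(
    session: &mut Session,
    held: &mut HashMap<usize, Vec<u8>>,
    data: &[u8],
    from: Option<SocketAddr>,
    now: Instant,
) -> Vec<Vec<u8>> {
    let packet = RtpPacket::parse(data).unwrap();
    let mut forward = Vec::new();
    loop {
        match session.handle_recv(&packet, from, now) {
            // set up anything needed for the new ssrc, then ask again
            RecvReply::NewSsrc(_ssrc, _pt) => continue,
            // stash this input until a later Drop/Forward names `id`
            RecvReply::Hold(id) => {
                held.insert(id, data.to_vec());
                break;
            }
            // a previously held buffer may now go downstream; keep calling with the same input
            RecvReply::Forward(id) => forward.extend(held.remove(&id)),
            // a previously held buffer is discarded; keep calling with the same input
            RecvReply::Drop(id) => {
                held.remove(&id);
            }
            RecvReply::Passthrough => {
                forward.push(data.to_vec());
                break;
            }
            RecvReply::Ignore => break,
            // we should pick a new ssrc for sending before continuing
            RecvReply::SsrcCollision(_ssrc) => break,
        }
    }
    forward
}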
+ pub fn handle_send(&mut self, rtp: &RtpPacket, now: Instant) -> SendReply { + trace!( + "sending at {now:?} ssrc:{}, pt:{}, seqno:{}, rtp ts:{}, bytes:{}", + rtp.ssrc(), + rtp.payload_type(), + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload().len() + ); + self.last_sent_data = Some(now); + + // handle possible collision + if let Some(source) = self.remote_senders.get(&rtp.ssrc()) { + if let Some(rtp_from) = source.rtp_from().or(source.rtcp_from()) { + if self.handle_ssrc_conflict(rtp_from, now) { + return SendReply::SsrcCollision(rtp.ssrc()); + } + } + return SendReply::Drop; + } else if let Some(source) = self.remote_receivers.get(&rtp.ssrc()) { + if let Some(rtcp_from) = source.rtcp_from() { + if self.handle_ssrc_conflict(rtcp_from, now) { + return SendReply::SsrcCollision(rtp.ssrc()); + } + } + return SendReply::Drop; + } + + if let Some(source) = self.local_senders.get_mut(&rtp.ssrc()) { + if source.state() != SourceState::Normal { + warn!( + "source {} is in state {:?}, dropping send", + source.ssrc(), + source.state() + ); + return SendReply::Drop; + } + source.set_last_activity(now); + if let Some(_clock_rate) = self.pt_map.get(&rtp.payload_type()) { + source.sent_packet( + rtp.payload().len(), + now, + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload_type(), + ); + SendReply::Passthrough + } else { + trace!("no clock rate for pt:{}, dropping", rtp.payload_type()); + SendReply::Drop + } + } else { + self.local_receivers.remove_entry(&rtp.ssrc()); + let mut source = LocalSendSource::new(rtp.ssrc()); + source.set_last_activity(now); + source.set_state(SourceState::Normal); + for (k, v) in self.sdes.iter() { + source.set_sdes_item(*k, v.as_bytes()); + } + if self.local_senders.is_empty() && self.rtcp_reverse_consideration(0, now) { + // TODO: signal updated timeout + } + self.local_senders.insert(rtp.ssrc(), source); + info!("new send ssrc:{}, pt:{}", rtp.ssrc(), rtp.payload_type()); + SendReply::NewSsrc(rtp.ssrc(), rtp.payload_type()) + } + } + + fn update_rtcp_average(&mut self, additional_size: usize) { + if self.average_rtcp_size == 0 { + self.average_rtcp_size = additional_size; + } else { + self.average_rtcp_size = (additional_size + self.average_rtcp_size * 15) / 16; + } + } + + fn handle_rb( + &mut self, + sender_ssrc: u32, + rb: ReportBlock<'_>, + from: Option, + now: Instant, + ntp_time: SystemTime, + ) -> Option { + let mut ret = None; + + if let Some(source) = self.local_senders.get_mut(&rb.ssrc()) { + source.add_last_rb(sender_ssrc, rb, now, ntp_time); + source.set_last_activity(now); + } else { + if let Some(source) = self.remote_receivers.remove(&rb.ssrc()) { + let sender = source.into_send(); + self.remote_senders.insert(rb.ssrc(), sender); + } + + let source = self.remote_senders.entry(rb.ssrc()).or_insert_with(|| { + ret = Some(RtcpRecvReply::NewSsrc(rb.ssrc())); + RemoteSendSource::new(rb.ssrc()) + }); + source.set_rtcp_from(from); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + source.add_last_rb(sender_ssrc, rb, now, ntp_time); + } + + ret + } + + fn rtcp_reverse_consideration(&mut self, initial_n_members: usize, now: Instant) -> bool { + let n_members = self.n_members(); + if n_members >= self.p_members() { + trace!("rtcp reverse consideration not applied, n_members >= p_members"); + // this only applies if nmembers is less than pmembers + return false; + } + if let Some(ref mut prev) = self.next_rtcp_send.time { + if now > *prev { + trace!("rtcp reverse consideration not applied, last timeout in the past"); + // timer 
should have fired already, nothing to do + return false; + } + let dur = prev.saturating_duration_since(now); + if self.next_rtcp_send.p_members > 0 { + let member_factor = initial_n_members as f64 / self.next_rtcp_send.p_members as f64; + // FIXME: Does something have to be adjusted here in feedback profiles for + // T_rr_interval? + *prev = now + dur.mul_f64(member_factor); + self.next_rtcp_send.p_members = n_members; + if let Some(last_rtcp) = self.last_rtcp_sent_times.front_mut() { + let dur = last_rtcp.saturating_duration_since(now); + *last_rtcp = now - dur.mul_f64(member_factor); + } + trace!("rtcp reverse consideration applied"); + return true; + } + trace!("rtcp reverse consideration not applied, p_members <= 0"); + } else { + trace!("rtcp reverse consideration not applied, have not sent initial rtcp"); + } + false + } + + /// Handle receiving a RTCP packet. The returned [`RtcpRecvReply`]s indicates anything the + /// caller may need to handle. + pub fn handle_rtcp_recv( + &mut self, + rtcp: Compound, + rtcp_len: usize, + from: Option, + now: Instant, + ntp_time: SystemTime, + ) -> Vec { + trace!("Receive RTCP at {now:?}, ntp:{ntp_time:?}"); + // TODO: handle from: Option + let mut replies = vec![]; + + if self.bye_state.is_none() { + self.update_rtcp_average(rtcp_len + UDP_IP_OVERHEAD_BYTES); + } + + let mut reconsidered_timeout = false; + for (i, p) in rtcp.enumerate() { + trace!("recv rtcp {i}th packet: {p:?}"); + match p { + // TODO: actually handle App packets + Ok(Packet::App(_app)) => (), + Ok(Packet::Bye(bye)) => { + // https://datatracker.ietf.org/doc/html/rfc3550#section-6.3.4 + let n_members = self.n_members(); + let mut check_reconsideration = false; + for ssrc in bye.ssrcs() { + if let Some(source) = self.remote_senders.get_mut(&ssrc) { + source.set_rtcp_from(from); + source.set_last_activity(now); + source.set_state(SourceState::Bye); + check_reconsideration = true; + replies.push(RtcpRecvReply::SsrcBye(ssrc)); + } else if let Some(source) = self.remote_receivers.get_mut(&ssrc) { + source.set_last_activity(now); + source.set_state(SourceState::Bye); + check_reconsideration = true; + replies.push(RtcpRecvReply::SsrcBye(ssrc)); + } + // XXX: do we need to handle an unknown ssrc here? 
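                    // Note: rtcp_reverse_consideration() (defined above, invoked just below
                    // for BYEs) implements the "reverse reconsideration" of RFC 3550
                    // section 6.3.4: the time remaining until `next_rtcp_send` is scaled by
                    // initial_n_members / p_members. For example, with p_members = 10,
                    // 4 members remaining and 2s left on the timer, the next report is
                    // pulled in to roughly now + 0.8s.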
+ // TODO: signal rtcp timeout needs recalcuating + } + if let Some(ref mut state) = self.bye_state { + state.members += 1; + let n_members = state.members; + self.update_rtcp_average(rtcp_len + UDP_IP_OVERHEAD_BYTES); + if check_reconsideration + && self.rtcp_reverse_consideration(n_members, now) + && !reconsidered_timeout + { + replies.push(RtcpRecvReply::TimerReconsideration); + reconsidered_timeout = true; + } + } else if check_reconsideration + && self.rtcp_reverse_consideration(n_members, now) + && !reconsidered_timeout + { + replies.push(RtcpRecvReply::TimerReconsideration); + reconsidered_timeout = true; + } + } + Ok(Packet::Rr(rr)) => { + if let Some(source) = self.remote_senders.remove(&rr.ssrc()) { + let receiver = source.into_receive(); + self.remote_receivers.insert(rr.ssrc(), receiver); + } + + let source = self.remote_receivers.entry(rr.ssrc()).or_insert_with(|| { + replies.push(RtcpRecvReply::NewSsrc(rr.ssrc())); + RemoteReceiveSource::new(rr.ssrc()) + }); + source.set_rtcp_from(from); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + + for rb in rr.report_blocks() { + if let Some(reply) = self.handle_rb(rr.ssrc(), rb, from, now, ntp_time) { + replies.push(reply); + } + } + } + Ok(Packet::Sr(sr)) => { + if let Some(addr) = from { + if self.local_senders.get(&sr.ssrc()).is_some() + || self.local_receivers.get(&sr.ssrc()).is_some() + { + if self.handle_ssrc_conflict(addr, now) { + replies.push(RtcpRecvReply::SsrcCollision(sr.ssrc())); + } + continue; + } + } + + if let Some(source) = self.remote_receivers.remove(&sr.ssrc()) { + let sender = source.into_send(); + self.remote_senders.insert(sr.ssrc(), sender); + } + + let source = self.remote_senders.entry(sr.ssrc()).or_insert_with(|| { + replies.push(RtcpRecvReply::NewSsrc(sr.ssrc())); + RemoteSendSource::new(sr.ssrc()) + }); + source.set_rtcp_from(from); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + source.set_last_received_sr( + ntp_time, + sr.ntp_timestamp().into(), + sr.rtp_timestamp(), + sr.octet_count(), + sr.packet_count(), + ); + + replies.push(RtcpRecvReply::NewRtpNtp(( + sr.ssrc(), + sr.rtp_timestamp(), + sr.ntp_timestamp(), + ))); + + for rb in sr.report_blocks() { + if let Some(reply) = self.handle_rb(sr.ssrc(), rb, from, now, ntp_time) { + replies.push(reply); + } + } + } + Ok(Packet::Sdes(sdes)) => { + for chunk in sdes.chunks() { + for item in chunk.items() { + if let Some(addr) = from { + if self.local_senders.get(&chunk.ssrc()).is_some() + || self.local_receivers.get(&chunk.ssrc()).is_some() + { + if self.handle_ssrc_conflict(addr, now) { + replies.push(RtcpRecvReply::SsrcCollision(chunk.ssrc())); + } + continue; + } + } + if !matches!( + item.type_(), + SdesItem::CNAME + | SdesItem::NAME + | SdesItem::EMAIL + | SdesItem::PHONE + | SdesItem::LOC + | SdesItem::TOOL + | SdesItem::NOTE + ) { + // FIXME: handle unknown sdes items + continue; + } + if let Some(source) = self.remote_senders.get_mut(&chunk.ssrc()) { + source.set_rtcp_from(from); + source.received_sdes(item.type_(), item.value()); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + } else { + let source = self + .remote_receivers + .entry(chunk.ssrc()) + .or_insert_with(|| { + replies.push(RtcpRecvReply::NewSsrc(chunk.ssrc())); + RemoteReceiveSource::new(chunk.ssrc()) + }); + source.set_rtcp_from(from); + source.received_sdes(item.type_(), item.value()); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + } + + if item.type_() == SdesItem::CNAME { + 
if let Ok(s) = std::str::from_utf8(item.value()) { + replies.push(RtcpRecvReply::NewCName(( + s.to_owned(), + chunk.ssrc(), + ))); + } + } + } + } + } + Ok(Packet::PayloadFeedback(pf)) => { + if let Ok(_pli) = pf.parse_fci::() { + self.handle_remote_request_key_unit( + now, + &mut replies, + false, + pf.sender_ssrc(), + std::iter::once(pf.media_ssrc()), + ); + } else if let Ok(fir) = pf.parse_fci::() { + self.handle_remote_request_key_unit( + now, + &mut replies, + true, + pf.sender_ssrc(), + // TODO: What to do with the sequence? + fir.entries().map(|entry| entry.ssrc()), + ); + } + } + Ok(Packet::TransportFeedback(_)) | Ok(Packet::Unknown(_)) => (), + // TODO: in RFC4585 profile, need to listen for feedback messages and remove any + // that we would have sent + Err(_) => (), + } + } + self.update_point_to_point(); + replies + } + + fn handle_remote_request_key_unit( + &mut self, + now: Instant, + replies: &mut Vec, + fir: bool, + sender_ssrc: u32, + media_ssrcs: impl Iterator, + ) { + if !self.remote_receivers.contains_key(&sender_ssrc) + && !self.remote_senders.contains_key(&sender_ssrc) + { + trace!("No remote source known for sender ssrc {sender_ssrc}"); + } + + let mut ssrcs = Vec::new(); + for media_ssrc in media_ssrcs { + let Some(sender) = self.local_senders.get(&media_ssrc) else { + trace!("Not a local sender for ssrc {media_ssrc}"); + continue; + }; + + let Some(rb) = sender + .received_report_blocks() + .find(|(ssrc, _rb)| *ssrc == sender_ssrc) + else { + trace!("No RB for sender ssrc {sender_ssrc} yet"); + continue; + }; + + if let Some(sender) = self.remote_senders.get_mut(&sender_ssrc) { + if !sender.remote_request_key_unit_allowed(now, rb.1) { + trace!("Requesting key-unit not allowed again yet"); + continue; + } + } else if let Some(sender) = self.remote_receivers.get_mut(&sender_ssrc) { + if !sender.remote_request_key_unit_allowed(now, rb.1) { + trace!("Requesting key-unit not allowed again yet"); + continue; + } + } + + trace!("Requesting key-unit from sender ssrc {sender_ssrc} for media ssrc {media_ssrc} (fir: {fir})"); + ssrcs.push(media_ssrc); + } + + if !ssrcs.is_empty() { + replies.push(RtcpRecvReply::RequestKeyUnit { ssrcs, fir }); + } + } + + fn generate_sr<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, + now: Instant, + ntp_now: SystemTime, + minimum: bool, // RFC 4585 + ssrcs_reported: &mut Vec, + ) -> CompoundBuilder<'a> { + // Don't include in an early reduced-size RTCP packet + if minimum && self.reduced_size_rtcp_allowed() { + return rtcp; + } + + let ntp_time = system_time_to_ntp_time_u64(ntp_now); + if self + .local_senders + .values() + .any(|source| match source.state() { + SourceState::Normal => true, + SourceState::Probation(_) => false, + SourceState::Bye => source.bye_sent_time().is_none(), + }) + { + let mut sender_srs = vec![]; + for sender in self.local_senders.values() { + if sender.state() != SourceState::Normal { + continue; + } + if sender.state() == SourceState::Bye && sender.bye_sent_time().is_some() { + continue; + } + // pick one of the sender ssrc's if we are going to + if self.internal_rtcp_sender_src.is_none() { + self.internal_rtcp_sender_src = Some(sender.ssrc()); + } + // get last rtp sent timestamp + let rtp_timestamp = sender + .last_rtp_sent_timestamp() + .map(|(last_rtp_ts, instant)| { + let dur_since_last_rtp = now.duration_since(instant); + trace!("last_rtp_ts: {last_rtp_ts}, dur since last rtp: {dur_since_last_rtp:?}"); + // get the clock-rate for this source + last_rtp_ts + sender + .payload_type() + .and_then(|pt| 
self.clock_rate_from_pt(pt)) + .and_then(|clock_rate| { + // assume that the rtp times and clock times advance at a rate + // close to 1.0 and do a direct linear extrapolation to get the rtp + // time for 'now' + (dur_since_last_rtp.as_nanos() as u64).mul_div_round( + clock_rate as u64, + gst::ClockTime::SECOND.nseconds(), + ).map(|v| ((v & 0xffff_ffff) as u32)) + }) + .unwrap_or(0) + }) + .unwrap_or(0); + + let mut sr = SenderReport::builder(sender.ssrc()) + .packet_count((sender.packet_count() & 0xffff_ffff) as u32) + .octet_count((sender.octet_count() & 0xffff_ffff) as u32) + .ntp_timestamp(ntp_time.as_u64()) + .rtp_timestamp(rtp_timestamp); + + sender_srs.push((sender.ssrc(), ntp_now, ntp_time, rtp_timestamp)); + if !ssrcs_reported.iter().any(|&ssrc| ssrc == sender.ssrc()) { + ssrcs_reported.push(sender.ssrc()); + } + + // Don't include RBs in minimal RTCP packets + if !minimum { + for sender in self.remote_senders.values() { + if sender.state() != SourceState::Normal { + continue; + } + let rb = sender.generate_report_block(ntp_now); + sr = sr.add_report_block(rb.into()); + if !ssrcs_reported.iter().any(|&ssrc| ssrc == sender.ssrc()) { + ssrcs_reported.push(sender.ssrc()); + } + } + } + rtcp = rtcp.add_packet(sr); + + // A minimal RTCP packet only contains a single SR + if minimum { + break; + } + } + for (ssrc, ntp_now, ntp_time, rtp_timestamp) in sender_srs { + self.local_senders + .entry(ssrc) + .and_modify(|sender| sender.take_sr_snapshot(ntp_now, ntp_time, rtp_timestamp)); + } + } + rtcp + } + + fn reduced_size_rtcp_allowed(&self) -> bool { + self.reduced_size_rtcp && matches!(self.profile, RtpProfile::Avpf) + } + + fn have_ssrc(&self, ssrc: u32) -> bool { + self.local_senders.get(&ssrc).is_some() + || self.local_receivers.get(&ssrc).is_some() + || self.remote_senders.get(&ssrc).is_some() + || self.remote_receivers.get(&ssrc).is_some() + } + + pub fn internal_ssrc(&self) -> Option { + self.internal_rtcp_sender_src + } + + fn ensure_internal_send_src(&mut self) -> u32 { + match self.internal_rtcp_sender_src { + Some(ssrc) => ssrc, + None => loop { + let ssrc = generate_ssrc(); + if !self.have_ssrc(ssrc) { + let mut source = LocalReceiveSource::new(ssrc); + source.set_state(SourceState::Normal); + for (k, v) in self.sdes.iter() { + source.set_sdes_item(*k, v.as_bytes()); + } + self.local_receivers.insert(ssrc, source); + self.internal_rtcp_sender_src = Some(ssrc); + return ssrc; + } + }, + } + } + + fn generate_rr<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, + now: Instant, + ntp_now: SystemTime, + minimum: bool, // RFC 4585 + ssrcs_reported: &mut Vec, + ) -> CompoundBuilder<'a> { + // Don't include in an early reduced-size RTCP packet + if minimum && self.reduced_size_rtcp_allowed() { + return rtcp; + } + + if self + .local_senders + .values() + .all(|source| match source.state() { + SourceState::Normal => false, + SourceState::Probation(_) => true, + SourceState::Bye => source.bye_sent_time().is_some(), + }) + { + let ssrc = self.ensure_internal_send_src(); + self.local_senders + .entry(ssrc) + .and_modify(|source| source.set_last_activity(now)); + self.local_receivers + .entry(ssrc) + .and_modify(|source| source.set_last_activity(now)); + let mut rr = ReceiverReport::builder(ssrc); + + // Don't include RBs in minimal RTCP packets + if !minimum { + for sender in self.remote_senders.values() { + if sender.state() != SourceState::Normal { + continue; + } + let rb = sender.generate_report_block(ntp_now); + rr = rr.add_report_block(rb.into()); + if 
!ssrcs_reported.iter().any(|&ssrc| ssrc == sender.ssrc()) { + ssrcs_reported.push(sender.ssrc()); + } + } + } + + rtcp = rtcp.add_packet(rr); + } + rtcp + } + + fn generate_sdes<'a>( + &self, + rtcp: CompoundBuilder<'a>, + minimum: bool, // RFC 4585 + ) -> CompoundBuilder<'a> { + // Don't include in an early reduced-size RTCP packet + if minimum && self.reduced_size_rtcp_allowed() { + return rtcp; + } + + let mut sdes = Sdes::builder(); + let mut have_chunk = false; + for sender in self.local_senders.values() { + let sdes_map = sender.sdes(); + if !sdes_map.is_empty() { + let mut chunk = SdesChunk::builder(sender.ssrc()); + for (ty, val) in sdes_map { + if !minimum || *ty == SdesItem::CNAME { + chunk = chunk.add_item_owned(SdesItem::builder(*ty, val)); + } + } + have_chunk = true; + sdes = sdes.add_chunk(chunk); + } + } + for receiver in self.local_receivers.values() { + let sdes_map = receiver.sdes(); + if !sdes_map.is_empty() { + let mut chunk = SdesChunk::builder(receiver.ssrc()); + for (ty, val) in sdes_map { + if !minimum || *ty == SdesItem::CNAME { + chunk = chunk.add_item_owned(SdesItem::builder(*ty, val)); + } + } + have_chunk = true; + sdes = sdes.add_chunk(chunk); + } + } + if have_chunk { + rtcp.add_packet(sdes) + } else { + rtcp + } + } + + fn find_bye_sources(&mut self) -> HashMap> { + let mut reason_ssrcs = HashMap::new(); + for source in self + .local_senders + .values_mut() + .filter(|source| source.state() == SourceState::Bye) + { + if source.bye_sent_time().is_none() { + let reason = source + .bye_reason() + .cloned() + .unwrap_or_else(|| String::from("Bye")); + let ssrcs = reason_ssrcs.entry(reason).or_insert_with(Vec::new); + ssrcs.push(source.ssrc()); + } + } + for source in self + .local_receivers + .values_mut() + .filter(|source| source.state() == SourceState::Bye) + { + if source.bye_sent_time().is_none() { + let reason = source + .bye_reason() + .cloned() + .unwrap_or_else(|| String::from("Bye")); + let ssrcs = reason_ssrcs.entry(reason).or_insert_with(Vec::new); + ssrcs.push(source.ssrc()); + } + } + reason_ssrcs + } + + fn generate_bye<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, + now: Instant, + ) -> CompoundBuilder<'a> { + let bye_reason_ssrcs = self.find_bye_sources(); + if !bye_reason_ssrcs.is_empty() { + for (reason, ssrcs) in bye_reason_ssrcs.iter() { + let mut bye = Bye::builder().reason_owned(reason); + for ssrc in ssrcs.iter() { + self.pending_rtcp_send + .push_front(RtcpSendReply::SsrcBye(*ssrc)); + bye = bye.add_source(*ssrc); + if let Some(source) = self.local_senders.get_mut(ssrc) { + source.bye_sent_at(now); + } else if let Some(source) = self.local_receivers.get_mut(ssrc) { + source.bye_sent_at(now); + } + } + rtcp = rtcp.add_packet(bye); + } + } + rtcp + } + + fn generate_pli<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, + _now: Instant, + ) -> CompoundBuilder<'a> { + let ssrc = self.ensure_internal_send_src(); + + for source in self.remote_senders.values_mut() { + let pli = source.generate_pli(); + if let Some(pli) = pli { + debug!("Generating PLI for sender {}: {:?}", source.ssrc(), pli); + rtcp = rtcp.add_packet( + rtcp_types::PayloadFeedback::builder_owned(pli) + .sender_ssrc(ssrc) + .media_ssrc(source.ssrc()), + ); + } + } + rtcp + } + + fn generate_fir<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, + _now: Instant, + ) -> CompoundBuilder<'a> { + let mut have_fir = false; + let mut fir = rtcp_types::Fir::builder(); + + for source in self.remote_senders.values_mut() { + fir = source.generate_fir(fir, &mut have_fir); + } + + if 
have_fir { + let ssrc = self.ensure_internal_send_src(); + + debug!("Generating FIR: {:?}", fir); + rtcp = + rtcp.add_packet(rtcp_types::PayloadFeedback::builder_owned(fir).sender_ssrc(ssrc)); + } + + rtcp + } + + // RFC 3550 6.3.5 + // FIXME: we should surface this information to the element in order + // to perform clean up of the sync context + fn handle_timeouts(&mut self, now: Instant) { + trace!("handling rtcp timeouts"); + let td = RTCP_SOURCE_TIMEOUT_N_INTERVALS + * self + .deterministic_rtcp_duration(false) + .max(Duration::from_secs(5)); + + // delete all sources that are too old + self.local_receivers + .retain(|_ssrc, source| now - source.last_activity() < td); + self.remote_senders + .retain(|_ssrc, source| now - source.last_activity() < td); + self.remote_receivers + .retain(|_ssrc, source| now - source.last_activity() < td); + + // There is a SHOULD about performing RTCP reverse timer consideration here if any sources + // were timed out, however we are here before calculating the next rtcp timeout so are + // covered already with a changing number of members. + // If we call this outside of rtcp handling, then rtcp_reverse_consideration() would need to + // be called. + + // switch senders that haven't sent in a while to receivers + if self.last_rtcp_sent_times.len() >= 2 { + let two_rtcp_ago = *self.last_rtcp_sent_times.back().unwrap(); + let removed_senders = self + .local_senders + .iter() + .filter_map(|(&ssrc, source)| { + trace!( + "now: {now:?}, last activity: {:?} two_rtcp_ago: {:?}", + source.last_activity(), + two_rtcp_ago + ); + if source.last_activity() < two_rtcp_ago { + Some(ssrc) + } else { + None + } + }) + .inspect(|source| trace!("ssrc {source} has become a receiver")) + .collect::>(); + + for ssrc in removed_senders { + if let Some(source) = self.local_senders.remove(&ssrc) { + let new_source = source.into_receive(); + self.local_receivers.insert(new_source.ssrc(), new_source); + } + } + } + + // remove outdated conflicting addresses + self.conflicting_addresses + .retain(|_addr, time| now - *time < RTCP_ADDRESS_CONFLICT_TIMEOUT); + } + + /// Produce a RTCP packet (or `None` if it is too early to send a RTCP packet). After this call returns, + /// the next time to send a RTCP packet can be retrieved from `poll_rtcp_send_timeout` + // TODO: return RtcpPacketBuilder thing + pub fn poll_rtcp_send(&mut self, now: Instant, ntp_now: SystemTime) -> Option { + if let Some(event) = self.pending_rtcp_send.pop_back() { + return Some(event); + } + + let Some(next_rtcp_send) = self.next_rtcp_send.time else { + trace!("no next check time yet"); + return None; + }; + let is_early = self.next_early_rtcp_time.is_some() && !self.last_rtcp_sent_times.is_empty(); + + if is_early { + if let Some(next_early_rtcp_time) = self.next_early_rtcp_time { + if now < next_early_rtcp_time { + trace!("next early time {next_early_rtcp_time:?} not reached at {now:?}, nothing to produce"); + return None; + } + } + } else { + if now < next_rtcp_send { + trace!("next time {next_rtcp_send:?} not reached at {now:?}, nothing to produce"); + return None; + } + + // timer reconsideration + self.update_point_to_point(); + let interval = self.rtcp_interval(); + let test_next_rtcp_time = self.last_rtcp_handle_time.unwrap() + interval; + if test_next_rtcp_time > now { + trace!("timer reconsideration considers this wakeup {now:?} too early, nothing to produce. 
reconsidered time {test_next_rtcp_time:?}"); + self.next_rtcp_send.time = Some(test_next_rtcp_time); + return None; + } + } + + debug!( + "generating rtcp packet at {now:?}, ntp:{ntp_now:?}, duration since last rtcp {:?}", + self.last_rtcp_sent_times + .front() + .copied() + .unwrap_or(now) + .duration_since(now) + ); + + let (data, ssrcs_reported) = { + let mut rtcp = Compound::builder(); + let mut ssrcs_reported = vec![]; + + // TODO: implement round robin of sr/rrs + rtcp = self.generate_sr(rtcp, now, ntp_now, is_early, &mut ssrcs_reported); + rtcp = self.generate_rr(rtcp, now, ntp_now, is_early, &mut ssrcs_reported); + rtcp = self.generate_sdes(rtcp, is_early); + rtcp = self.generate_pli(rtcp, now); + rtcp = self.generate_fir(rtcp, now); + rtcp = self.generate_bye(rtcp, now); + + let size = rtcp.calculate_size().unwrap(); + // TODO: handle dropping data + assert!(size < RTCP_MTU); + let mut data = vec![0; size]; + rtcp.write_into(&mut data).unwrap(); + (data, ssrcs_reported) + }; + + for ssrc in ssrcs_reported { + if let Some(receiver) = self.remote_senders.get_mut(&ssrc) { + receiver.update_last_rtcp(); + } + } + + self.update_rtcp_average(data.len() + UDP_IP_OVERHEAD_BYTES); + + if !is_early { + self.handle_timeouts(now); + + self.update_point_to_point(); + let mut interval = self.rtcp_interval(); + self.rtcp_interval = Some(interval); + // Handle T_rr_interval + if self.profile.is_feedback() && !self.min_rtcp_interval.is_zero() { + let new_interval = interval.max(rtcp_dither(self.min_rtcp_interval)); + trace!("Updating interval from {interval:?} to {new_interval:?}"); + interval = new_interval; + } + self.next_rtcp_send = RtcpTimeMembers { + time: Some(now + interval), + p_members: self.n_members(), + }; + trace!("next rtcp time {:?}", self.next_rtcp_send.time); + self.last_rtcp_sent_times.push_front(now); + while self.last_rtcp_sent_times.len() > 2 { + self.last_rtcp_sent_times.pop_back(); + } + self.bye_state = None; + } + self.last_rtcp_handle_time = Some(now); + self.next_early_rtcp_time = None; + Some(RtcpSendReply::Data(data)) + } + + /// Returns the next time to send a RTCP packet. 
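// Illustrative sketch (editorial, not part of this patch): one way a caller could
// drive the two polling entry points. The session only computes times; the caller
// owns the clock and the transport. The returned byte vectors are assumed to be
// handed to the RTCP socket by the caller.
fn example_rtcp_tick(
    session: &mut Session,
    now: Instant,
    ntp_now: SystemTime,
) -> (Vec<Vec<u8>>, Option<Instant>) {
    let mut to_send = Vec::new();
    // drain everything the session wants to emit at `now`
    while let Some(reply) = session.poll_rtcp_send(now, ntp_now) {
        match reply {
            // a complete compound RTCP packet, ready for the transport
            RtcpSendReply::Data(bytes) => to_send.push(bytes),
            // informational: one of our ssrcs has signalled BYE
            RtcpSendReply::SsrcBye(_ssrc) => (),
        }
    }
    // then sleep until the (possibly reconsidered) next wakeup, re-querying this
    // whenever handle_rtcp_recv() returns RtcpRecvReply::TimerReconsideration
    (to_send, session.poll_rtcp_send_timeout(now))
}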
+ pub fn poll_rtcp_send_timeout(&mut self, now: Instant) -> Option { + trace!( + "poll-rtcp-send-timeout early time {:?}, next {:?}", + self.next_early_rtcp_time, + self.next_rtcp_send.time + ); + if let Some(early_time) = self.next_early_rtcp_time { + return Some(early_time); + } + if self.next_rtcp_send.time.is_none() { + self.update_point_to_point(); + let interval = self.rtcp_interval(); + self.rtcp_interval = Some(interval); + self.last_rtcp_handle_time = Some(now); + self.next_rtcp_send = RtcpTimeMembers { + time: Some(now + interval), + p_members: self.n_members(), + }; + trace!( + "poll-rtcp-send-timeout initial rtcp time {:?}", + self.next_rtcp_send.time + ); + } + self.next_rtcp_send.time + } + + fn deterministic_rtcp_duration(&self, we_sent: bool) -> Duration { + let n_senders = self.n_senders() as u64; + let n_members = self.n_members() as u64; + let session_bandwidth = self.session_bandwidth(); + // 5% of the session bandwidth, or the minimum of 400B/s + let rtcp_bw = (session_bandwidth / 20).max(RTCP_MIN_BANDWIDTH); + + let (n, rtcp_bw) = if n_senders * 4 <= n_members { + if we_sent { + (n_senders, rtcp_bw / 4) + } else { + (n_members - n_senders, rtcp_bw / 4 * 3) + } + } else { + (n_members, rtcp_bw) + }; + + let min_rtcp_interval = if we_sent { + Duration::ZERO + } else if self.profile.is_feedback() { + // RFC 4585 3.4d), 3.5.1 + // + // If not the first RTCP then Tmin is initialized to 0, otherwise to 1s. + // Also for a multi-party session it is initialized to 0. + if self.is_point_to_point + && !self.last_rtcp_sent_times.is_empty() + && self.bye_state.is_none() + { + Duration::ZERO + } else { + Duration::from_secs(1) + } + } else if !self.last_rtcp_sent_times.is_empty() && self.bye_state.is_none() { + self.min_rtcp_interval + } else { + self.min_rtcp_interval / 2 + }; + + // 1_000_000_000 / (e-1.5) + let compensation_ns = 820_829_366; + trace!( + "members {n}, RTCP bandwidth {rtcp_bw}, average RTCP size {}", + self.average_rtcp_size + ); + let t_nanos = (compensation_ns + .mul_div_round(self.average_rtcp_size as u64 * n, rtcp_bw.max(1) as u64)) + .unwrap() + .max(min_rtcp_interval.as_nanos() as u64); + trace!("deterministic rtcp interval {t_nanos}ns"); + Duration::from_nanos(t_nanos) + } + + fn session_bandwidth(&self) -> usize { + // TODO: allow to be externally provided + self.local_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .map(|source| source.bitrate()) + .sum::() + + self + .remote_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .map(|source| source.bitrate()) + .sum::() + } + + fn calculated_rtcp_duration(&self, we_sent: bool) -> Duration { + // From RFC 3550: only active data senders may use the reduced minimum + let dur = self.deterministic_rtcp_duration(we_sent); + rtcp_dither(dur) + } + + // returns whether to keep the feedback + fn request_early_rtcp(&mut self, now: Instant, max_delay: Duration) -> RequestEarlyRtcpResult { + // Implementation of RFC 4585 3.5.2 + if !self.profile.is_feedback() { + warn!("requested early RTCP without relevant feedback RTP profile, ignoring"); + return RequestEarlyRtcpResult::NotScheduled; + } + + if let Some(early_time) = self.next_early_rtcp_time { + if now + max_delay >= early_time { + debug!("early RTCP already scheduled for {early_time:?}"); + return RequestEarlyRtcpResult::Scheduled; + } else { + debug!("early RTCP already scheduled for {early_time:?} but too late"); + return RequestEarlyRtcpResult::NotScheduled; + } + } + let Some(next_rtcp_send_time) = 
self.next_rtcp_send.time else { + debug!("No regular RTCP scheduled yet, scheduling now"); + return RequestEarlyRtcpResult::TimerReconsideration; + }; + + // no regular RTCP sent yet, we cannot send early without the first regular RTCP being sent + let Some(&last_rtcp_sent) = self.last_rtcp_sent_times.front() else { + if now + max_delay >= next_rtcp_send_time { + debug!("early RTCP can't be scheduled until first regular RTCP is sent but regular RTCP scheduled early enough"); + return RequestEarlyRtcpResult::Scheduled; + } else { + debug!("early RTCP can't be scheduled until first regular RTCP is sent and regular RTCP scheduled too late"); + return RequestEarlyRtcpResult::NotScheduled; + } + }; + + self.update_point_to_point(); + + // safe as all previous invariants ensure that the `handle_time` must have been set + let last_rtcp_handle_time = self.last_rtcp_handle_time.unwrap(); + + let t_rr = self.rtcp_interval.unwrap(); + let t_dither_max = if self.is_point_to_point { + Duration::from_secs(0) + } else { + t_rr / 2 + }; + + if now + t_dither_max > next_rtcp_send_time { + if now + max_delay >= next_rtcp_send_time { + debug!("early RTCP not scheduled because regular RTCP is early enough"); + return RequestEarlyRtcpResult::Scheduled; + } else { + debug!("early RTCP can't be scheduled because regular RTCP is scheduled soon but too late"); + return RequestEarlyRtcpResult::NotScheduled; + } + } + + let (allow_early, offset) = if last_rtcp_sent == last_rtcp_handle_time { + // last rtcp was a regular rtcp, we can send early after that + (true, Duration::ZERO) + } else if last_rtcp_handle_time + t_rr <= now + max_delay { + // 1. last rtcp packet was not regular rtcp so must be early rtcp + // 2. More than t_rr has passed since so a regular rtcp packet has been suppressed thus + // allowing early send again + ( + true, + (last_rtcp_handle_time + t_rr).saturating_duration_since(now), + ) + } else { + debug!("Can't allow early RTCP again yet"); + (false, Duration::ZERO) + }; + + if !allow_early { + if next_rtcp_send_time - now < max_delay { + debug!("early RTCP not scheduled but regular RTCP scheduled time is soon enough"); + RequestEarlyRtcpResult::Scheduled + } else { + debug!("early RTCP not scheduled and regular RTCP scheduled time is too late"); + RequestEarlyRtcpResult::NotScheduled + } + } else { + let mut rng = rand::thread_rng(); + self.next_early_rtcp_time = Some(now + t_dither_max.mul_f64(rng.gen()) + offset); + debug!("early RTCP scheduled at {:?}", self.next_early_rtcp_time); + RequestEarlyRtcpResult::TimerReconsideration + } + } + + pub fn schedule_bye(&mut self, reason: &str, now: Instant) { + if self.bye_state.is_some() { + return; + } + + for source in self.local_senders.values_mut() { + source.mark_bye(reason); + } + for source in self.local_receivers.values_mut() { + source.mark_bye(reason); + } + + if self.n_members() <= 50 { + return; + } + + self.bye_state = Some(ByeState { + members: 1, + pmembers: 1, + }); + // tp is reset to tc + self.last_rtcp_sent_times = VecDeque::new(); + self.last_rtcp_sent_times.push_front(now); + // FIXME: use actual BYE packet size + self.average_rtcp_size = 100; + self.update_point_to_point(); + let mut interval = self.rtcp_interval(); + self.rtcp_interval = Some(interval); + // Handle T_rr_interval + if self.profile.is_feedback() && !self.min_rtcp_interval.is_zero() { + let new_interval = interval.max(rtcp_dither(self.min_rtcp_interval)); + trace!("Updating interval from {interval:?} to {new_interval:?}"); + interval = new_interval; + } + 
self.next_rtcp_send = RtcpTimeMembers { + time: Some(now + interval), + p_members: self.n_members(), + }; + trace!("next rtcp time {:?}", self.next_rtcp_send.time); + } + + fn rtcp_interval(&self) -> Duration { + let interval = self + .calculated_rtcp_duration(!self.local_senders.is_empty() && self.bye_state.is_none()); + trace!("Calculated RTCP interval {interval:?}"); + interval + } + + /// Retrieve a list of all ssrc's currently handled by this session + pub fn ssrcs(&self) -> impl Iterator + '_ { + self.local_senders + .keys() + .chain(self.remote_senders.keys()) + .chain(self.local_receivers.keys()) + .chain(self.remote_receivers.keys()) + .cloned() + } + + /// Retrieve a local send source by ssrc + pub fn local_send_source_by_ssrc(&self, ssrc: u32) -> Option<&LocalSendSource> { + self.local_senders.get(&ssrc) + } + + /// Retrieve a local receive source by ssrc + pub fn local_receive_source_by_ssrc(&self, ssrc: u32) -> Option<&LocalReceiveSource> { + self.local_receivers.get(&ssrc) + } + + /// Retrieve a remote send source by ssrc + pub fn remote_send_source_by_ssrc(&self, ssrc: u32) -> Option<&RemoteSendSource> { + self.remote_senders.get(&ssrc) + } + + /// Retrieve a remote receive source by ssrc + pub fn remote_receive_source_by_ssrc(&self, ssrc: u32) -> Option<&RemoteReceiveSource> { + self.remote_receivers.get(&ssrc) + } + + pub fn mut_local_send_source_by_ssrc(&mut self, ssrc: u32) -> Option<&mut LocalSendSource> { + self.local_senders.get_mut(&ssrc) + } + + #[cfg(test)] + fn mut_remote_sender_source_by_ssrc(&mut self, ssrc: u32) -> Option<&mut RemoteSendSource> { + self.remote_senders.get_mut(&ssrc) + } + + pub(crate) fn request_remote_key_unit( + &mut self, + now: Instant, + typ: KeyUnitRequestType, + ssrc: u32, + ) -> Vec { + let mut replies = Vec::new(); + + if !self.remote_senders.contains_key(&ssrc) { + trace!("No remote sender with ssrc {ssrc} known"); + return replies; + }; + + debug!("Requesting remote key-unit for ssrc {ssrc} of type {typ:?}"); + + // FIXME: Use hard-coded 5s interval here + let res = self.request_early_rtcp(now, Duration::from_secs(5)); + if res == RequestEarlyRtcpResult::TimerReconsideration { + replies.push(RequestRemoteKeyUnitReply::TimerReconsideration); + } + + if res != RequestEarlyRtcpResult::NotScheduled { + let source = self.remote_senders.get_mut(&ssrc).unwrap(); + source.request_remote_key_unit(now, typ); + } + + replies + } +} + +fn generate_cname() -> String { + let mut rng = rand::thread_rng(); + let user = rng.gen::(); + let host = rng.gen::(); + format!("user{user}@{host:#}") +} + +fn generate_ssrc() -> u32 { + let mut rng = rand::thread_rng(); + rng.gen::() +} + +fn rtcp_dither(dur: Duration) -> Duration { + let mut rng = rand::thread_rng(); + // need a factor in [0.5, 1.5] + let factor = rng.gen::(); + dur.mul_f64(factor + 0.5) +} + +#[cfg(test)] +pub(crate) mod tests { + use rtp_types::RtpPacketBuilder; + + use crate::rtpbin2::time::NtpTime; + + use super::*; + + pub(crate) fn init_logs() { + let _ = gst::init(); + use crate::rtpbin2::internal::GstRustLogger; + GstRustLogger::install(); + } + + const TEST_PT: u8 = 96; + const TEST_CLOCK_RATE: u32 = 90000; + + #[test] + fn receive_probation() { + init_logs(); + let mut session = Session::new(); + let from = "127.0.0.1:1000".parse().unwrap(); + let now = Instant::now(); + let mut held = vec![]; + for seq_no in 0..5 { + let mut rtp_data = [0; 128]; + let len = RtpPacketBuilder::new() + .payload_type(TEST_PT) + .ssrc(0x12345678) + .sequence_number(seq_no) + .write_into(&mut rtp_data) + 
.unwrap(); + let rtp_data = &rtp_data[..len]; + let packet = RtpPacket::parse(rtp_data).unwrap(); + let mut ret = session.handle_recv(&packet, Some(from), now); + match seq_no { + // probation + 0 => { + if let RecvReply::NewSsrc(ssrc, pt) = ret { + assert_eq!(ssrc, 0x12345678); + assert_eq!(pt, TEST_PT); + if let RecvReply::Hold(id) = session.handle_recv(&packet, Some(from), now) { + held.push(id); + } else { + unreachable!(); + } + } else { + unreachable!(); + } + } + 1 => { + while let RecvReply::Forward(id) = ret { + let pos = held.iter().position(|&held_id| held_id == id).unwrap(); + held.remove(pos); + ret = session.handle_recv(&packet, Some(from), now); + } + assert!(held.is_empty()); + assert_eq!(ret, RecvReply::Passthrough); + } + 2..=4 => { + assert_eq!(ret, RecvReply::Passthrough) + } + _ => unreachable!(), + } + } + } + + pub fn generate_rtp_packet(ssrc: u32, seq_no: u16, rtp_ts: u32, payload_len: usize) -> Vec { + init_logs(); + let mut rtp_data = [0; 1200]; + let payload = vec![1; payload_len]; + let len = RtpPacketBuilder::new() + .payload_type(TEST_PT) + .ssrc(ssrc) + .sequence_number(seq_no) + .timestamp(rtp_ts) + .payload(payload.as_slice()) + .write_into(rtp_data.as_mut_slice()) + .unwrap(); + rtp_data[..len].to_vec() + } + + fn increment_rtcp_times( + old_now: Instant, + new_now: Instant, + ntp_now: SystemTime, + ) -> (Instant, SystemTime) { + (new_now, ntp_now + new_now.duration_since(old_now)) + } + + fn next_rtcp_packet( + session: &mut Session, + mut now: Instant, + mut ntp_now: SystemTime, + ) -> (RtcpSendReply, Instant, SystemTime) { + let mut ret = None; + while ret.is_none() { + ret = session.poll_rtcp_send(now, ntp_now); + if let Some(rtcp) = ret { + return (rtcp, now, ntp_now); + } else { + (now, ntp_now) = increment_rtcp_times( + now, + session.poll_rtcp_send_timeout(now).unwrap(), + ntp_now, + ); + } + } + unreachable!(); + } + + #[test] + fn send_new_ssrc() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let rtp_data = generate_rtp_packet(0x12345678, 100, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(0x12345678, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + } + + fn session_recv_first_packet_disable_probation( + session: &mut Session, + packet: &RtpPacket<'_>, + now: Instant, + ) { + assert_eq!( + session.handle_recv(packet, None, now), + RecvReply::NewSsrc(packet.ssrc(), packet.payload_type()) + ); + let src = session + .mut_remote_sender_source_by_ssrc(packet.ssrc()) + .unwrap(); + src.set_probation_packets(0); + } + + #[test] + fn receive_disable_probation() { + init_logs(); + let mut session = Session::new(); + let now = Instant::now(); + let rtp_data = generate_rtp_packet(0x12345678, 100, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + } + + #[test] + fn receive_two_ssrc_rr() { + init_logs(); + let mut session = Session::new(); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, 
None, now), + RecvReply::Passthrough + ); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let rtp_data = generate_rtp_packet(ssrcs[1], 200, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + assert_eq!( + session.handle_recv( + &RtpPacket::parse(&generate_rtp_packet(ssrcs[1], 207, 0, 4)).unwrap(), + None, + now + ), + RecvReply::Passthrough + ); + + let (rtcp_data, _now, _ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_rb_ssrcs = 0; + let mut found_sdes_cname = false; + let mut sdes_ssrc = None; + let mut rr_ssrc = None; + + for p in rtcp { + match p { + Ok(Packet::Rr(rr)) => { + rr_ssrc = Some(rr.ssrc()); + for rb in rr.report_blocks() { + if ssrcs.contains(&rb.ssrc()) { + n_rb_ssrcs += 1; + } + match rb.ssrc() { + 0x12345678 => { + assert_eq!(rb.extended_sequence_number() & 0xffff, 100); + assert_eq!(rb.cumulative_lost(), 0xFFFFFF); // -1 in 24-bit + assert_eq!(rb.fraction_lost(), 0); + } + 0x87654321 => { + assert_eq!(rb.extended_sequence_number() & 0xffff, 207); + assert_eq!(rb.cumulative_lost(), 6); + assert_eq!(rb.fraction_lost(), 182); + } + _ => unreachable!(), + } + } + } + Ok(Packet::Sdes(sdes)) => { + for chunk in sdes.chunks() { + sdes_ssrc = Some(chunk.ssrc()); + for item in chunk.items() { + if item.type_() == SdesItem::CNAME { + found_sdes_cname = true; + } else { + unreachable!(); + } + } + } + } + _ => unreachable!("{p:?}"), + } + } + assert_eq!(n_rb_ssrcs, ssrcs.len()); + assert!(found_sdes_cname); + assert_eq!(sdes_ssrc, rr_ssrc); + } + + #[test] + fn send_two_ssrc_sr() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + // generate packets at the 'same time' as rtcp so some calculated timestamps will match + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + // ensure that timer reconsideration will not suppress + let (now, ntp_now) = increment_rtcp_times(now, now + RTCP_MIN_REPORT_INTERVAL, ntp_now); + + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 4, 1024); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[0], 96) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtp_data = generate_rtp_packet(ssrcs[1], 200, 4, 1024); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[1], TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_rb_ssrcs = 0; + for p in rtcp { + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + if ssrcs.contains(&sr.ssrc()) { + n_rb_ssrcs += 1; + } + // we sent 1 packet on each ssrc, rtcp should reflect that + assert_eq!(sr.packet_count(), 1); + assert_eq!(sr.octet_count() as usize, 1024); + assert_eq!( + 
sr.ntp_timestamp(), + system_time_to_ntp_time_u64(ntp_now).as_u64() + ); + assert_eq!(sr.rtp_timestamp(), 4); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + assert_eq!(n_rb_ssrcs, ssrcs.len()); + } + + #[test] + fn receive_two_ssrc_sr() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 4, 8); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let rtp_data = generate_rtp_packet(ssrcs[1], 200, 20, 12); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet( + SenderReport::builder(ssrcs[0]) + .ntp_timestamp(system_time_to_ntp_time_u64(ntp_now).as_u64()) + .packet_count(1) + .octet_count(8) + .rtp_timestamp(4), + ) + .add_packet( + SenderReport::builder(ssrcs[1]) + .ntp_timestamp(system_time_to_ntp_time_u64(ntp_now).as_u64()) + .packet_count(2) + .octet_count(24) + .rtp_timestamp(20), + ) + .write_into(&mut data) + .unwrap(); + let data = &data[..len]; + let rtcp = Compound::parse(data).unwrap(); + + assert_eq!( + session.handle_rtcp_recv(rtcp, len, None, now, ntp_now), + vec![ + RtcpRecvReply::NewRtpNtp(( + ssrcs[0], + 4, + system_time_to_ntp_time_u64(ntp_now).as_u64() + )), + RtcpRecvReply::NewRtpNtp(( + ssrcs[1], + 20, + system_time_to_ntp_time_u64(ntp_now).as_u64() + )) + ] + ); + + let (rtcp_data, _new_now, new_ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + match p { + Ok(Packet::Rr(rr)) => { + assert_eq!(rr.n_reports(), 2); + let mut rb_ssrcs = rr.report_blocks().map(|rb| rb.ssrc()).collect::>(); + rb_ssrcs.sort(); + assert_eq!(rb_ssrcs, &ssrcs); + for rb in rr.report_blocks() { + assert_eq!( + rb.last_sender_report_timestamp(), + system_time_to_ntp_time_u64(ntp_now).as_u32() + ); + assert_eq!( + rb.delay_since_last_sender_report_timestamp(), + NtpTime::from_duration(new_ntp_now.duration_since(ntp_now).unwrap()) + .as_u32() + ); + if rb.ssrc() == ssrcs[0] { + assert_eq!(rb.extended_sequence_number() & 0xffff, 100); + } else if rb.ssrc() == ssrcs[1] { + assert_eq!(rb.extended_sequence_number() & 0xffff, 200); + } else { + unreachable!() + } + } + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + } + + #[test] + fn send_receiver_two_ssrc_sr_rr() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + session.set_min_rtcp_interval(Duration::from_secs(1)); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + // get the next rtcp packet times and send at the same time + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + // ensure that timer reconsideration will not suppress + let (now, ntp_now) = increment_rtcp_times(now, now + 
RTCP_MIN_REPORT_INTERVAL, ntp_now); + + // send from two ssrcs + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 0, 1024); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[0], TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtp_data = generate_rtp_packet(ssrcs[1], 200, 0, 1024); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[1], TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + trace!("rtcp data {rtcp_data:?}"); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_sr_ssrcs = 0; + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + // no reports as there are no receivers + assert_eq!(sr.n_reports(), 0); + if ssrcs.contains(&sr.ssrc()) { + n_sr_ssrcs += 1; + } + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + assert_eq!(n_sr_ssrcs, ssrcs.len()); + + let recv_ssrcs = [0x11223344, 0xFFEEDDCC]; + + let rtp_data = generate_rtp_packet(recv_ssrcs[0], 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let rtp_data = generate_rtp_packet(recv_ssrcs[1], 600, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let (rtcp_data, _now, _ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + + trace!("rtcp data {rtcp_data:?}"); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_sr_ssrcs = 0; + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 2); + if ssrcs.contains(&sr.ssrc()) { + n_sr_ssrcs += 1; + } + let mut rb_ssrcs = sr.report_blocks().map(|rb| rb.ssrc()).collect::>(); + rb_ssrcs.sort(); + assert_eq!(rb_ssrcs, recv_ssrcs); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + assert_eq!(n_sr_ssrcs, ssrcs.len()); + } + + #[test] + fn session_internal_sender_ssrc() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let recv_ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(recv_ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let (rtcp_data, _now, _ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Rr(rr)) => { + // no reports as there are no receivers + assert_eq!(rr.n_reports(), 1); + let mut rb_ssrcs = rr.report_blocks().map(|rb| rb.ssrc()).collect::>(); + rb_ssrcs.sort(); + assert_eq!(rb_ssrcs, &[recv_ssrc]); + } + Ok(Packet::Sdes(_)) => (), + _ 
=> unreachable!(), + } + } + } + + #[test] + fn sender_source_timeout() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x12345678; + + let rtp_data = generate_rtp_packet(ssrc, 200, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let (rtcp_data, now, ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + assert_eq!(sr.ssrc(), ssrc); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + + let mut seen_rr = false; + for _ in 0..=5 { + let (rtcp_data, _now, _ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + assert_eq!(sr.ssrc(), ssrc); + } + Ok(Packet::Rr(rr)) => { + assert_eq!(rr.ssrc(), ssrc); + seen_rr |= true; + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + } + assert!(seen_rr); + } + + #[test] + fn ignore_recv_bye_for_local_sender() { + // test that receiving a BYE for our (local) senders is ignored + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let (rtcp_data, now, ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + let rtcp = Compound::parse(&rtcp_data).unwrap(); + + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + assert_eq!(sr.ssrc(), ssrc); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Bye::builder().add_source(ssrc)) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + + assert_eq!( + session.handle_rtcp_recv(rtcp, len, None, now, ntp_now), + vec![] + ); + let source = session.mut_local_send_source_by_ssrc(ssrc).unwrap(); + assert_eq!(source.state(), SourceState::Normal); + } + + #[test] + fn ssrc_collision_on_send() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + let from = "127.0.0.1:8080".parse().unwrap(); + + // add remote ssrc + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Sdes::builder().add_chunk( + SdesChunk::builder(ssrc).add_item(SdesItem::builder(SdesItem::CNAME, "cname")), + )) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + 
session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![ + RtcpRecvReply::NewSsrc(ssrc), + RtcpRecvReply::NewCName(("cname".to_string(), ssrc)) + ] + ); + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::SsrcCollision(ssrc) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Drop); + + // add ssrc as if our packets are being looped. As we have already discovered the + // conflicting address, these looped packets should be dropped. + let new_ssrc = 0x44332211; + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Sdes::builder().add_chunk( + SdesChunk::builder(new_ssrc).add_item(SdesItem::builder(SdesItem::CNAME, "cname")), + )) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![ + RtcpRecvReply::NewSsrc(new_ssrc), + RtcpRecvReply::NewCName(("cname".to_string(), new_ssrc)) + ] + ); + + let rtp_data = generate_rtp_packet(new_ssrc, 510, 10, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!(session.handle_send(&packet, now), SendReply::Drop); + } + + #[test] + fn ssrc_collision_on_recv() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + let from = "127.0.0.1:8080".parse().unwrap(); + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Sdes::builder().add_chunk( + SdesChunk::builder(ssrc).add_item(SdesItem::builder(SdesItem::CNAME, "cname")), + )) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![RtcpRecvReply::SsrcCollision(ssrc)] + ); + } + + #[test] + fn ssrc_collision_third_party() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ssrc = 0x11223344; + let from1 = "127.0.0.1:8080".parse().unwrap(); + let from2 = "127.0.0.2:8080".parse().unwrap(); + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, Some(from1), now), + RecvReply::Passthrough + ); + + // packet from a different address should be dropped as a third party collision + assert_eq!( + session.handle_recv(&packet, Some(from2), now), + RecvReply::Ignore + ); + + // packet from a original address should still succeed + assert_eq!( + session.handle_recv(&packet, Some(from1), now), + RecvReply::Passthrough + ); + } + + #[test] + fn bye_remote_sender() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + 
session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + // send initial rtcp + let (rtcp_data, now, ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(_rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let rtcp = Compound::builder().add_packet(Bye::builder().add_source(ssrc)); + let mut data = vec![0; 128]; + let len = rtcp.write_into(&mut data).unwrap(); + let data = &data[..len]; + + let rtcp = Compound::parse(data).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, None, now, ntp_now), + vec![ + RtcpRecvReply::SsrcBye(ssrc), + RtcpRecvReply::TimerReconsideration + ] + ); + let source = session.mut_remote_sender_source_by_ssrc(ssrc).unwrap(); + assert_eq!(source.state(), SourceState::Bye); + } + + #[test] + fn bye_local_sender() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + // send initial rtcp + let (rtcp_data, now, ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(_rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let source = session.mut_local_send_source_by_ssrc(ssrc).unwrap(); + source.mark_bye("Cya"); + assert_eq!(source.state(), SourceState::Bye); + + // data after bye should be dropped + assert_eq!(session.handle_send(&packet, now), SendReply::Drop); + + let (rtcp_data, now, ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut received_bye = false; + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Bye(bye)) => { + assert_eq!(bye.reason(), Some(b"Cya".as_ref())); + assert_eq!(bye.ssrcs().next(), Some(ssrc)); + // bye must not be followed by any other packets + received_bye = true; + } + Ok(Packet::Sdes(_sdes)) => { + assert!(!received_bye); + } + _ => unreachable!(), + } + } + assert!(received_bye); + let (rtcp_data, _now, _ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::SsrcBye(bye_ssrc) = rtcp_data else { + unreachable!(); + }; + assert_eq!(bye_ssrc, ssrc); + } + + #[test] + fn early_rtcp() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + session.set_profile(RtpProfile::Avpf); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + let send_ssrc = 0x55667788; + let send2_ssrc = 0x99aabbcc; + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let rtp_data = generate_rtp_packet(send_ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(send_ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtp_data = generate_rtp_packet(send2_ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + 
SendReply::NewSsrc(send2_ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + // complete first regular rtcp + let (rtcp_data, now, _ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(_rtcp_data) = rtcp_data else { + unreachable!(); + }; + + let source = session.mut_local_send_source_by_ssrc(send_ssrc).unwrap(); + source.set_sdes_item(SdesItem::NAME, b"name"); + + // request early rtcp. The resulting RTCP packet, will only have the CNAME sdes item and a + // single SR/RR. + session.request_early_rtcp(now, RTCP_MIN_REPORT_INTERVAL); + assert!(session.next_early_rtcp_time.is_some()); + + let (rtcp_data, _now, _ntp_now) = next_rtcp_packet(&mut session, now, ntp_now); + let RtcpSendReply::Data(rtcp_data) = rtcp_data else { + unreachable!(); + }; + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_sr_ssrc = 0; + let mut n_sdes = 0; + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert!([send_ssrc, send2_ssrc].contains(&sr.ssrc())); + n_sr_ssrc += 1; + } + Ok(Packet::Sdes(sdes)) => { + assert_eq!(n_sdes, 0); + for chunk in sdes.chunks() { + assert!([send_ssrc, send2_ssrc].contains(&chunk.ssrc())); + for item in chunk.items() { + assert_eq!(item.type_(), SdesItem::CNAME); + n_sdes += 1; + } + } + } + _ => unreachable!(), + } + } + assert_eq!(n_sdes, 2); + assert_eq!(n_sr_ssrc, 1); + } + + #[test] + fn point_to_point() { + let mut session = Session::new(); + assert!(session.is_point_to_point); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let recv_ssrc = 0x11223344; + let recv2_ssrc = 0x55667788; + let from = "127.0.0.1:8080".parse().unwrap(); + + // single remote cname is still point to point + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet( + Sdes::builder().add_chunk( + SdesChunk::builder(recv_ssrc) + .add_item(SdesItem::builder(SdesItem::CNAME, "cname1")), + ), + ) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![ + RtcpRecvReply::NewSsrc(recv_ssrc), + RtcpRecvReply::NewCName(("cname1".to_string(), recv_ssrc)) + ] + ); + assert!(session.is_point_to_point); + + // multiple ssrc but same cname is still point to point + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet( + Sdes::builder().add_chunk( + SdesChunk::builder(recv_ssrc) + .add_item(SdesItem::builder(SdesItem::CNAME, "cname1")), + ), + ) + .add_packet( + Sdes::builder().add_chunk( + SdesChunk::builder(recv2_ssrc) + .add_item(SdesItem::builder(SdesItem::CNAME, "cname1")), + ), + ) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![ + RtcpRecvReply::NewCName(("cname1".to_string(), recv_ssrc)), + RtcpRecvReply::NewSsrc(recv2_ssrc), + RtcpRecvReply::NewCName(("cname1".to_string(), recv2_ssrc)) + ] + ); + assert!(session.is_point_to_point); + + // multiple ssrc and different cname is NOT point to point + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet( + Sdes::builder().add_chunk( + SdesChunk::builder(recv_ssrc) + .add_item(SdesItem::builder(SdesItem::CNAME, "cname1")), + ), + ) + .add_packet( + Sdes::builder().add_chunk( + SdesChunk::builder(recv2_ssrc) + .add_item(SdesItem::builder(SdesItem::CNAME, "cname2")), + ), + ) + 
.write_into(&mut data)
+            .unwrap();
+        let rtcp = Compound::parse(&data[..len]).unwrap();
+        assert_eq!(
+            session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now),
+            vec![
+                RtcpRecvReply::NewCName(("cname1".to_string(), recv_ssrc)),
+                RtcpRecvReply::NewCName(("cname2".to_string(), recv2_ssrc))
+            ]
+        );
+        assert!(!session.is_point_to_point);
+    }
+}
diff --git a/net/rtp/src/rtpbin2/source.rs b/net/rtp/src/rtpbin2/source.rs
new file mode 100644
index 00000000..6482e5cf
--- /dev/null
+++ b/net/rtp/src/rtpbin2/source.rs
@@ -0,0 +1,1334 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use std::{
+    collections::{HashMap, VecDeque},
+    net::SocketAddr,
+    time::{Duration, Instant, SystemTime},
+};
+
+use rtcp_types::{ReportBlock, ReportBlockBuilder};
+
+use super::{
+    session::KeyUnitRequestType,
+    time::{system_time_to_ntp_time_u64, NtpTime},
+};
+
+use gst::prelude::MulDiv;
+
+pub const DEFAULT_PROBATION_N_PACKETS: usize = 2;
+pub const DEFAULT_MAX_DROPOUT: u32 = 3000;
+pub const DEFAULT_MAX_MISORDER: u32 = 100;
+
+const BITRATE_WINDOW: Duration = Duration::from_secs(3);
+
+#[derive(Debug, Clone, Copy)]
+pub struct Rb {
+    ssrc: u32,
+    /// fraction out of 256 of packets lost since the last Rb
+    fraction_lost: u8,
+    /// signed 24-bit number of expected packets - received packets (including duplicates and late
+    /// packets)
+    cumulative_lost: u32,
+    extended_sequence_number: u32,
+    /// jitter in clock rate units
+    jitter: u32,
+    /// 16.16 fixed point ntp time
+    last_sr: u32,
+    /// 16.16 fixed point ntp duration
+    delay_since_last_sr: u32,
+}
+
+impl Rb {
+    pub fn fraction_lost(&self) -> u8 {
+        self.fraction_lost
+    }
+
+    pub fn cumulative_lost(&self) -> i32 {
+        if self.cumulative_lost & 0x800000 > 0 {
+            -((self.cumulative_lost & 0x7fffff) as i32)
+        } else {
+            self.cumulative_lost as i32
+        }
+    }
+
+    pub fn extended_sequence_number(&self) -> u32 {
+        self.extended_sequence_number
+    }
+
+    pub fn jitter(&self) -> u32 {
+        self.jitter
+    }
+
+    pub fn last_sr_ntp_time(&self) -> u32 {
+        self.last_sr
+    }
+
+    pub fn delay_since_last_sr(&self) -> u32 {
+        self.delay_since_last_sr
+    }
+}
+
+impl<'a> From<ReportBlock<'a>> for Rb {
+    fn from(value: ReportBlock<'a>) -> Self {
+        Self {
+            ssrc: value.ssrc(),
+            fraction_lost: value.fraction_lost(),
+            cumulative_lost: value.cumulative_lost(),
+            extended_sequence_number: value.extended_sequence_number(),
+            jitter: value.interarrival_jitter(),
+            last_sr: value.last_sender_report_timestamp(),
+            delay_since_last_sr: value.delay_since_last_sender_report_timestamp(),
+        }
+    }
+}
+
+impl From<Rb> for ReportBlockBuilder {
+    fn from(value: Rb) -> Self {
+        ReportBlock::builder(value.ssrc)
+            .fraction_lost(value.fraction_lost)
+            .cumulative_lost(value.cumulative_lost)
+            .extended_sequence_number(value.extended_sequence_number)
+            .interarrival_jitter(value.jitter)
+            .last_sender_report_timestamp(value.last_sr)
+            .delay_since_last_sender_report_timestamp(value.delay_since_last_sr)
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct Source {
+    ssrc: u32,
+    state: SourceState,
+    sdes: HashMap<u8, String>,
+    last_activity: Instant,
+    payload_type: Option<u8>,
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum SourceState {
+    Probation(usize),
+    Normal,
+    Bye,
+}
+
+impl Source {
+    fn new(ssrc: u32) -> Self {
+        Self {
+            ssrc,
+            state: SourceState::Probation(DEFAULT_PROBATION_N_PACKETS),
+            sdes: HashMap::new(),
+            last_activity: Instant::now(),
+            payload_type: None,
+        }
+    }
+
+    fn set_state(&mut self, state: SourceState) {
+        self.state = state;
+    }
+}
+
+#[derive(Debug)]
+pub struct ReceivedRb {
+    pub rb: Rb,
+    pub
receive_time: Instant, + pub receive_ntp_time: NtpTime, +} + +impl ReceivedRb { + #[allow(unused)] + fn round_trip_time(&self) -> Duration { + let rb_send_ntp_time = self.rb.last_sr as u64 + self.rb.delay_since_last_sr as u64; + + // Can't calculate any round trip time + if rb_send_ntp_time == 0 { + return Duration::ZERO; + } + + let mut rb_recv_ntp_time = self.receive_ntp_time.as_u32() as u64; + + if rb_send_ntp_time > rb_recv_ntp_time { + // 16.16 bit fixed point NTP time wrapped around + if rb_send_ntp_time - rb_recv_ntp_time > 0x7fff_ffff { + rb_recv_ntp_time += u32::MAX as u64; + } + } + + let diff = rb_recv_ntp_time.saturating_sub(rb_send_ntp_time); + // Bogus RTT of more than 2*5 seconds, return 1s as a fallback + if (diff >> 16) > 5 { + return Duration::from_secs(1); + } + + let rtt = 2 * diff; + let rtt_ns = rtt * 1_000_000_000 / 65_536; + Duration::from_nanos(rtt_ns) + } +} + +#[derive(Debug)] +pub struct LocalSendSource { + source: Source, + ext_seqnum: u64, + last_rtp_sent: Option<(u32, Instant)>, + sent_bytes: u64, + sent_packets: u64, + bitrate: Bitrate, + bye_sent_time: Option, + bye_reason: Option, + last_sent_sr: Option, + last_received_rb: HashMap, +} + +impl LocalSendSource { + pub(crate) fn new(ssrc: u32) -> Self { + Self { + source: Source::new(ssrc), + ext_seqnum: 0, + last_rtp_sent: None, + sent_bytes: 0, + sent_packets: 0, + bitrate: Bitrate::new(BITRATE_WINDOW), + bye_sent_time: None, + bye_reason: None, + last_sent_sr: None, + last_received_rb: HashMap::new(), + } + } + + pub(crate) fn set_state(&mut self, state: SourceState) { + self.source.set_state(state); + } + + pub(crate) fn state(&self) -> SourceState { + self.source.state + } + + pub(crate) fn sent_packet( + &mut self, + bytes: usize, + time: Instant, + seqnum: u16, + rtp_time: u32, + payload_type: u8, + ) { + self.bitrate.add_entry(bytes, time); + + let mut ext_seqnum = seqnum as u64 + (self.ext_seqnum & !0xffff); + + if ext_seqnum < self.ext_seqnum { + let diff = self.ext_seqnum - ext_seqnum; + if diff > 0x7fff { + ext_seqnum += 1 << 16; + } + } else { + let diff = ext_seqnum - self.ext_seqnum; + if diff > 0x7fff { + ext_seqnum -= 1 << 16; + } + } + self.ext_seqnum = ext_seqnum; + + self.source.payload_type = Some(payload_type); + + self.sent_bytes = self.sent_bytes.wrapping_add(bytes as u64); + self.sent_packets += 1; + self.last_rtp_sent = Some((rtp_time, time)); + } + + /// Retrieve the last rtp timestamp (and time) that data was sent for this source + pub fn last_rtp_sent_timestamp(&self) -> Option<(u32, Instant)> { + self.last_rtp_sent + } + + /// Retrieve the last seen payload type for this source + pub fn payload_type(&self) -> Option { + self.source.payload_type + } + + pub(crate) fn bitrate(&self) -> usize { + self.bitrate.bitrate() + } + + pub(crate) fn packet_count(&self) -> u64 { + self.sent_packets + } + + pub(crate) fn octet_count(&self) -> u64 { + self.sent_bytes + } + + /// Retrieve the ssrc for this source + pub fn ssrc(&self) -> u32 { + self.source.ssrc + } + + /// Set an sdes item for this source + pub fn set_sdes_item(&mut self, type_: u8, value: &[u8]) { + if let Ok(s) = std::str::from_utf8(value) { + self.source.sdes.insert(type_, s.to_owned()); + } + } + + /// Retrieve the sdes for this source + pub fn sdes(&self) -> &HashMap { + &self.source.sdes + } + + /// Set the last time when activity was seen for this source + pub fn set_last_activity(&mut self, time: Instant) { + self.source.last_activity = time; + } + + /// The last time when activity was seen for this source + pub fn 
last_activity(&self) -> Instant { + self.source.last_activity + } + + pub(crate) fn take_sr_snapshot( + &mut self, + ntp_now: SystemTime, + ntp_time: NtpTime, + rtp_timestamp: u32, + ) { + self.last_sent_sr = Some(Sr { + local_time: ntp_now, + remote_time: ntp_time, + rtp_time: rtp_timestamp, + octet_count: (self.sent_bytes & 0xffff_ffff) as u32, + packet_count: (self.sent_packets & 0xffff_ffff) as u32, + }); + } + + pub fn last_sent_sr(&self) -> Option { + self.last_sent_sr + } + + pub(crate) fn bye_sent_at(&mut self, time: Instant) { + self.bye_sent_time = Some(time); + } + + pub(crate) fn bye_sent_time(&self) -> Option { + self.bye_sent_time + } + + pub(crate) fn mark_bye(&mut self, reason: &str) { + if self.source.state == SourceState::Bye { + return; + } + self.set_state(SourceState::Bye); + self.bye_reason = Some(reason.to_string()); + } + + pub(crate) fn bye_reason(&self) -> Option<&String> { + self.bye_reason.as_ref() + } + + pub(crate) fn into_receive(self) -> LocalReceiveSource { + LocalReceiveSource { + source: self.source, + bye_sent_time: self.bye_sent_time, + bye_reason: self.bye_reason, + } + } + + pub fn add_last_rb( + &mut self, + sender_ssrc: u32, + rb: ReportBlock<'_>, + now: Instant, + ntp_now: SystemTime, + ) { + let ntp_now = system_time_to_ntp_time_u64(ntp_now); + let owned_rb = rb.into(); + self.last_received_rb + .entry(sender_ssrc) + .and_modify(|entry| { + *entry = ReceivedRb { + rb: owned_rb, + receive_time: now, + receive_ntp_time: ntp_now, + } + }) + .or_insert_with(|| ReceivedRb { + rb: owned_rb, + receive_time: now, + receive_ntp_time: ntp_now, + }); + } + + pub fn received_report_blocks(&self) -> impl Iterator + '_ { + self.last_received_rb.iter().map(|(&k, v)| (k, v)) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum SourceRecvReply { + /// hold this buffer for later and give it the relevant id. The id will be used in a Drop, or + /// Forward return value + Hold(usize), + /// drop a buffer by id. Should continue calling with the same input until not Drop or Forward + Drop(usize), + /// forward a held buffer by id. Should continue calling with the same input until not Drop or Forward. + Forward(usize), + /// forward the input buffer + Passthrough, + /// Ignore this buffer and do not passthrough + Ignore, +} + +#[derive(Debug)] +struct HeldRecvBuffer { + id: usize, + time: Instant, + seqnum: u16, + bytes: u32, +} + +#[derive(Debug, Clone, Copy)] +pub struct Sr { + local_time: SystemTime, + remote_time: NtpTime, + rtp_time: u32, + octet_count: u32, + packet_count: u32, +} + +impl Sr { + pub fn ntp_timestamp(&self) -> NtpTime { + self.remote_time + } + + pub fn rtp_timestamp(&self) -> u32 { + self.rtp_time + } + + pub fn octet_count(&self) -> u32 { + self.octet_count + } + + pub fn packet_count(&self) -> u32 { + self.packet_count + } +} + +#[derive(Debug)] +pub struct RemoteSendSource { + source: Source, + probation_packets: usize, + last_received_sr: Option, + rtp_from: Option, + rtcp_from: Option, + initial_seqnum: Option, + ext_seqnum: Option, + recv_bytes: u64, + recv_packets: u64, + recv_packets_at_last_rtcp: u64, + ext_seqnum_at_last_rtcp: u64, + jitter: u32, + transit: Option, + // any held buffers. Used when source is on probation. 
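+    // Held buffers are keyed by the id passed to recv_packet(); they are released
+    // again through SourceRecvReply::Forward or discarded through
+    // SourceRecvReply::Drop once probation is resolved.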
+ held_buffers: VecDeque, + bitrate: Bitrate, + last_sent_rb: Option, + last_received_rb: HashMap, + last_request_key_unit: HashMap, + + // If a NACK/PLI is pending with the next RTCP packet + send_pli: bool, + // If a FIR is pending with the next RTCP packet + send_fir: bool, + // Sequence number of the next FIR + send_fir_seqnum: u8, + // Count from the ForceKeyUnitEvent to de-duplicate FIR + send_fir_count: Option, +} + +// The first time we recev a packet for jitter calculations +static INITIAL_RECV_TIME: once_cell::sync::OnceCell = once_cell::sync::OnceCell::new(); + +impl RemoteSendSource { + pub fn new(ssrc: u32) -> Self { + Self { + source: Source::new(ssrc), + probation_packets: DEFAULT_PROBATION_N_PACKETS, + last_received_sr: None, + rtp_from: None, + rtcp_from: None, + initial_seqnum: None, + ext_seqnum: None, + recv_bytes: 0, + recv_packets: 0, + recv_packets_at_last_rtcp: 0, + ext_seqnum_at_last_rtcp: 0, + held_buffers: VecDeque::new(), + jitter: 0, + transit: None, + bitrate: Bitrate::new(BITRATE_WINDOW), + last_sent_rb: None, + last_received_rb: HashMap::new(), + last_request_key_unit: HashMap::new(), + send_pli: false, + send_fir: false, + send_fir_seqnum: 0, + send_fir_count: None, + } + } + + /// Retrieve the ssrc for this source + pub fn ssrc(&self) -> u32 { + self.source.ssrc + } + + pub(crate) fn set_state(&mut self, state: SourceState) { + self.source.set_state(state); + } + + pub(crate) fn state(&self) -> SourceState { + self.source.state + } + + pub(crate) fn set_rtp_from(&mut self, from: Option) { + self.rtp_from = from; + } + + pub(crate) fn rtp_from(&self) -> Option { + self.rtp_from + } + + pub(crate) fn set_rtcp_from(&mut self, from: Option) { + self.rtcp_from = from; + } + + pub(crate) fn rtcp_from(&self) -> Option { + self.rtcp_from + } + + pub(crate) fn set_last_received_sr( + &mut self, + ntp_time: SystemTime, + remote_time: NtpTime, + rtp_time: u32, + octet_count: u32, + packet_count: u32, + ) { + self.last_received_sr = Some(Sr { + local_time: ntp_time, + remote_time, + rtp_time, + octet_count, + packet_count, + }); + } + + /// Retrieve the last received Sr for this source + pub fn last_received_sr(&self) -> Option { + self.last_received_sr + } + + /// Get the last sent RTCP report block for this source + pub fn last_sent_rb(&self) -> Option { + self.last_sent_rb + } + + fn init_sequence(&mut self, seqnum: u16) { + self.last_received_sr = None; + self.recv_bytes = 0; + self.recv_packets = 0; + self.recv_packets_at_last_rtcp = 0; + self.initial_seqnum = self.ext_seqnum; + self.ext_seqnum_at_last_rtcp = match self.ext_seqnum { + Some(ext) => ext, + None => 0x10000 + seqnum as u64, + }; + self.bitrate.reset(); + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn recv_packet( + &mut self, + bytes: u32, + time: Instant, + seqnum: u16, + rtp_timestamp: u32, + payload_type: u8, + clock_rate: Option, + hold_buffer_id: usize, + ) -> SourceRecvReply { + let initial_time = *INITIAL_RECV_TIME.get_or_init(|| time); + + if matches!(self.state(), SourceState::Bye) { + return SourceRecvReply::Ignore; + } + + let (max_seq, mut ext_seqnum) = match self.ext_seqnum { + Some(ext) => ((ext & 0xffff) as u16, seqnum as u64 + (ext & !0xffff)), + None => (seqnum.wrapping_sub(1), 0x10000 + seqnum as u64), + }; + trace!( + "source {} max seq {max_seq}, ext_seqnum {ext_seqnum}", + self.ssrc() + ); + + let diff = if seqnum < max_seq { + let mut diff = max_seq - seqnum; + + if diff > 0x7fff { + ext_seqnum += 1 << 16; + diff = u16::MAX - diff; + } + -(diff as i32 - 1) + } else { 
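+            // seqnum >= max_seq: a forward difference larger than 0x7fff is
+            // interpreted as a wrap of the 16-bit sequence number, so the extended
+            // sequence number is moved back one cycle instead of jumping forward.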
+ let mut diff = seqnum - max_seq; + if diff > 0x7fff { + ext_seqnum -= 1 << 16; + diff = u16::MAX - diff; + } + diff as i32 + }; + + trace!("source {} in state {:?} received seqnum {seqnum} with a difference of {diff} from the previous seqnum", self.ssrc(), self.state()); + + let ret = if let SourceState::Probation(n_probation) = self.state() { + // consecutive packets are good + if diff == 1 { + if (0..=1).contains(&n_probation) { + info!("source {} leaving probation", self.ssrc()); + self.init_sequence(seqnum); + self.set_state(SourceState::Normal); + SourceRecvReply::Passthrough + } else { + debug!( + "source {} holding seqnum {seqnum} on probation", + self.ssrc() + ); + self.held_buffers.push_front(HeldRecvBuffer { + id: hold_buffer_id, + seqnum, + bytes, + time, + }); + while self.held_buffers.len() > self.probation_packets { + if let Some(held) = self.held_buffers.pop_back() { + debug!( + "source {} dropping seqnum {seqnum} on probation", + self.ssrc() + ); + return SourceRecvReply::Drop(held.id); + } + } + self.set_state(SourceState::Probation(n_probation - 1)); + SourceRecvReply::Hold(hold_buffer_id) + } + } else if self.probation_packets > 0 { + debug!( + "source {} resetting probation counter to {} at seqnum {seqnum}", + self.ssrc(), + self.probation_packets + ); + self.set_state(SourceState::Probation(self.probation_packets - 1)); + if let Some(held) = self.held_buffers.pop_back() { + return SourceRecvReply::Drop(held.id); + } + self.held_buffers.push_front(HeldRecvBuffer { + id: hold_buffer_id, + seqnum, + bytes, + time, + }); + while self.held_buffers.len() > self.probation_packets { + if let Some(held) = self.held_buffers.pop_back() { + return SourceRecvReply::Drop(held.id); + } + } + SourceRecvReply::Hold(hold_buffer_id) + } else { + info!( + "source {} leaving probation (no probation configured)", + self.ssrc() + ); + self.init_sequence(seqnum); + self.set_state(SourceState::Normal); + SourceRecvReply::Passthrough + } + } else if diff >= 1 && diff < DEFAULT_MAX_DROPOUT as i32 { + SourceRecvReply::Passthrough + } else if diff < -(DEFAULT_MAX_MISORDER as i32) || diff >= DEFAULT_MAX_DROPOUT as i32 { + debug!("non-consecutive packet outside of configured limits, dropping"); + + // TODO: we will want to perform a few tasks here that the C jitterbuffer + // used to be taking care of: + // + // - We probably want to operate in the time domain rather than the sequence domain, + // that means implementing a utility similar to RTPPacketRateCtx + // - We should update our late / lost stats when a packet does get ignored + // - We should perform the equivalent of the big gap handling in the C jitterbuffer, + // possibly holding gap packets for a while before deciding that we indeed have an + // actual gap, then propagating a new "resync" receive reply before releasing the + // gap packets in order to let other components (eg jitterbuffer) reset themselves + // when needed. + + // FIXME: should be a harder error? 
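+            // For now the packet is ignored outright: neither the extended sequence
+            // number nor the receive statistics are updated for it.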
+ return SourceRecvReply::Ignore; + } else { + // duplicate or reordered packet + // downstream jitterbuffer will deal with this + SourceRecvReply::Passthrough + }; + + if matches!(ret, SourceRecvReply::Passthrough) { + if let Some(held) = self.held_buffers.pop_back() { + info!( + "source {} pushing stored seqnum {}", + self.ssrc(), + held.seqnum + ); + self.recv_packet_add_to_stats( + rtp_timestamp, + held.time, + initial_time, + payload_type, + clock_rate, + ext_seqnum, + held.bytes, + ); + return SourceRecvReply::Forward(held.id); + } + } + + trace!("setting ext seqnum to {ext_seqnum}"); + self.ext_seqnum = Some(ext_seqnum); + self.recv_packet_add_to_stats( + rtp_timestamp, + time, + initial_time, + payload_type, + clock_rate, + ext_seqnum, + bytes, + ); + + ret + } + + #[allow(clippy::too_many_arguments)] + fn recv_packet_add_to_stats( + &mut self, + rtp_timestamp: u32, + now: Instant, + initial_time: Instant, + payload_type: u8, + clock_rate: Option, + ext_seqnum: u64, + bytes: u32, + ) { + /* calculate jitter */ + if let Some(clock_rate) = clock_rate { + let rtparrival = + ((now.duration_since(initial_time).as_micros() & 0xffff_ffff_ffff_ffff) as u32) + .mul_div_round(clock_rate, 1_000_000) + .unwrap(); + let transit = rtparrival.wrapping_sub(rtp_timestamp); + let diff = if let Some(existing_transit) = self.transit { + existing_transit.abs_diff(transit) + } else { + 0 + }; + self.transit = Some(transit); + trace!("jitter {} diff {diff}", self.jitter); + self.jitter += diff.saturating_sub((self.jitter.saturating_add(8)) >> 4); + } + self.source.payload_type = Some(payload_type); + + if self.initial_seqnum.is_none() { + self.initial_seqnum = Some(ext_seqnum); + } + + self.bitrate.add_entry(bytes as usize, now); + self.recv_bytes = self.recv_bytes.wrapping_add(bytes as u64); + self.recv_packets += 1; + } + + pub(crate) fn received_sdes(&mut self, type_: u8, value: &[u8]) { + if let Ok(s) = std::str::from_utf8(value) { + self.source.sdes.insert(type_, s.to_owned()); + } + } + + /// Retrieve the SDES items currently received for this remote sender + pub fn sdes(&self) -> &HashMap { + &self.source.sdes + } + + pub(crate) fn set_last_activity(&mut self, time: Instant) { + self.source.last_activity = time; + } + + /// Retrieve the last time that activity was seen on this source + pub fn last_activity(&self) -> Instant { + self.source.last_activity + } + + pub(crate) fn bitrate(&self) -> usize { + self.bitrate.bitrate() + } + + pub fn payload_type(&self) -> Option { + self.source.payload_type + } + + fn extended_sequence_number(&self) -> u32 { + (self.ext_seqnum.unwrap_or(0) & 0xffff_ffff) as u32 + } + + pub(crate) fn generate_report_block(&self, ntp_time: SystemTime) -> Rb { + let (last_sr, delay_since_last_sr) = self + .last_received_sr + .as_ref() + .map(|t| { + ( + t.remote_time, + NtpTime::from_duration( + ntp_time + .duration_since(t.local_time) + .unwrap_or(Duration::from_secs(0)), + ), + ) + }) + .unwrap_or(( + NtpTime::from_duration(Duration::from_secs(0)), + NtpTime::from_duration(Duration::from_secs(0)), + )); + + let lost = self.packets_lost(); + + let expected_since_last_rtcp = self + .ext_seqnum + .unwrap_or(0) + .saturating_sub(self.ext_seqnum_at_last_rtcp); + let recv_packets_since_last_rtcp = self.recv_packets - self.recv_packets_at_last_rtcp; + let lost_packets_since_last_rtcp = + expected_since_last_rtcp as i64 - recv_packets_since_last_rtcp as i64; + let fraction_lost = if expected_since_last_rtcp == 0 || lost_packets_since_last_rtcp <= 0 { + 0 + } else { + 
(((lost_packets_since_last_rtcp as u64) << 8) / expected_since_last_rtcp) as u8 + }; + let cumulative_lost = if lost < 0 { + 0x800000 | (lost & 0x7fffff) as u32 + } else { + (lost & 0x7ffffff) as u32 + }; + + trace!( + "ssrc {} current packet counts ext_seqnum {:?} recv_packets {}", + self.source.ssrc, + self.ext_seqnum, + self.recv_packets + ); + trace!( + "ssrc {} previous rtcp values ext_seqnum {:?} recv_packets {}", + self.source.ssrc, + self.ext_seqnum_at_last_rtcp, + self.recv_packets_at_last_rtcp + ); + trace!("ssrc {} fraction expected {expected_since_last_rtcp} lost {lost_packets_since_last_rtcp} fraction lost {fraction_lost}", self.source.ssrc); + + Rb { + ssrc: self.source.ssrc, + fraction_lost, + cumulative_lost, + extended_sequence_number: self.extended_sequence_number(), + jitter: self.jitter >> 4, + last_sr: last_sr.as_u32(), + delay_since_last_sr: delay_since_last_sr.as_u32(), + } + } + + pub(crate) fn update_last_rtcp(&mut self) { + self.recv_packets_at_last_rtcp = self.recv_packets; + if let Some(ext) = self.ext_seqnum { + self.ext_seqnum_at_last_rtcp = ext; + } + } + + /// The amount of jitter (in clock-rate units) + pub fn jitter(&self) -> u32 { + self.jitter >> 4 + } + + /// The total number of packets lost over the lifetime of this source + pub fn packets_lost(&self) -> i64 { + let expected = self.ext_seqnum.unwrap_or(0) - self.initial_seqnum.unwrap_or(0) + 1; + expected as i64 - self.recv_packets as i64 + } + + #[cfg(test)] + /// Set the number of probation packets before validating this source + pub fn set_probation_packets(&mut self, n_packets: usize) { + info!("source {} setting probation to {n_packets}", self.ssrc()); + self.probation_packets = n_packets; + match self.state() { + SourceState::Bye | SourceState::Normal => (), + SourceState::Probation(existing) => { + if n_packets < existing { + self.set_state(SourceState::Probation(n_packets)); + } + } + } + } + + pub fn packet_count(&self) -> u64 { + self.recv_packets + } + + pub fn octet_count(&self) -> u64 { + self.recv_bytes + } + + pub fn add_last_rb( + &mut self, + sender_ssrc: u32, + rb: ReportBlock<'_>, + now: Instant, + ntp_now: SystemTime, + ) { + let ntp_now = system_time_to_ntp_time_u64(ntp_now); + let owned_rb = rb.into(); + self.last_received_rb + .entry(sender_ssrc) + .and_modify(|entry| { + *entry = ReceivedRb { + rb: owned_rb, + receive_time: now, + receive_ntp_time: ntp_now, + } + }) + .or_insert_with(|| ReceivedRb { + rb: owned_rb, + receive_time: now, + receive_ntp_time: ntp_now, + }); + } + + pub fn received_report_blocks(&self) -> impl Iterator + '_ { + self.last_received_rb.iter().map(|(&k, v)| (k, v)) + } + + pub(crate) fn into_receive(self) -> RemoteReceiveSource { + RemoteReceiveSource { + source: self.source, + rtcp_from: self.rtcp_from, + last_request_key_unit: self.last_request_key_unit, + } + } + + pub(crate) fn remote_request_key_unit_allowed( + &mut self, + now: Instant, + rb: &ReceivedRb, + ) -> bool { + let rtt = rb.round_trip_time(); + + // Allow up to one key-unit request per RTT and SSRC. 
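+        // The stored time is refreshed on every request, so `allowed` is only true
+        // when at least one round-trip has elapsed since the previous request from
+        // this SSRC; the very first request merely records the time.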
+ let mut allowed = false; + self.last_request_key_unit + .entry(rb.rb.ssrc) + .and_modify(|previous| { + allowed = now.duration_since(*previous) >= rtt; + *previous = now; + }) + .or_insert_with(|| now); + + allowed + } + + pub(crate) fn request_remote_key_unit(&mut self, _now: Instant, typ: KeyUnitRequestType) { + match typ { + KeyUnitRequestType::Fir(count) => { + if self + .send_fir_count + .map_or(true, |previous_count| previous_count != count) + { + self.send_fir_seqnum = self.send_fir_seqnum.wrapping_add(1); + } + self.send_fir = true; + self.send_fir_count = Some(count); + } + KeyUnitRequestType::Pli if !self.send_fir => { + self.send_pli = true; + } + _ => {} + } + } + + pub(crate) fn generate_pli(&mut self) -> Option { + if self.send_pli { + self.send_pli = false; + Some(rtcp_types::Pli::builder()) + } else { + None + } + } + + pub(crate) fn generate_fir( + &mut self, + fir: rtcp_types::FirBuilder, + added: &mut bool, + ) -> rtcp_types::FirBuilder { + if self.send_fir { + self.send_fir = false; + *added = true; + fir.add_ssrc(self.ssrc(), self.send_fir_seqnum) + } else { + fir + } + } +} + +#[derive(Debug)] +pub struct LocalReceiveSource { + source: Source, + bye_sent_time: Option, + bye_reason: Option, +} + +impl LocalReceiveSource { + pub(crate) fn new(ssrc: u32) -> Self { + Self { + source: Source::new(ssrc), + bye_sent_time: None, + bye_reason: None, + } + } + + pub fn ssrc(&self) -> u32 { + self.source.ssrc + } + + pub(crate) fn set_state(&mut self, state: SourceState) { + self.source.set_state(state); + } + + pub(crate) fn state(&self) -> SourceState { + self.source.state + } + + pub(crate) fn payload_type(&self) -> Option { + self.source.payload_type + } + + /// Set an sdes item for this source + pub fn set_sdes_item(&mut self, type_: u8, value: &[u8]) { + if let Ok(s) = std::str::from_utf8(value) { + self.source.sdes.insert(type_, s.to_owned()); + } + } + + /// Retrieve the sdes for this source + pub fn sdes(&self) -> &HashMap { + &self.source.sdes + } + + /// Set the last time when activity was seen for this source + pub fn set_last_activity(&mut self, time: Instant) { + self.source.last_activity = time; + } + + /// Retrieve the last time that activity was seen on this source + pub fn last_activity(&self) -> Instant { + self.source.last_activity + } + + pub(crate) fn bye_sent_at(&mut self, time: Instant) { + self.bye_sent_time = Some(time); + } + + pub(crate) fn bye_sent_time(&self) -> Option { + self.bye_sent_time + } + + pub(crate) fn mark_bye(&mut self, reason: &str) { + if self.source.state == SourceState::Bye { + return; + } + self.set_state(SourceState::Bye); + self.bye_reason = Some(reason.to_string()); + } + + pub(crate) fn bye_reason(&self) -> Option<&String> { + self.bye_reason.as_ref() + } +} + +#[derive(Debug)] +pub struct RemoteReceiveSource { + source: Source, + rtcp_from: Option, + last_request_key_unit: HashMap, +} + +impl RemoteReceiveSource { + pub(crate) fn new(ssrc: u32) -> Self { + Self { + source: Source::new(ssrc), + rtcp_from: None, + last_request_key_unit: HashMap::new(), + } + } + + pub fn ssrc(&self) -> u32 { + self.source.ssrc + } + + pub(crate) fn set_state(&mut self, state: SourceState) { + self.source.set_state(state); + } + + pub(crate) fn state(&self) -> SourceState { + self.source.state + } + + pub(crate) fn set_rtcp_from(&mut self, from: Option) { + self.rtcp_from = from; + } + + pub(crate) fn rtcp_from(&self) -> Option { + self.rtcp_from + } + + pub(crate) fn set_last_activity(&mut self, time: Instant) { + self.source.last_activity = 
time; + } + + pub(crate) fn received_sdes(&mut self, type_: u8, value: &[u8]) { + if let Ok(s) = std::str::from_utf8(value) { + self.source.sdes.insert(type_, s.to_owned()); + } + } + + /// Retrieve the SDES items currently received for this remote receiver + pub fn sdes(&self) -> &HashMap { + &self.source.sdes + } + + /// Retrieve the last time that activity was seen on this source + pub fn last_activity(&self) -> Instant { + self.source.last_activity + } + + pub(crate) fn into_send(self) -> RemoteSendSource { + RemoteSendSource { + source: self.source, + probation_packets: DEFAULT_PROBATION_N_PACKETS, + last_received_sr: None, + rtp_from: None, + rtcp_from: self.rtcp_from, + initial_seqnum: None, + ext_seqnum: None, + recv_bytes: 0, + recv_packets: 0, + recv_packets_at_last_rtcp: 0, + ext_seqnum_at_last_rtcp: 0, + held_buffers: VecDeque::new(), + jitter: 0, + transit: None, + bitrate: Bitrate::new(BITRATE_WINDOW), + last_sent_rb: None, + last_received_rb: HashMap::new(), + last_request_key_unit: self.last_request_key_unit, + send_pli: false, + send_fir: false, + send_fir_seqnum: 0, + send_fir_count: None, + } + } + + pub(crate) fn remote_request_key_unit_allowed( + &mut self, + now: Instant, + rb: &ReceivedRb, + ) -> bool { + let rtt = rb.round_trip_time(); + + // Allow up to one key-unit request per RTT. + let mut allowed = false; + self.last_request_key_unit + .entry(rb.rb.ssrc) + .and_modify(|previous| { + allowed = now.duration_since(*previous) >= rtt; + *previous = now; + }) + .or_insert_with(|| now); + + allowed + } +} + +#[derive(Debug)] +struct Bitrate { + max_time: Duration, + entries: VecDeque<(usize, Instant)>, +} + +impl Bitrate { + fn new(max_time: Duration) -> Self { + Self { + max_time, + entries: VecDeque::new(), + } + } + + fn add_entry(&mut self, bytes: usize, time: Instant) { + self.entries.push_back((bytes, time)); + while let Some((bytes, latest_time)) = self.entries.pop_front() { + if time.duration_since(latest_time) < self.max_time { + self.entries.push_front((bytes, latest_time)); + break; + } + } + } + + fn bitrate(&self) -> usize { + if let Some(front) = self.entries.front() { + let back = self.entries.back().unwrap(); + let dur_micros = (back.1 - front.1).as_micros(); + if dur_micros == 0 { + return front.0; + } + let bytes = self.entries.iter().map(|entry| entry.0).sum::(); + + (bytes as u64) + .mul_div_round(1_000_000, dur_micros as u64) + .unwrap_or(front.0 as u64) as usize + } else { + 0 + } + } + + fn reset(&mut self) { + self.entries.clear() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::rtpbin2::session::tests::init_logs; + + const TEST_PT: u8 = 96; + + #[test] + fn bitrate_single_value() { + init_logs(); + // the bitrate of a single entry is the entry itself + let mut bitrate = Bitrate::new(BITRATE_WINDOW); + bitrate.add_entry(100, Instant::now()); + assert_eq!(bitrate.bitrate(), 100); + } + + #[test] + fn bitrate_two_values_over_half_second() { + init_logs(); + let mut bitrate = Bitrate::new(Duration::from_secs(1)); + let now = Instant::now(); + bitrate.add_entry(100, now); + bitrate.add_entry(300, now + Duration::from_millis(500)); + assert_eq!(bitrate.bitrate(), (100 + 300) * 2); + } + + #[test] + fn receive_probation() { + init_logs(); + let mut source = RemoteSendSource::new(100); + let now = Instant::now(); + let mut hold_buffer_id = 0; + assert_eq!( + source.state(), + SourceState::Probation(DEFAULT_PROBATION_N_PACKETS) + ); + assert_eq!( + SourceRecvReply::Hold(0), + source.recv_packet(16, now, 500, 100, TEST_PT, None, 
hold_buffer_id) + ); + hold_buffer_id += 1; + assert_eq!( + SourceRecvReply::Forward(0), + source.recv_packet(16, now, 501, 100, TEST_PT, None, hold_buffer_id) + ); + assert_eq!(source.state(), SourceState::Normal); + assert_eq!( + SourceRecvReply::Passthrough, + source.recv_packet(16, now, 501, 100, TEST_PT, None, hold_buffer_id) + ); + assert_eq!(source.state(), SourceState::Normal); + } + + #[test] + fn receive_probation_gap() { + init_logs(); + let mut source = RemoteSendSource::new(100); + let now = Instant::now(); + let mut hold_buffer_id = 0; + assert_eq!( + source.state(), + SourceState::Probation(DEFAULT_PROBATION_N_PACKETS) + ); + assert_eq!( + SourceRecvReply::Hold(0), + source.recv_packet(100, now, 500, 100, TEST_PT, None, hold_buffer_id) + ); + hold_buffer_id += 1; + // push a buffer with a sequence gap and reset the probation counter + assert_eq!( + SourceRecvReply::Drop(0), + source.recv_packet(101, now, 502, 100, TEST_PT, None, hold_buffer_id) + ); + assert_eq!( + SourceRecvReply::Hold(1), + source.recv_packet(100, now, 502, 100, TEST_PT, None, hold_buffer_id) + ); + hold_buffer_id += 1; + assert_eq!( + SourceRecvReply::Forward(1), + source.recv_packet(101, now, 503, 100, TEST_PT, None, hold_buffer_id) + ); + assert_eq!(source.state(), SourceState::Normal); + assert_eq!( + SourceRecvReply::Passthrough, + source.recv_packet(101, now, 503, 100, TEST_PT, None, hold_buffer_id) + ); + } + + #[test] + fn receive_no_probation() { + init_logs(); + let mut source = RemoteSendSource::new(100); + let now = Instant::now(); + assert_eq!( + source.state(), + SourceState::Probation(DEFAULT_PROBATION_N_PACKETS) + ); + source.set_probation_packets(0); + assert_eq!(source.state(), SourceState::Probation(0)); + assert_eq!( + SourceRecvReply::Passthrough, + source.recv_packet(100, now, 500, 100, TEST_PT, None, 0) + ); + assert_eq!(source.state(), SourceState::Normal); + } + + #[test] + fn receive_wraparound() { + init_logs(); + let mut source = RemoteSendSource::new(100); + source.set_probation_packets(0); + let now = Instant::now(); + assert_eq!( + SourceRecvReply::Passthrough, + source.recv_packet(16, now, u16::MAX, u32::MAX, TEST_PT, None, 0) + ); + assert_eq!( + SourceRecvReply::Passthrough, + source.recv_packet(16, now, 0, 0, TEST_PT, None, 0) + ); + } +} diff --git a/net/rtp/src/rtpbin2/sync.rs b/net/rtp/src/rtpbin2/sync.rs new file mode 100644 index 00000000..ff2511ac --- /dev/null +++ b/net/rtp/src/rtpbin2/sync.rs @@ -0,0 +1,826 @@ +use gst::glib; +use gst::prelude::MulDiv; +use std::collections::{HashMap, VecDeque}; +use std::sync::Arc; +use std::time::Duration; + +use crate::utils::ExtendedTimestamp; + +use super::time::NtpTime; + +#[derive(Default, Debug)] +struct Ssrc { + cname: Option>, + clock_rate: Option, + extended_timestamp: ExtendedTimestamp, + last_sr_ntp_timestamp: Option, + last_sr_rtp_ext: Option, + // Arrival, RTP timestamp (extended), PTS (potentially skew-corrected) + base_times: Option<(u64, u64, u64)>, + current_delay: Option, + observations: Observations, +} + +impl Ssrc { + fn new(clock_rate: Option) -> Self { + Self { + clock_rate, + ..Default::default() + } + } + + fn reset_times(&mut self) { + self.extended_timestamp = ExtendedTimestamp::default(); + self.last_sr_ntp_timestamp = None; + self.last_sr_rtp_ext = None; + self.base_times = None; + self.current_delay = None; + self.observations = Observations::default(); + } + + /* Returns whether the caller should reset timing associated + * values for this ssrc (eg largest delay) */ + fn set_clock_rate(&mut self, 
clock_rate: u32) -> bool { + if Some(clock_rate) == self.clock_rate { + // No changes + return false; + } + + self.clock_rate = Some(clock_rate); + self.reset_times(); + true + } + + fn add_sender_report(&mut self, rtp_timestamp: u32, ntp_timestamp: u64) { + self.last_sr_rtp_ext = Some(self.extended_timestamp.next(rtp_timestamp)); + self.last_sr_ntp_timestamp = Some(ntp_timestamp.into()); + // Reset so that the next call to calculate_pts recalculates the NTP / RTP delay + self.current_delay = None; + } +} + +#[derive(Debug)] +struct CnameLargestDelay { + largest_delay: i64, + all_sync: bool, +} + +/// Govern how to pick presentation timestamps for packets +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, glib::Enum)] +#[repr(u32)] +#[enum_type(name = "GstRtpBin2TimestampingMode")] +pub enum TimestampingMode { + /// Simply use arrival time as timestamp + #[allow(dead_code)] + #[enum_value(name = "Use arrival time as timestamp", nick = "arrival")] + Arrival, + /// Use RTP timestamps as is + #[allow(dead_code)] + #[enum_value(name = "Use RTP timestamps as is", nick = "rtp")] + Rtp, + /// Correct skew to synchronize sender and receiver clocks + #[default] + #[enum_value( + name = "Correct skew to synchronize sender and receiver clocks", + nick = "skew" + )] + Skew, +} + +#[derive(Debug)] +pub struct Context { + ssrcs: HashMap, + mode: TimestampingMode, + cnames_to_ssrcs: HashMap, Vec>, + cname_to_largest_delays: HashMap, CnameLargestDelay>, +} + +impl Context { + pub fn new(mode: TimestampingMode) -> Self { + Self { + ssrcs: HashMap::new(), + mode, + cnames_to_ssrcs: HashMap::new(), + cname_to_largest_delays: HashMap::new(), + } + } + + pub fn set_clock_rate(&mut self, ssrc_val: u32, clock_rate: u32) { + if let Some(ssrc) = self.ssrcs.get_mut(&ssrc_val) { + if ssrc.set_clock_rate(clock_rate) { + debug!("{ssrc_val:#08x} times reset after clock rate change"); + if let Some(ref cname) = ssrc.cname { + self.cname_to_largest_delays.remove(cname); + } + } + } else { + self.ssrcs.insert(ssrc_val, Ssrc::new(Some(clock_rate))); + } + } + + pub fn clock_rate(&self, ssrc_val: u32) -> Option { + self.ssrcs.get(&ssrc_val).and_then(|ssrc| ssrc.clock_rate) + } + + fn disassociate(&mut self, ssrc_val: u32, cname: &str) { + self.cname_to_largest_delays.remove(cname); + + if let Some(ssrcs) = self.cnames_to_ssrcs.get_mut(cname) { + ssrcs.retain(|&other| other != ssrc_val); + } + } + + // FIXME: call this on timeouts / BYE (maybe collisions too?) 
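+    // Removing an ssrc also drops the cached largest-delay entry for its CNAME
+    // (via disassociate), so the next calculate_pts() recomputes the inter-stream
+    // offsets without it.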
+ #[allow(dead_code)] + pub fn remove_ssrc(&mut self, ssrc_val: u32) { + if let Some(ssrc) = self.ssrcs.remove(&ssrc_val) { + debug!("{ssrc_val:#08x} ssrc removed"); + if let Some(ref cname) = ssrc.cname { + self.disassociate(ssrc_val, cname) + } + } + } + + pub fn associate(&mut self, ssrc_val: u32, cname: &str) { + let ssrc = self + .ssrcs + .entry(ssrc_val) + .or_insert_with(|| Ssrc::new(None)); + + let cname = Arc::::from(cname); + if let Some(ref old_cname) = ssrc.cname { + if old_cname == &cname { + return; + } + + ssrc.cname = Some(cname.clone()); + self.disassociate(ssrc_val, cname.as_ref()); + } else { + ssrc.cname = Some(cname.clone()); + } + + let ssrcs = self.cnames_to_ssrcs.entry(cname.clone()).or_default(); + ssrcs.push(ssrc_val); + // Recalculate a new largest delay next time calculate_pts is called + self.cname_to_largest_delays.remove(cname.as_ref()); + } + + pub fn add_sender_report(&mut self, ssrc_val: u32, rtp_timestamp: u32, ntp_timestamp: u64) { + debug!("Adding new sender report for ssrc {ssrc_val:#08x}"); + + let ssrc = self + .ssrcs + .entry(ssrc_val) + .or_insert_with(|| Ssrc::new(None)); + + debug!( + "Latest NTP time: {:?}", + NtpTime::from(ntp_timestamp).as_duration().unwrap() + ); + + ssrc.add_sender_report(rtp_timestamp, ntp_timestamp) + } + + pub fn calculate_pts( + &mut self, + ssrc_val: u32, + timestamp: u32, + arrival_time: u64, + ) -> (u64, Option) { + let ssrc = self.ssrcs.get_mut(&ssrc_val).unwrap(); + let clock_rate = ssrc.clock_rate.unwrap() as u64; + + // Calculate an extended timestamp, calculations only work with extended timestamps + // from that point on + let rtp_ext_ns = ssrc + .extended_timestamp + .next(timestamp) + .mul_div_round(1_000_000_000, clock_rate) + .unwrap(); + + // Now potentially correct the skew by observing how RTP times and arrival times progress + let mut pts = match self.mode { + TimestampingMode::Skew => { + let (skew_corrected, discont) = ssrc.observations.process(rtp_ext_ns, arrival_time); + trace!( + "{ssrc_val:#08x} using skew corrected RTP ext: {}", + skew_corrected + ); + + if discont { + ssrc.reset_times(); + debug!("{ssrc_val:#08x} times reset after observations discontinuity"); + if let Some(ref cname) = ssrc.cname { + self.cname_to_largest_delays.remove(cname); + } + } + + skew_corrected + } + TimestampingMode::Rtp => { + trace!("{ssrc_val:#08x} using uncorrected RTP ext: {}", rtp_ext_ns); + + rtp_ext_ns + } + TimestampingMode::Arrival => { + trace!("{ssrc_val:#08x} using arrival time: {}", arrival_time); + + arrival_time + } + }; + + // Determine the first arrival time and the first RTP time for that ssrc + if ssrc.base_times.is_none() { + ssrc.base_times = Some((arrival_time, rtp_ext_ns, pts)); + } + + let (base_arrival_time, base_rtp_ext_ns, base_pts) = ssrc.base_times.unwrap(); + + // Base the PTS on the first arrival time + pts += base_arrival_time; + trace!("{ssrc_val:#08x} added up base arrival time: {}", pts); + // Now subtract the base PTS we calculated + pts = pts.saturating_sub(base_pts); + trace!("{ssrc_val:#08x} subtracted base PTS: {}", base_pts); + + trace!("{ssrc_val:#08x} PTS prior to potential SR offsetting: {pts}"); + + let mut ntp_time: Option = None; + + // TODO: add property for enabling / disabling offsetting based on + // NTP / RTP mapping, ie inter-stream synchronization + if let Some((last_sr_ntp, last_sr_rtp_ext)) = + ssrc.last_sr_ntp_timestamp.zip(ssrc.last_sr_rtp_ext) + { + let last_sr_rtp_ext_ns = last_sr_rtp_ext + .mul_div_round(1_000_000_000, clock_rate) + .unwrap(); + + // We have 
a new SR, we can now figure out an NTP time and calculate how it + // relates to arrival times + if ssrc.current_delay.is_none() { + if let Some(base_ntp_time) = if base_rtp_ext_ns > last_sr_rtp_ext_ns { + let rtp_range_ns = base_rtp_ext_ns - last_sr_rtp_ext_ns; + + (last_sr_ntp.as_duration().unwrap().as_nanos() as u64).checked_add(rtp_range_ns) + } else { + let rtp_range_ns = last_sr_rtp_ext_ns - base_rtp_ext_ns; + + (last_sr_ntp.as_duration().unwrap().as_nanos() as u64).checked_sub(rtp_range_ns) + } { + trace!( + "{ssrc_val:#08x} Base NTP time on first packet after new SR is {:?} ({:?})", + base_ntp_time, + Duration::from_nanos(base_ntp_time) + ); + + if base_ntp_time < base_arrival_time { + ssrc.current_delay = Some(base_arrival_time as i64 - base_ntp_time as i64); + } else { + ssrc.current_delay = + Some(-(base_ntp_time as i64 - base_arrival_time as i64)); + } + + trace!("{ssrc_val:#08x} Current delay is {:?}", ssrc.current_delay); + + if let Some(ref cname) = ssrc.cname { + // We should recalculate a new largest delay for this CNAME + self.cname_to_largest_delays.remove(cname); + } + } else { + warn!("{ssrc_val:#08x} Invalid NTP RTP time mapping, waiting for next SR"); + ssrc.last_sr_ntp_timestamp = None; + ssrc.last_sr_rtp_ext = None; + } + } + + ntp_time = if rtp_ext_ns > last_sr_rtp_ext_ns { + let rtp_range_ns = Duration::from_nanos(rtp_ext_ns - last_sr_rtp_ext_ns); + + last_sr_ntp + .as_duration() + .unwrap() + .checked_add(rtp_range_ns) + .map(NtpTime::from_duration) + } else { + let rtp_range_ns = Duration::from_nanos(last_sr_rtp_ext_ns - rtp_ext_ns); + + last_sr_ntp + .as_duration() + .unwrap() + .checked_sub(rtp_range_ns) + .map(NtpTime::from_duration) + }; + } + + // Finally, if we have a CNAME for this SSRC and we have managed to calculate + // a delay for all the other ssrcs for this CNAME, we can calculate by how much + // we need to delay this stream to sync it with the others, if at all. 
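+        // For example, if this stream's NTP-to-arrival delay is 40 ms and another
+        // stream with the same CNAME has 100 ms, the largest delay is 100 ms and
+        // this stream's PTS is delayed by the 60 ms difference, while the slowest
+        // stream is left untouched.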
+ if let Some(cname) = ssrc.cname.clone() { + let delay = ssrc.current_delay; + let cname_largest_delay = self + .cname_to_largest_delays + .entry(cname.clone()) + .or_insert_with(|| { + let mut cname_largest_delay = CnameLargestDelay { + largest_delay: std::i64::MIN, + all_sync: true, + }; + + trace!("{ssrc_val:#08x} searching for new largest delay"); + + let ssrc_vals = self.cnames_to_ssrcs.get(&cname).unwrap(); + + for ssrc_val in ssrc_vals { + let ssrc = self.ssrcs.get(ssrc_val).unwrap(); + + if let Some(delay) = ssrc.current_delay { + trace!("ssrc {ssrc_val:#08x} has delay {delay}",); + + if delay > cname_largest_delay.largest_delay { + cname_largest_delay.largest_delay = delay; + } + } else { + trace!("{ssrc_val:#08x} has no delay calculated yet"); + cname_largest_delay.all_sync = false; + } + } + + cname_largest_delay + }); + + trace!("{ssrc_val:#08x} Largest delay is {:?}", cname_largest_delay); + + if cname_largest_delay.all_sync { + let offset = (cname_largest_delay.largest_delay - delay.unwrap()) as u64; + + trace!("{ssrc_val:#08x} applying offset {}", offset); + + pts += offset; + } + } + + debug!("{ssrc_val:#08x} calculated PTS {pts}"); + + (pts, ntp_time) + } +} + +const WINDOW_LENGTH: u64 = 512; +const WINDOW_DURATION: u64 = 2_000_000_000; + +#[derive(Debug)] +struct Observations { + base_local_time: Option, + base_remote_time: Option, + highest_remote_time: Option, + deltas: VecDeque, + min_delta: i64, + skew: i64, + filling: bool, + window_size: usize, +} + +impl Default for Observations { + fn default() -> Self { + Self { + base_local_time: None, + base_remote_time: None, + highest_remote_time: None, + deltas: VecDeque::new(), + min_delta: 0, + skew: 0, + filling: true, + window_size: 0, + } + } +} + +impl Observations { + fn out_time(&self, base_local_time: u64, remote_diff: u64) -> (u64, bool) { + let out_time = base_local_time + remote_diff; + let out_time = if self.skew < 0 { + out_time.saturating_sub((-self.skew) as u64) + } else { + out_time + (self.skew as u64) + }; + + trace!("Skew {}, min delta {}", self.skew, self.min_delta); + trace!("Outputting {}", out_time); + + (out_time, false) + } + + // Based on the algorithm used in GStreamer's rtpjitterbuffer, which comes from + // Fober, Orlarey and Letz, 2005, "Real Time Clock Skew Estimation over Network Delays": + // http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.102.1546 + fn process(&mut self, remote_time: u64, local_time: u64) -> (u64, bool) { + trace!("Local time {}, remote time {}", local_time, remote_time,); + + let (base_remote_time, base_local_time) = + match (self.base_remote_time, self.base_local_time) { + (Some(remote), Some(local)) => (remote, local), + _ => { + debug!( + "Initializing base time: local {}, remote {}", + local_time, remote_time, + ); + self.base_remote_time = Some(remote_time); + self.base_local_time = Some(local_time); + self.highest_remote_time = Some(remote_time); + + return (local_time, false); + } + }; + + let highest_remote_time = self.highest_remote_time.unwrap(); + + let remote_diff = remote_time.saturating_sub(base_remote_time); + + /* Only update observations when remote times progress forward */ + if remote_time <= highest_remote_time { + return self.out_time(base_local_time, remote_diff); + } + + self.highest_remote_time = Some(remote_time); + + let local_diff = local_time.saturating_sub(base_local_time); + let delta = (local_diff as i64) - (remote_diff as i64); + + trace!( + "Local diff {}, remote diff {}, delta {}", + local_diff, + remote_diff, + delta, + ); + + if 
remote_diff > 0 && local_diff > 0 { + let slope = (local_diff as f64) / (remote_diff as f64); + if !(0.8..1.2).contains(&slope) { + warn!("Too small/big slope {}, resetting", slope); + + let discont = !self.deltas.is_empty(); + *self = Observations::default(); + + debug!( + "Initializing base time: local {}, remote {}", + local_time, remote_time, + ); + self.base_remote_time = Some(remote_time); + self.base_local_time = Some(local_time); + self.highest_remote_time = Some(remote_time); + + return (local_time, discont); + } + } + + if (delta > self.skew && delta - self.skew > 1_000_000_000) + || (delta < self.skew && self.skew - delta > 1_000_000_000) + { + warn!("Delta {} too far from skew {}, resetting", delta, self.skew); + + let discont = !self.deltas.is_empty(); + *self = Observations::default(); + + debug!( + "Initializing base time: local {}, remote {}", + local_time, remote_time, + ); + self.base_remote_time = Some(remote_time); + self.base_local_time = Some(local_time); + self.highest_remote_time = Some(remote_time); + + return (local_time, discont); + } + + if self.filling { + if self.deltas.is_empty() || delta < self.min_delta { + self.min_delta = delta; + } + self.deltas.push_back(delta); + + if remote_diff > WINDOW_DURATION || self.deltas.len() as u64 == WINDOW_LENGTH { + self.window_size = self.deltas.len(); + self.skew = self.min_delta; + self.filling = false; + } else { + let perc_time = remote_diff.mul_div_floor(100, WINDOW_DURATION).unwrap() as i64; + let perc_window = (self.deltas.len() as u64) + .mul_div_floor(100, WINDOW_LENGTH) + .unwrap() as i64; + let perc = std::cmp::max(perc_time, perc_window); + + self.skew = (perc * self.min_delta + ((10_000 - perc) * self.skew)) / 10_000; + } + } else { + let old = self.deltas.pop_front().unwrap(); + self.deltas.push_back(delta); + + if delta <= self.min_delta { + self.min_delta = delta; + } else if old == self.min_delta { + self.min_delta = self.deltas.iter().copied().min().unwrap(); + } + + self.skew = (self.min_delta + (124 * self.skew)) / 125; + } + + self.out_time(base_local_time, remote_diff) + } +} + +#[cfg(test)] +pub(crate) mod tests { + use super::*; + use crate::rtpbin2::session::tests::init_logs; + use crate::rtpbin2::time::system_time_to_ntp_time_u64; + + #[test] + fn test_single_stream_no_sr() { + init_logs(); + + let mut ctx = Context::new(TimestampingMode::Rtp); + + let mut now = 0; + + ctx.set_clock_rate(0x12345678, 90000); + + assert_eq!(ctx.calculate_pts(0x12345678, 0, now), (0, None)); + now += 1_000_000_000; + assert_eq!( + ctx.calculate_pts(0x12345678, 90000, now), + (1_000_000_000, None) + ); + } + + #[test] + fn test_single_stream_with_sr() { + init_logs(); + + let mut ctx = Context::new(TimestampingMode::Rtp); + + let mut now = 0; + + ctx.set_clock_rate(0x12345678, 90000); + + ctx.add_sender_report( + 0x12345678, + 0, + system_time_to_ntp_time_u64(std::time::UNIX_EPOCH).as_u64(), + ); + + assert_eq!( + ctx.calculate_pts(0x12345678, 0, now), + (0, Some(system_time_to_ntp_time_u64(std::time::UNIX_EPOCH))) + ); + now += 1_000_000_000; + assert_eq!( + ctx.calculate_pts(0x12345678, 90000, now), + ( + 1_000_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(1000) + )) + ) + ); + } + + #[test] + fn test_two_streams_with_sr() { + init_logs(); + + let mut ctx = Context::new(TimestampingMode::Rtp); + + let mut now = 0; + + ctx.set_clock_rate(0x12345, 90000); + ctx.set_clock_rate(0x67890, 90000); + ctx.associate(0x12345, "foo@bar"); + ctx.associate(0x67890, "foo@bar"); + + 
ctx.add_sender_report( + 0x12345, + 0, + system_time_to_ntp_time_u64(std::time::UNIX_EPOCH).as_u64(), + ); + + ctx.add_sender_report( + 0x67890, + 0, + system_time_to_ntp_time_u64(std::time::UNIX_EPOCH + Duration::from_millis(500)) + .as_u64(), + ); + + // NTP time 0 + assert_eq!( + ctx.calculate_pts(0x12345, 0, now), + (0, Some(system_time_to_ntp_time_u64(std::time::UNIX_EPOCH))) + ); + now += 500_000_000; + + // NTP time 500, arrival time 500 + assert_eq!( + ctx.calculate_pts(0x12345, 45000, now), + ( + 500_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(500) + )) + ) + ); + // NTP time 500, arrival time 500 + assert_eq!( + ctx.calculate_pts(0x67890, 0, now), + ( + 500_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(500) + )) + ) + ); + now += 500_000_000; + // NTP time 1000, arrival time 1000 + assert_eq!( + ctx.calculate_pts(0x12345, 90000, now), + ( + 1_000_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(1000) + )) + ) + ); + now += 500_000_000; + // NTP time 1500, arrival time 1500 + assert_eq!( + ctx.calculate_pts(0x67890, 90000, now), + ( + 1_500_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(1500) + )) + ) + ); + } + + #[test] + fn test_two_streams_no_sr_and_offset_arrival_times() { + init_logs(); + + let mut ctx = Context::new(TimestampingMode::Rtp); + + let mut now = 0; + + ctx.set_clock_rate(0x12345, 90000); + ctx.set_clock_rate(0x67890, 90000); + ctx.associate(0x12345, "foo@bar"); + ctx.associate(0x67890, "foo@bar"); + + assert_eq!(ctx.calculate_pts(0x12345, 0, now), (0, None)); + + now += 500_000_000; + + assert_eq!(ctx.calculate_pts(0x67890, 0, now), (500_000_000, None)); + assert_eq!(ctx.calculate_pts(0x12345, 45000, now), (500_000_000, None)); + } + + #[test] + fn test_two_streams_with_same_sr_and_offset_arrival_times() { + init_logs(); + + let mut ctx = Context::new(TimestampingMode::Rtp); + + let mut now = 0; + + ctx.set_clock_rate(0x12345, 90000); + ctx.set_clock_rate(0x67890, 90000); + ctx.associate(0x12345, "foo@bar"); + ctx.associate(0x67890, "foo@bar"); + + ctx.add_sender_report( + 0x12345, + 0, + system_time_to_ntp_time_u64(std::time::UNIX_EPOCH).as_u64(), + ); + + ctx.add_sender_report( + 0x67890, + 0, + system_time_to_ntp_time_u64(std::time::UNIX_EPOCH).as_u64(), + ); + + assert_eq!( + ctx.calculate_pts(0x12345, 0, now), + (0, Some(system_time_to_ntp_time_u64(std::time::UNIX_EPOCH))) + ); + + now += 500_000_000; + + assert_eq!( + ctx.calculate_pts(0x67890, 0, now), + ( + 500_000_000, + Some(system_time_to_ntp_time_u64(std::time::UNIX_EPOCH)) + ) + ); + + assert_eq!( + ctx.calculate_pts(0x12345, 45000, now), + ( + 1_000_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(500) + )) + ) + ); + + now += 500_000_000; + + assert_eq!( + ctx.calculate_pts(0x67890, 45000, now), + ( + 1_000_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(500) + )) + ) + ); + + // Now remove the delayed source and observe that the offset is gone + // for the other source + + ctx.remove_ssrc(0x67890); + + assert_eq!( + ctx.calculate_pts(0x12345, 90000, now), + ( + 1_000_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(1000) + )) + ) + ); + } + + #[test] + fn test_two_streams_with_sr_different_cnames() { + init_logs(); + + let mut ctx = Context::new(TimestampingMode::Rtp); + + let mut 
now = 0; + + ctx.set_clock_rate(0x12345, 90000); + ctx.set_clock_rate(0x67890, 90000); + ctx.associate(0x12345, "foo@bar"); + ctx.associate(0x67890, "foo@baz"); + + ctx.add_sender_report( + 0x12345, + 0, + system_time_to_ntp_time_u64(std::time::UNIX_EPOCH).as_u64(), + ); + + ctx.add_sender_report( + 0x67890, + 0, + system_time_to_ntp_time_u64(std::time::UNIX_EPOCH).as_u64(), + ); + + assert_eq!( + ctx.calculate_pts(0x12345, 0, now), + (0, Some(system_time_to_ntp_time_u64(std::time::UNIX_EPOCH))) + ); + + now += 500_000_000; + + assert_eq!( + ctx.calculate_pts(0x67890, 0, now), + ( + 500_000_000, + Some(system_time_to_ntp_time_u64(std::time::UNIX_EPOCH)) + ) + ); + + assert_eq!( + ctx.calculate_pts(0x12345, 45000, now), + ( + 500_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(500) + )) + ) + ); + + now += 500_000_000; + + assert_eq!( + ctx.calculate_pts(0x67890, 45000, now), + ( + 1_000_000_000, + Some(system_time_to_ntp_time_u64( + std::time::UNIX_EPOCH + Duration::from_millis(500) + )) + ) + ); + } +} diff --git a/net/rtp/src/rtpbin2/time.rs b/net/rtp/src/rtpbin2/time.rs new file mode 100644 index 00000000..61b9afcf --- /dev/null +++ b/net/rtp/src/rtpbin2/time.rs @@ -0,0 +1,62 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + ops::{Add, Sub}, + time::{Duration, SystemTime}, +}; + +// time between the NTP time at 1900-01-01 and the unix EPOCH (1970-01-01) +const NTP_OFFSET: Duration = Duration::from_secs((365 * 70 + 17) * 24 * 60 * 60); + +// 2^32 +const F32: f64 = 4_294_967_296.0; + +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] +pub struct NtpTime(u64); + +impl NtpTime { + pub fn from_duration(dur: Duration) -> Self { + Self((dur.as_secs_f64() * F32) as u64) + } + + pub fn as_duration(&self) -> Result { + Duration::try_from_secs_f64(self.0 as f64 / F32) + } + + pub fn as_u32(self) -> u32 { + ((self.0 >> 16) & 0xffffffff) as u32 + } + + pub fn as_u64(self) -> u64 { + self.0 + } +} + +impl Sub for NtpTime { + type Output = NtpTime; + fn sub(self, rhs: Self) -> Self::Output { + NtpTime(self.0 - rhs.0) + } +} + +impl Add for NtpTime { + type Output = NtpTime; + fn add(self, rhs: Self) -> Self::Output { + NtpTime(self.0 + rhs.0) + } +} + +pub fn system_time_to_ntp_time_u64(time: SystemTime) -> NtpTime { + let dur = time + .duration_since(SystemTime::UNIX_EPOCH) + .expect("time is before unix epoch?!") + + NTP_OFFSET; + + NtpTime::from_duration(dur) +} + +impl From for NtpTime { + fn from(value: u64) -> Self { + NtpTime(value) + } +} diff --git a/net/rtp/src/utils.rs b/net/rtp/src/utils.rs new file mode 100644 index 00000000..d73284dd --- /dev/null +++ b/net/rtp/src/utils.rs @@ -0,0 +1,639 @@ +/// Defines a comparable new type `$typ` on a `[std::num::Wrapping]::`. +/// +/// The new type will wrap-around on additions and substractions and it comparison +/// operators take the wrapping in consideration. +/// +/// The comparison algorithm uses [serial number arithmetic](serial-number-arithmetic). +/// The limit being that it can't tell whether 0x8000_0000 is greater or less than 0. +/// +/// # Examples +/// +/// ```rust +/// # use gstrsrtp::define_wrapping_comparable_u32; +/// +/// /// Error type to return when comparing 0x8000_0000 to 0. 
+/// struct RTPTimestampComparisonLimit;
+///
+/// /// Define the new type comparable and wrapping `u32` `RTPTimestamp`:
+/// define_wrapping_comparable_u32!(RTPTimestamp, RTPTimestampComparisonLimit);
+///
+/// let ts0 = RTPTimestamp::ZERO;
+/// assert!(ts0.is_zero());
+///
+/// let mut ts = ts0;
+/// ts += 1;
+/// assert_eq!(*ts, 1);
+/// assert_eq!(RTPTimestamp::MAX + ts, ts0);
+///
+/// let ts2: RTPTimestamp = 2.into();
+/// assert_eq!(*ts2, 2);
+/// assert_eq!(ts - ts2, RTPTimestamp::MAX);
+/// ```
+///
+/// [serial-number-arithmetic]: http://en.wikipedia.org/wiki/Serial_number_arithmetic
+#[macro_export]
+macro_rules! define_wrapping_comparable_u32 {
+    ($typ:ident) => {
+        #[derive(Clone, Copy, Debug, Default)]
+        pub struct $typ(std::num::Wrapping<u32>);
+
+        impl $typ {
+            pub const ZERO: $typ = $typ(std::num::Wrapping(0));
+            pub const MIN: $typ = $typ(std::num::Wrapping(u32::MIN));
+            pub const MAX: $typ = $typ(std::num::Wrapping(u32::MAX));
+            pub const NONE: Option<$typ> = None;
+
+            #[inline]
+            pub const fn new(val: u32) -> Self {
+                Self(std::num::Wrapping(val))
+            }
+
+            #[inline]
+            pub fn from_ext(ext_val: u64) -> Self {
+                Self(std::num::Wrapping((ext_val & 0xffff_ffff) as u32))
+            }
+
+            #[inline]
+            pub fn is_zero(self) -> bool {
+                self.0 .0 == 0
+            }
+
+            #[inline]
+            pub fn distance(self, other: Self) -> Option<i32> {
+                self.distance_u32(other.0 .0)
+            }
+
+            #[inline]
+            pub fn distance_u32(self, other: u32) -> Option<i32> {
+                // See http://en.wikipedia.org/wiki/Serial_number_arithmetic
+
+                let this = i32::from_ne_bytes(self.0 .0.to_ne_bytes());
+                let other = i32::from_ne_bytes(other.to_ne_bytes());
+
+                match this.wrapping_sub(other) {
+                    -0x8000_0000 => {
+                        // This is the limit of the algorithm:
+                        // arguments are too far away to determine the result sign,
+                        // i.e.
which one is greater than the other + None + } + delta => Some(delta), + } + } + } + + impl From for $typ { + fn from(value: u32) -> Self { + Self(std::num::Wrapping(value)) + } + } + + impl From<$typ> for u32 { + fn from(value: $typ) -> Self { + value.0 .0 + } + } + + impl std::ops::Deref for $typ { + type Target = u32; + + fn deref(&self) -> &u32 { + &self.0 .0 + } + } + + impl std::ops::Add for $typ { + type Output = Self; + fn add(self, rhs: Self) -> Self { + Self(self.0.add(rhs.0)) + } + } + + impl std::ops::Add for $typ { + type Output = Self; + fn add(self, rhs: u32) -> Self { + Self(self.0.add(std::num::Wrapping(rhs))) + } + } + + impl std::ops::Add for $typ { + type Output = Self; + fn add(self, rhs: i32) -> Self { + // See http://en.wikipedia.org/wiki/Serial_number_arithmetic + + let this = i32::from_ne_bytes(self.0 .0.to_ne_bytes()); + let res = this.wrapping_add(rhs); + + let res = u32::from_ne_bytes(res.to_ne_bytes()); + Self(std::num::Wrapping(res)) + } + } + + impl std::ops::AddAssign for $typ { + fn add_assign(&mut self, rhs: Self) { + self.0.add_assign(rhs.0); + } + } + + impl std::ops::AddAssign for $typ { + fn add_assign(&mut self, rhs: u32) { + self.0.add_assign(std::num::Wrapping(rhs)); + } + } + + impl std::ops::AddAssign for $typ { + fn add_assign(&mut self, rhs: i32) { + *self = *self + rhs; + } + } + + impl std::ops::Sub for $typ { + type Output = Self; + fn sub(self, rhs: Self) -> Self { + self.sub(rhs.0 .0) + } + } + + impl std::ops::Sub for $typ { + type Output = Self; + fn sub(self, rhs: u32) -> Self { + Self(self.0.sub(std::num::Wrapping(rhs))) + } + } + + impl std::ops::SubAssign for $typ { + fn sub_assign(&mut self, rhs: Self) { + self.sub_assign(rhs.0 .0); + } + } + + impl std::ops::SubAssign for $typ { + fn sub_assign(&mut self, rhs: u32) { + self.0.sub_assign(std::num::Wrapping(rhs)); + } + } + + impl std::cmp::PartialEq for $typ { + fn eq(&self, other: &Self) -> bool { + self.0 .0 == other.0 .0 + } + } + + impl std::cmp::PartialEq for $typ { + fn eq(&self, other: &u32) -> bool { + self.0 .0 == *other + } + } + + impl std::cmp::Eq for $typ {} + + impl std::cmp::PartialOrd for $typ { + fn partial_cmp(&self, other: &Self) -> Option { + self.distance(*other).map(|d| d.cmp(&0)) + } + } + + impl gst::prelude::OptionOperations for $typ {} + }; + + ($typ:ident, $comp_err_type:ident) => { + define_wrapping_comparable_u32!($typ); + + impl $typ { + #[inline] + pub fn try_cmp(&self, other: $typ) -> Result { + self.partial_cmp(&other).ok_or($comp_err_type) + } + } + }; + + ($typ:ident, $err_enum:ty, $comp_err_variant:ident) => { + define_wrapping_comparable_u32!($typ); + + impl $typ { + #[inline] + pub fn try_cmp(&self, other: $typ) -> Result { + self.partial_cmp(&other) + .ok_or(<$err_enum>::$comp_err_variant) + } + } + }; +} + +#[macro_export] +macro_rules! 
define_wrapping_comparable_u32_with_display {
+    ($typ:ident, impl) => {
+        impl std::fmt::Display for $typ {
+            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+                f.write_fmt(format_args!("{}", self.0 .0))
+            }
+        }
+    };
+
+    ($typ:ident) => {
+        define_wrapping_comparable_u32!($typ);
+        define_wrapping_comparable_u32_with_display!($typ, impl);
+    };
+
+    ($typ:ident, $comp_err_type:ty) => {
+        define_wrapping_comparable_u32!($typ, $comp_err_type);
+        define_wrapping_comparable_u32_with_display!($typ, impl);
+    };
+
+    ($typ:ident, $err_enum:ty, $comp_err_variant:ident,) => {
+        define_wrapping_comparable_u32!($typ, $err_enum, $comp_err_variant);
+        define_wrapping_comparable_u32_with_display!($typ, impl);
+    };
+}
+
+/// Stores information necessary to compute a series of extended timestamps
+#[derive(Default, Debug)]
+pub(crate) struct ExtendedTimestamp {
+    last_ext: Option<u64>,
+}
+
+impl ExtendedTimestamp {
+    /// Produces the next extended timestamp from a new RTP timestamp
+    pub(crate) fn next(&mut self, rtp_timestamp: u32) -> u64 {
+        let ext = match self.last_ext {
+            None => (1u64 << 32) + rtp_timestamp as u64,
+            Some(last_ext) => {
+                // pick wraparound counter from previous timestamp and add to new timestamp
+                let mut ext = rtp_timestamp as u64 + (last_ext & !0xffffffff);
+
+                // check for timestamp wraparound
+                if ext < last_ext {
+                    let diff = last_ext - ext;
+
+                    if diff > std::i32::MAX as u64 {
+                        // timestamp went backwards more than allowed, we wrap around and get
+                        // updated extended timestamp.
+                        ext += 1u64 << 32;
+                    }
+                } else {
+                    let diff = ext - last_ext;
+
+                    if diff > std::i32::MAX as u64 {
+                        if ext < 1u64 << 32 {
+                            // We can't ever get to such a case as our counter is opaque
+                            unreachable!()
+                        } else {
+                            ext -= 1u64 << 32;
+                            // We don't want the extended timestamp storage to go back, ever
+                            return ext;
+                        }
+                    }
+                }
+
+                ext
+            }
+        };
+
+        self.last_ext = Some(ext);
+
+        ext
+    }
+}
+
+/// Stores information necessary to compute a series of extended seqnums
+#[derive(Default, Debug)]
+pub(crate) struct ExtendedSeqnum {
+    last_ext: Option<u64>,
+}
+
+impl ExtendedSeqnum {
+    /// Produces the next extended seqnum from a new RTP seqnum
+    pub(crate) fn next(&mut self, rtp_seqnum: u16) -> u64 {
+        let ext = match self.last_ext {
+            None => (1u64 << 16) + rtp_seqnum as u64,
+            Some(last_ext) => {
+                // pick wraparound counter from previous seqnum and add to new seqnum
+                let mut ext = rtp_seqnum as u64 + (last_ext & !0xffff);
+
+                // check for seqnum wraparound
+                if ext < last_ext {
+                    let diff = last_ext - ext;
+
+                    if diff > std::i16::MAX as u64 {
+                        // seqnum went backwards more than allowed, we wrap around and get
+                        // updated extended seqnum.
+ ext += 1u64 << 16; + } + } else { + let diff = ext - last_ext; + + if diff > std::i16::MAX as u64 { + if ext < 1u64 << 16 { + // We can't ever get to such a case as our counter is opaque + unreachable!() + } else { + ext -= 1u64 << 16; + // We don't want the extended timestamp storage to go back, ever + return ext; + } + } + } + + ext + } + }; + + self.last_ext = Some(ext); + + ext + } +} + +#[cfg(test)] +mod tests { + use super::*; + define_wrapping_comparable_u32!(MyWrapper); + + #[test] + fn wrapping_u32_basics() { + let zero = MyWrapper::ZERO; + let one = MyWrapper::from(1); + let two = MyWrapper::from(2); + + assert_eq!(u32::from(zero), 0); + assert!(zero.is_zero()); + assert_eq!(u32::from(one), 1); + assert_eq!(u32::from(two), 2); + + let max_plus_1_u64 = MyWrapper::from_ext((u32::MAX as u64) + 1); + assert_eq!(max_plus_1_u64, MyWrapper::ZERO); + } + + #[test] + fn add_wrapping_u32() { + let one = MyWrapper::from(1); + let two = MyWrapper::from(2); + + assert_eq!(MyWrapper::ZERO + one, one); + assert_eq!(MyWrapper::ZERO + 1u32, one); + assert_eq!(one + one, two); + assert_eq!(one + 1u32, two); + + assert_eq!(MyWrapper::MAX + MyWrapper::ZERO, MyWrapper::MAX); + assert_eq!(MyWrapper::MAX + one, MyWrapper::ZERO); + assert_eq!(MyWrapper::MAX + two, one); + + let mut var = MyWrapper::ZERO; + assert!(var.is_zero()); + var += 1; + assert_eq!(var, one); + var += one; + assert_eq!(var, two); + + let mut var = MyWrapper::MAX; + var += 1; + assert!(var.is_zero()); + var += one; + assert_eq!(var, one); + } + + #[test] + fn add_wrapping_u32_i32() { + let one = MyWrapper::from(1); + + assert_eq!(MyWrapper::ZERO + 1i32, one); + assert_eq!(MyWrapper::ZERO + -1i32, MyWrapper::MAX); + assert_eq!(MyWrapper::MAX + 1i32, MyWrapper::ZERO); + assert_eq!(MyWrapper::MAX + 2i32, one); + + assert_eq!( + MyWrapper::from(0x8000_0000) + -0i32, + MyWrapper::from(0x8000_0000) + ); + assert_eq!( + MyWrapper::from(0x8000_0000) + 1i32, + MyWrapper::from(0x8000_0001) + ); + assert_eq!( + MyWrapper::from(0x8000_0000) + -1i32, + MyWrapper::from(0x7fff_ffff) + ); + assert_eq!( + MyWrapper::from(0x7fff_ffff) + 1i32, + MyWrapper::from(0x8000_0000) + ); + assert_eq!(MyWrapper::ZERO + i32::MIN, MyWrapper::from(0x8000_0000)); + + let mut var = MyWrapper::ZERO; + var += 1i32; + assert_eq!(var, one); + + let mut var = MyWrapper::ZERO; + var += -1i32; + assert_eq!(var, MyWrapper::MAX); + + let mut var = MyWrapper::MAX; + var += 1; + assert_eq!(var, MyWrapper::ZERO); + } + + #[test] + fn sub_wrapping_u32() { + let one = MyWrapper::from(1); + + assert_eq!(MyWrapper::ZERO - MyWrapper::ZERO, MyWrapper::ZERO); + assert_eq!(MyWrapper::MAX - MyWrapper::MAX, MyWrapper::ZERO); + assert_eq!(MyWrapper::ZERO - one, MyWrapper::MAX); + assert_eq!(MyWrapper::ZERO - MyWrapper::MAX, one); + assert_eq!( + MyWrapper::ZERO - MyWrapper::from(0x8000_0000), + MyWrapper::from(0x8000_0000) + ); + assert_eq!( + MyWrapper::from(0x8000_0000) - MyWrapper::ZERO, + MyWrapper::from(0x8000_0000) + ); + + let mut var = MyWrapper::ZERO; + assert!(var.is_zero()); + var -= 1; + assert_eq!(var, MyWrapper::MAX); + + let mut var = MyWrapper::MAX; + var -= MyWrapper::MAX; + assert!(var.is_zero()); + } + + #[test] + fn compare_wrapping_u32() { + use std::cmp::Ordering::*; + + #[derive(Debug, PartialEq)] + pub struct ComparisonLimit; + define_wrapping_comparable_u32!(MyWrapper, ComparisonLimit); + + let cmp = |a: u32, b: u32| MyWrapper::from(a).partial_cmp(&MyWrapper::from(b)); + let try_cmp = |a: u32, b: u32| MyWrapper::from(a).try_cmp(MyWrapper::from(b)); + + 
assert_eq!(cmp(0, 1).unwrap(), Less); + assert_eq!(try_cmp(0, 1), Ok(Less)); + assert_eq!(cmp(1, 1).unwrap(), Equal); + assert_eq!(try_cmp(1, 1), Ok(Equal)); + assert_eq!(cmp(1, 0).unwrap(), Greater); + assert_eq!(try_cmp(1, 0), Ok(Greater)); + + assert_eq!(cmp(0x7fff_ffff, 0).unwrap(), Greater); + assert_eq!(try_cmp(0x7fff_ffff, 0), Ok(Greater)); + assert_eq!(cmp(0xffff_ffff, 0).unwrap(), Less); + assert_eq!(try_cmp(0xffff_ffff, 0), Ok(Less)); + + assert_eq!(cmp(0, 0x7fff_ffff).unwrap(), Less); + assert_eq!(try_cmp(0, 0x7fff_ffff), Ok(Less)); + assert_eq!(cmp(0, 0xffff_ffff).unwrap(), Greater); + assert_eq!(try_cmp(0, 0xffff_ffff), Ok(Greater)); + + // This is the limit of the algorithm: + assert!(cmp(0x8000_0000, 0).is_none()); + assert!(cmp(0, 0x8000_0000).is_none()); + assert_eq!(try_cmp(0x8000_0000, 0), Err(ComparisonLimit)); + assert_eq!(try_cmp(0, 0x8000_0000), Err(ComparisonLimit)); + } + + #[test] + fn extended_timestamp_basic() { + let mut ext_ts = ExtendedTimestamp::default(); + + // No wraparound when timestamps are increasing + assert_eq!(ext_ts.next(0), (1 << 32)); + assert_eq!(ext_ts.next(10), (1 << 32) + 10); + assert_eq!(ext_ts.next(10), (1 << 32) + 10); + assert_eq!( + ext_ts.next(1 + std::i32::MAX as u32), + (1 << 32) + 1 + std::i32::MAX as u64 + ); + + // Even big bumps under G_MAXINT32 don't result in wrap-around + ext_ts = ExtendedTimestamp::default(); + + assert_eq!(ext_ts.next(1087500), (1 << 32) + 1087500); + assert_eq!(ext_ts.next(24), (1 << 32) + 24); + } + + #[test] + fn extended_timestamp_wraparound() { + let mut ext_ts = ExtendedTimestamp::default(); + assert_eq!( + ext_ts.next(std::u32::MAX - 90000 + 1), + (1 << 32) + std::u32::MAX as u64 - 90000 + 1 + ); + assert_eq!(ext_ts.next(0), (1 << 32) + std::u32::MAX as u64 + 1); + assert_eq!( + ext_ts.next(90000), + (1 << 32) + std::u32::MAX as u64 + 1 + 90000 + ); + } + + #[test] + fn extended_timestamp_wraparound_disordered() { + let mut ext_ts = ExtendedTimestamp::default(); + + assert_eq!( + ext_ts.next(std::u32::MAX - 90000 + 1), + (1 << 32) + std::u32::MAX as u64 - 90000 + 1 + ); + assert_eq!(ext_ts.next(0), (1 << 32) + std::u32::MAX as u64 + 1); + + // Unwrapping around + assert_eq!( + ext_ts.next(std::u32::MAX - 90000 + 1), + (1 << 32) + std::u32::MAX as u64 - 90000 + 1 + ); + assert_eq!( + ext_ts.next(90000), + (1 << 32) + std::u32::MAX as u64 + 1 + 90000 + ); + } + + #[test] + fn extended_timestamp_wraparound_disordered_backwards() { + let mut ext_ts = ExtendedTimestamp::default(); + + assert_eq!(ext_ts.next(90000), (1 << 32) + 90000); + + // Wraps backwards + assert_eq!( + ext_ts.next(std::u32::MAX - 90000 + 1), + std::u32::MAX as u64 - 90000 + 1 + ); + + // Wraps again forwards + assert_eq!(ext_ts.next(90000), (1 << 32) + 90000); + } + + #[test] + fn extended_seqnum_basic() { + let mut ext_seq = ExtendedSeqnum::default(); + + // No wraparound when seqnums are increasing + assert_eq!(ext_seq.next(0), (1 << 16)); + assert_eq!(ext_seq.next(10), (1 << 16) + 10); + assert_eq!(ext_seq.next(10), (1 << 16) + 10); + assert_eq!( + ext_seq.next(1 + std::i16::MAX as u16), + (1 << 16) + 1 + std::i16::MAX as u64 + ); + + // Even big bumps under MAXINT16 don't result in wrap-around + ext_seq = ExtendedSeqnum::default(); + + assert_eq!(ext_seq.next(27500), (1 << 16) + 27500); + assert_eq!(ext_seq.next(24), (1 << 16) + 24); + } + + #[test] + fn extended_seqnum_wraparound() { + let mut ext_seq = ExtendedSeqnum::default(); + assert_eq!( + ext_seq.next(std::u16::MAX - 9000 + 1), + (1 << 16) + std::u16::MAX as u64 - 9000 + 1 
+ ); + assert_eq!(ext_seq.next(0), (1 << 16) + std::u16::MAX as u64 + 1); + assert_eq!( + ext_seq.next(9000), + (1 << 16) + std::u16::MAX as u64 + 1 + 9000 + ); + } + + #[test] + fn extended_seqnum_wraparound_disordered() { + let mut ext_seq = ExtendedSeqnum::default(); + + assert_eq!( + ext_seq.next(std::u16::MAX - 9000 + 1), + (1 << 16) + std::u16::MAX as u64 - 9000 + 1 + ); + assert_eq!(ext_seq.next(0), (1 << 16) + std::u16::MAX as u64 + 1); + + // Unwrapping around + assert_eq!( + ext_seq.next(std::u16::MAX - 9000 + 1), + (1 << 16) + std::u16::MAX as u64 - 9000 + 1 + ); + assert_eq!( + ext_seq.next(9000), + (1 << 16) + std::u16::MAX as u64 + 1 + 9000 + ); + } + + #[test] + fn extended_seqnum_wraparound_disordered_backwards() { + let mut ext_seq = ExtendedSeqnum::default(); + + assert_eq!(ext_seq.next(9000), (1 << 16) + 9000); + + // Wraps backwards + assert_eq!( + ext_seq.next(std::u16::MAX - 9000 + 1), + std::u16::MAX as u64 - 9000 + 1 + ); + + // Wraps again forwards + assert_eq!(ext_seq.next(9000), (1 << 16) + 9000); + } +} diff --git a/net/rtp/tests/rtpbin2.rs b/net/rtp/tests/rtpbin2.rs new file mode 100644 index 00000000..add575bf --- /dev/null +++ b/net/rtp/tests/rtpbin2.rs @@ -0,0 +1,262 @@ +// +// Copyright (C) 2023 Matthew Waters +// +// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0. +// If a copy of the MPL was not distributed with this file, You can obtain one at +// . +// +// SPDX-License-Identifier: MPL-2.0 + +use std::sync::{atomic::AtomicUsize, Arc, Mutex}; + +use gst::{prelude::*, Caps}; +use gst_check::Harness; +use rtp_types::*; + +static ELEMENT_COUNTER: AtomicUsize = AtomicUsize::new(0); + +fn next_element_counter() -> usize { + ELEMENT_COUNTER.fetch_add(1, std::sync::atomic::Ordering::SeqCst) +} + +fn init() { + use std::sync::Once; + static INIT: Once = Once::new(); + + INIT.call_once(|| { + gst::init().unwrap(); + gstrsrtp::plugin_register_static().expect("rtpbin2 test"); + }); +} + +const TEST_SSRC: u32 = 0x12345678; +const TEST_PT: u8 = 96; +const TEST_CLOCK_RATE: u32 = 48000; + +fn generate_rtp_buffer(seqno: u16, rtpts: u32, payload_len: usize) -> gst::Buffer { + let payload = vec![4; payload_len]; + let packet = RtpPacketBuilder::new() + .ssrc(TEST_SSRC) + .payload_type(TEST_PT) + .sequence_number(seqno) + .timestamp(rtpts) + .payload(payload.as_slice()); + let size = packet.calculate_size().unwrap(); + let mut data = vec![0; size]; + packet.write_into(&mut data).unwrap(); + gst::Buffer::from_mut_slice(data) +} + +#[test] +fn test_send() { + init(); + let id = next_element_counter(); + + let elem = gst::ElementFactory::make("rtpsend") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let mut h = Harness::with_element(&elem, Some("rtp_sink_0"), Some("rtp_src_0")); + h.play(); + + let caps = Caps::builder("application/x-rtp") + .field("media", "audio") + .field("payload", TEST_PT as i32) + .field("clock-rate", TEST_CLOCK_RATE as i32) + .field("encoding-name", "custom-test") + .build(); + h.set_src_caps(caps); + + h.push(generate_rtp_buffer(500, 20, 9)).unwrap(); + h.push(generate_rtp_buffer(501, 30, 11)).unwrap(); + + let buffer = h.pull().unwrap(); + let mapped = buffer.map_readable().unwrap(); + let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap(); + assert_eq!(rtp.sequence_number(), 500); + + let buffer = h.pull().unwrap(); + let mapped = buffer.map_readable().unwrap(); + let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap(); + assert_eq!(rtp.sequence_number(), 501); + + let stats = 
h.element().unwrap().property::("stats"); + + let session_stats = stats.get::("0").unwrap(); + let source_stats = session_stats + .get::(TEST_SSRC.to_string()) + .unwrap(); + assert_eq!(source_stats.get::("ssrc").unwrap(), TEST_SSRC); + assert_eq!( + source_stats.get::("clock-rate").unwrap(), + TEST_CLOCK_RATE + ); + assert!(source_stats.get::("sender").unwrap()); + assert!(source_stats.get::("local").unwrap()); + assert_eq!(source_stats.get::("packets-sent").unwrap(), 2); + assert_eq!(source_stats.get::("octets-sent").unwrap(), 20); +} + +#[test] +fn test_receive() { + init(); + let id = next_element_counter(); + + let elem = gst::ElementFactory::make("rtprecv") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let h = Arc::new(Mutex::new(Harness::with_element( + &elem, + Some("rtp_sink_0"), + None, + ))); + let weak_h = Arc::downgrade(&h); + let mut inner = h.lock().unwrap(); + inner + .element() + .unwrap() + .connect_pad_added(move |_elem, pad| { + weak_h + .upgrade() + .unwrap() + .lock() + .unwrap() + .add_element_src_pad(pad) + }); + inner.play(); + + let caps = Caps::builder("application/x-rtp") + .field("media", "audio") + .field("payload", TEST_PT as i32) + .field("clock-rate", TEST_CLOCK_RATE as i32) + .field("encoding-name", "custom-test") + .build(); + inner.set_src_caps(caps); + + // Cannot push with harness lock as the 'pad-added' handler needs to add the newly created pad to + // the harness and needs to also take the harness lock. Workaround by pushing from the + // internal harness pad directly. + let push_pad = inner + .element() + .unwrap() + .static_pad("rtp_sink_0") + .unwrap() + .peer() + .unwrap(); + drop(inner); + push_pad.push(generate_rtp_buffer(500, 20, 9)).unwrap(); + push_pad.push(generate_rtp_buffer(501, 30, 11)).unwrap(); + let mut inner = h.lock().unwrap(); + + let buffer = inner.pull().unwrap(); + let mapped = buffer.map_readable().unwrap(); + let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap(); + assert_eq!(rtp.sequence_number(), 500); + + let buffer = inner.pull().unwrap(); + let mapped = buffer.map_readable().unwrap(); + let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap(); + assert_eq!(rtp.sequence_number(), 501); + + let stats = inner.element().unwrap().property::("stats"); + + let session_stats = stats.get::("0").unwrap(); + let source_stats = session_stats + .get::(TEST_SSRC.to_string()) + .unwrap(); + let jitterbuffers_stats = session_stats + .get::("jitterbuffer-stats") + .unwrap(); + assert_eq!(jitterbuffers_stats.len(), 1); + let jitterbuffer_stats = jitterbuffers_stats + .first() + .unwrap() + .get::() + .unwrap(); + assert_eq!(source_stats.get::("ssrc").unwrap(), TEST_SSRC); + assert_eq!( + source_stats.get::("clock-rate").unwrap(), + TEST_CLOCK_RATE + ); + assert!(source_stats.get::("sender").unwrap()); + assert!(!source_stats.get::("local").unwrap()); + assert_eq!(source_stats.get::("packets-received").unwrap(), 2); + assert_eq!(source_stats.get::("octets-received").unwrap(), 20); + assert_eq!(jitterbuffer_stats.get::("num-late").unwrap(), 0); + assert_eq!(jitterbuffer_stats.get::("num-lost").unwrap(), 0); + assert_eq!(jitterbuffer_stats.get::("num-duplicates").unwrap(), 0); + assert_eq!(jitterbuffer_stats.get::("num-pushed").unwrap(), 2); + assert_eq!(jitterbuffer_stats.get::("pt").unwrap(), TEST_PT as i32); + assert_eq!( + jitterbuffer_stats.get::("ssrc").unwrap(), + TEST_SSRC as i32 + ); +} + +#[test] +fn test_receive_flush() { + init(); + let id = next_element_counter(); + + let elem = 
gst::ElementFactory::make("rtprecv") + .property("rtp-id", id.to_string()) + .build() + .unwrap(); + let h = Arc::new(Mutex::new(Harness::with_element( + &elem, + Some("rtp_sink_0"), + None, + ))); + let weak_h = Arc::downgrade(&h); + let mut inner = h.lock().unwrap(); + inner + .element() + .unwrap() + .connect_pad_added(move |_elem, pad| { + weak_h + .upgrade() + .unwrap() + .lock() + .unwrap() + .add_element_src_pad(pad) + }); + inner.play(); + + let caps = Caps::builder("application/x-rtp") + .field("media", "audio") + .field("payload", TEST_PT as i32) + .field("clock-rate", TEST_CLOCK_RATE as i32) + .field("encoding-name", "custom-test") + .build(); + inner.set_src_caps(caps); + + // Cannot push with harness lock as the 'pad-added' handler needs to add the newly created pad to + // the harness and needs to also take the harness lock. Workaround by pushing from the + // internal harness pad directly. + let push_pad = inner + .element() + .unwrap() + .static_pad("rtp_sink_0") + .unwrap() + .peer() + .unwrap(); + drop(inner); + push_pad.push(generate_rtp_buffer(500, 20, 9)).unwrap(); + push_pad.push(generate_rtp_buffer(501, 30, 11)).unwrap(); + let mut inner = h.lock().unwrap(); + let seqnum = gst::Seqnum::next(); + inner.push_event(gst::event::FlushStart::builder().seqnum(seqnum).build()); + inner.push_event(gst::event::FlushStop::builder(false).seqnum(seqnum).build()); + + let event = inner.pull_event().unwrap(); + let gst::EventView::FlushStart(fs) = event.view() else { + unreachable!(); + }; + assert_eq!(fs.seqnum(), seqnum); + let event = inner.pull_event().unwrap(); + let gst::EventView::FlushStop(fs) = event.view() else { + unreachable!(); + }; + assert_eq!(fs.seqnum(), seqnum); +} diff --git a/net/rtsp/src/rtspsrc/imp.rs b/net/rtsp/src/rtspsrc/imp.rs index 35cb460e..81c6acd2 100644 --- a/net/rtsp/src/rtspsrc/imp.rs +++ b/net/rtsp/src/rtspsrc/imp.rs @@ -816,12 +816,11 @@ impl RtspSrc { ) .await? 
}; - let manager = RtspManager::new(std::env::var("USE_RTPBIN2").is_ok_and(|s| s == "1")); + let manager = RtspManager::new(std::env::var("USE_RTP2").is_ok_and(|s| s == "1")); let obj = self.obj(); - obj.add(&manager.inner) + manager.add_to(obj.upcast_ref::()) .expect("Adding the manager cannot fail"); - manager.inner.sync_state_with_parent().unwrap(); let mut tcp_interleave_appsrcs = HashMap::new(); for (rtpsession_n, p) in state.setup_params.iter_mut().enumerate() { @@ -983,7 +982,7 @@ impl RtspSrc { obj.no_more_pads(); // Expose RTP srcpads - manager.inner.connect_pad_added(|manager, pad| { + manager.recv.connect_pad_added(|manager, pad| { if pad.direction() != gst::PadDirection::Src { return; } @@ -995,9 +994,9 @@ impl RtspSrc { }; let name = pad.name(); match *name.split('_').collect::>() { - // rtpbin and rtpbin2 + // rtpbin and rtp2 ["recv", "rtp", "src", stream_id, ssrc, pt] - | ["rtp", "recv", "src", stream_id, ssrc, pt] => { + | ["rtp", "src", stream_id, ssrc, pt] => { if stream_id.parse::().is_err() { gst::info!(CAT, "Ignoring srcpad with invalid stream id: {name}"); return; @@ -1128,16 +1127,27 @@ impl RtspSrc { } struct RtspManager { - inner: gst::Element, - using_rtpbin2: bool, + recv: gst::Element, + send: gst::Element, + using_rtp2: bool, } impl RtspManager { - fn new(rtpbin2: bool) -> Self { - let name = if rtpbin2 { "rtpbin2" } else { "rtpbin" }; - let manager = gst::ElementFactory::make_with_name(name, None) - .unwrap_or_else(|_| panic!("{name} not found")); - if !rtpbin2 { + fn new(rtp2: bool) -> Self { + let (recv, send) = if rtp2 { + let recv = gst::ElementFactory::make_with_name("rtprecv") + .unwrap_or_else(|_| panic!("rtprecv not found")); + let send = gst::ElementFactory::make("rtpsend") + .property("rtp-id", recv.property::("rtp-id")) + .build() + .unwrap_or_else(|_| panic!("rtpsend not found")); + (recv, send) + } else { + let e = gst::ElementFactory::make_with_name("rtpbin", None) + .unwrap_or_else(|_| panic!("rtpbin not found")); + (e.clone(), e) + }; + if !rtp2 { let on_bye = |args: &[glib::Value]| { let m = args[0].get::().unwrap(); let Some(obj) = m.parent() else { @@ -1147,46 +1157,62 @@ impl RtspManager { bin.send_event(gst::event::Eos::new()); None }; - manager.connect("on-bye-ssrc", true, move |args| { + recv.connect("on-bye-ssrc", true, move |args| { gst::info!(CAT, "Received BYE packet"); on_bye(args) }); - manager.connect("on-bye-timeout", true, move |args| { + recv.connect("on-bye-timeout", true, move |args| { gst::info!(CAT, "BYE due to timeout"); on_bye(args) }); } RtspManager { - inner: manager, - using_rtpbin2: rtpbin2, + recv, + send, + using_rtp2: rtp2, } } fn rtp_recv_sinkpad(&self, rtpsession: usize) -> Option { - let name = if self.using_rtpbin2 { - format!("rtp_recv_sink_{}", rtpsession) + let name = if self.using_rtp2 { + format!("rtp_sink_{}", rtpsession) } else { format!("recv_rtp_sink_{}", rtpsession) }; - self.inner.request_pad_simple(&name) + gst::info!(CAT, "requesting {name} for receiving RTP"); + self.recv.request_pad_simple(&name) } fn rtcp_recv_sinkpad(&self, rtpsession: usize) -> Option { - let name = if self.using_rtpbin2 { - format!("rtcp_recv_sink_{}", rtpsession) + let name = if self.using_rtp2 { + format!("rtcp_sink_{}", rtpsession) } else { format!("recv_rtcp_sink_{}", rtpsession) }; - self.inner.request_pad_simple(&name) + gst::info!(CAT, "requesting {name} for receiving RTCP"); + self.recv.request_pad_simple(&name) } fn rtcp_send_srcpad(&self, rtpsession: usize) -> Option { - let name = if self.using_rtpbin2 { - 
format!("rtcp_send_src_{}", rtpsession) + let name = if self.using_rtp2 { + format!("rtcp_src_{}", rtpsession) } else { format!("send_rtcp_src_{}", rtpsession) }; - self.inner.request_pad_simple(&name) + gst::info!(CAT, "requesting {name} for sending RTCP"); + self.send.request_pad_simple(&name) + } + + fn add_to>(&self, bin: &T) -> Result<(), glib::BoolError> { + if self.using_rtp2 { + bin.add_many(&[&self.recv, &self.send])?; + self.recv.sync_state_with_parent()?; + self.send.sync_state_with_parent()?; + } else { + bin.add_many(&[&self.recv])?; + self.recv.sync_state_with_parent()?; + } + Ok(()) } }