diff --git a/Cargo.lock b/Cargo.lock index 9e2a2f0d..e85d93f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2754,21 +2754,28 @@ dependencies = [ "bitstream-io", "byte-slice-cast", "chrono", + "futures", + "gio", "gst-plugin-version-helper", "gstreamer", "gstreamer-app", "gstreamer-audio", + "gstreamer-base", "gstreamer-check", + "gstreamer-net", "gstreamer-rtp", "gstreamer-video", "hex", + "log", "once_cell", "rand", + "rtcp-types", "rtp-types", "slab", "smallvec", "thiserror", "time", + "tokio", ] [[package]] @@ -5821,6 +5828,14 @@ dependencies = [ "winapi", ] +[[package]] +name = "rtcp-types" +version = "0.0.1" +source = "git+https://github.com/ystreet/rtcp-types#f7fddfb87e9d7f4fed0b967fedc34995dd81ca86" +dependencies = [ + "thiserror", +] + [[package]] name = "rtp-types" version = "0.1.1" diff --git a/docs/plugins/gst_plugins_cache.json b/docs/plugins/gst_plugins_cache.json index 3bd996fc..a5e079ab 100644 --- a/docs/plugins/gst_plugins_cache.json +++ b/docs/plugins/gst_plugins_cache.json @@ -7191,6 +7191,92 @@ }, "rank": "marginal" }, + "rtpbin2": { + "author": "Matthew Waters ", + "description": "RTP sessions management", + "hierarchy": [ + "GstRtpBin2", + "GstElement", + "GstObject", + "GInitiallyUnowned", + "GObject" + ], + "klass": "Network/RTP/Filter", + "pad-templates": { + "rtcp_recv_sink_%%u": { + "caps": "application/x-rtcp:\n", + "direction": "sink", + "presence": "request" + }, + "rtcp_send_src_%%u": { + "caps": "application/x-rtcp:\n", + "direction": "src", + "presence": "request" + }, + "rtp_recv_sink_%%u": { + "caps": "application/x-rtp:\n", + "direction": "sink", + "presence": "request" + }, + "rtp_recv_src_%%u_%%u_%%u": { + "caps": "application/x-rtp:\n", + "direction": "src", + "presence": "sometimes" + }, + "rtp_send_sink_%%u": { + "caps": "application/x-rtp:\n", + "direction": "sink", + "presence": "request" + }, + "rtp_send_src_%%u": { + "caps": "application/x-rtp:\n", + "direction": "src", + "presence": "sometimes" + } + }, + "properties": { + "latency": { + "blurb": "Amount of ms to buffer", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "default": "0", + "max": "-1", + "min": "0", + "mutable": "ready", + "readable": true, + "type": "guint", + "writable": true + }, + "min-rtcp-interval": { + "blurb": "Minimum time (in ms) between RTCP reports", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "default": "5000", + "max": "-1", + "min": "0", + "mutable": "ready", + "readable": true, + "type": "guint", + "writable": true + }, + "stats": { + "blurb": "Statistics about the session", + "conditionally-available": false, + "construct": false, + "construct-only": false, + "controllable": false, + "mutable": "null", + "readable": true, + "type": "guint", + "writable": false + } + }, + "rank": "none" + }, "rtpgccbwe": { "author": "Thibault Saunier ", "description": "Estimates current network bandwidth using the Google Congestion Control algorithm notifying about it through the 'bitrate' property", diff --git a/net/rtp/Cargo.toml b/net/rtp/Cargo.toml index 86988f88..ae2a5c25 100644 --- a/net/rtp/Cargo.toml +++ b/net/rtp/Cargo.toml @@ -14,18 +14,26 @@ atomic_refcell = "0.1" bitstream-io = "2.1" byte-slice-cast = "1.2" chrono = { version = "0.4", default-features = false } -gst = { workspace = true, features = ["v1_20"] } +gst = { workspace = true, features = ["v1_20"] } gst-audio = { workspace = true, features = ["v1_20"] } -gst-rtp = { workspace = 
true, features = ["v1_20"] } +gst-base = { workspace = true, features = ["v1_20"] } +gst-net = { workspace = true, features = ["v1_20"] } +gst-rtp = { workspace = true, features = ["v1_20"] } gst-video = { workspace = true, features = ["v1_20"] } +futures = "0.3" +gio.workspace = true hex = "0.4.3" +log = "0.4" once_cell.workspace = true rand = { version = "0.8", default-features = false, features = ["std", "std_rng" ] } rtp-types = { version = "0.1" } +rtcp-types = { git = "https://github.com/ystreet/rtcp-types", version = "0.0" } slab = "0.4.9" smallvec = { version = "1.11", features = ["union", "write", "const_generics", "const_new"] } thiserror = "1" time = { version = "0.3", default-features = false, features = ["std"] } +# TODO: experiment with other async executors (mio, async-std, etc) +tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "time"] } [dev-dependencies] gst-check = { workspace = true, features = ["v1_20"] } @@ -56,4 +64,4 @@ versioning = false import_library = false [package.metadata.capi.pkg_config] -requires_private = "gstreamer-1.0, gstreamer-base-1.0, gstreamer-rtp-1.0, gobject-2.0, glib-2.0, gmodule-2.0" +requires_private = "gstreamer-1.0, gstreamer-base-1.0, gstreamer-rtp-1.0, gstreamer-net-1.0, gobject-2.0, glib-2.0, gmodule-2.0, gio-2.0" diff --git a/net/rtp/src/lib.rs b/net/rtp/src/lib.rs index 403231b9..79393c8f 100644 --- a/net/rtp/src/lib.rs +++ b/net/rtp/src/lib.rs @@ -14,12 +14,17 @@ * * Since: plugins-rs-0.9.0 */ + +#[macro_use] +extern crate log; + use gst::glib; #[macro_use] mod utils; mod gcc; +mod rtpbin2; mod audio_discont; mod baseaudiopay; @@ -42,6 +47,7 @@ mod tests; fn plugin_init(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { gcc::register(plugin)?; + rtpbin2::register(plugin)?; #[cfg(feature = "doc")] { diff --git a/net/rtp/src/rtpbin2/imp.rs b/net/rtp/src/rtpbin2/imp.rs new file mode 100644 index 00000000..3953b304 --- /dev/null +++ b/net/rtp/src/rtpbin2/imp.rs @@ -0,0 +1,1460 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::HashMap; +use std::net::SocketAddr; +use std::pin::Pin; +use std::sync::{Arc, Mutex}; +use std::task::{Poll, Waker}; +use std::time::{Duration, Instant, SystemTime}; + +use futures::future::{AbortHandle, Abortable}; +use futures::StreamExt; +use gst::{glib, prelude::*, subclass::prelude::*}; +use once_cell::sync::Lazy; + +use super::session::{RecvReply, RtcpRecvReply, SendReply, Session, RTCP_MIN_REPORT_INTERVAL}; +use super::source::{ReceivedRb, SourceState}; + +use crate::rtpbin2::RUNTIME; + +const DEFAULT_LATENCY: gst::ClockTime = gst::ClockTime::from_mseconds(0); +const DEFAULT_MIN_RTCP_INTERVAL: Duration = RTCP_MIN_REPORT_INTERVAL; + +static CAT: Lazy = Lazy::new(|| { + gst::DebugCategory::new( + "rtpbin2", + gst::DebugColorFlags::empty(), + Some("RTP management bin"), + ) +}); + +#[derive(Debug, Clone)] +struct Settings { + latency: gst::ClockTime, + min_rtcp_interval: Duration, +} + +impl Default for Settings { + fn default() -> Self { + Settings { + latency: DEFAULT_LATENCY, + min_rtcp_interval: DEFAULT_MIN_RTCP_INTERVAL, + } + } +} + +#[derive(Debug)] +#[must_use = "futures/streams/sinks do nothing unless you `.await` or poll them"] +struct RtcpSendStream { + state: Arc>, + sleep: Pin>, +} + +impl RtcpSendStream { + fn new(state: Arc>) -> Self { + Self { + state, + sleep: Box::pin(tokio::time::sleep(Duration::from_secs(1))), + } + } +} + +impl futures::stream::Stream for RtcpSendStream { + type Item = (Vec, usize); + + fn poll_next( + self: Pin<&mut Self>, + 
cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + let mut state = self.state.lock().unwrap(); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let mut lowest_wait = None; + for session in state.sessions.iter_mut() { + let mut session = session.inner.lock().unwrap(); + if let Some(data) = session.session.poll_rtcp_send(now, ntp_now) { + return Poll::Ready(Some((data, session.id))); + } + if let Some(wait) = session.session.poll_rtcp_send_timeout(now) { + if lowest_wait.map_or(true, |lowest_wait| wait < lowest_wait) { + lowest_wait = Some(wait); + } + } + } + state.rtcp_waker = Some(cx.waker().clone()); + drop(state); + + // default to the minimum initial rtcp delay so we don't busy loop if there are no sessions or no + // timeouts available + let lowest_wait = + lowest_wait.unwrap_or(now + crate::rtpbin2::session::RTCP_MIN_REPORT_INTERVAL / 2); + let this = self.get_mut(); + this.sleep.as_mut().reset(lowest_wait.into()); + if !std::future::Future::poll(this.sleep.as_mut(), cx).is_pending() { + // wake us again if the delay is not pending for another go at finding the next timeout + // value + cx.waker().wake_by_ref(); + } + Poll::Pending + } +} + +#[derive(Debug, PartialEq, Eq)] +struct RtpRecvSrcPad { + pt: u8, + ssrc: u32, + pad: gst::Pad, +} + +#[derive(Debug)] +struct HeldRecvBuffer { + hold_id: Option, + buffer: gst::Buffer, + srcpad: gst::Pad, + new_pad: bool, +} + +#[derive(Debug, Clone)] +struct BinSession { + id: usize, + inner: Arc>, +} + +impl BinSession { + fn new(id: usize, min_rtcp_interval: Duration) -> Self { + Self { + id, + inner: Arc::new(Mutex::new(BinSessionInner::new(id, min_rtcp_interval))), + } + } +} + +#[derive(Debug)] +struct BinSessionInner { + id: usize, + + session: Session, + + // State for received RTP streams + rtp_recv_sinkpad: Option, + rtp_recv_sink_group_id: Option, + rtp_recv_sink_caps: Option, + rtp_recv_sink_segment: Option>, + rtp_recv_sink_seqnum: Option, + + caps_map: HashMap>, + recv_store: Vec, + rtp_recv_srcpads: Vec, + recv_flow_combiner: Arc>, + + // State for sending RTP streams + rtp_send_sinkpad: Option, + rtp_send_srcpad: Option, + + rtcp_recv_sinkpad: Option, + rtcp_send_srcpad: Option, +} + +impl BinSessionInner { + fn new(id: usize, min_rtcp_interval: Duration) -> Self { + let mut session = Session::new(); + session.set_min_rtcp_interval(min_rtcp_interval); + Self { + id, + + session: Session::new(), + + rtp_recv_sinkpad: None, + rtp_recv_sink_group_id: None, + rtp_recv_sink_caps: None, + rtp_recv_sink_segment: None, + rtp_recv_sink_seqnum: None, + + caps_map: HashMap::default(), + recv_store: vec![], + rtp_recv_srcpads: vec![], + recv_flow_combiner: Arc::new(Mutex::new(gst_base::UniqueFlowCombiner::new())), + + rtp_send_sinkpad: None, + rtp_send_srcpad: None, + + rtcp_recv_sinkpad: None, + rtcp_send_srcpad: None, + } + } + + fn caps_from_pt_ssrc(&self, pt: u8, ssrc: u32) -> gst::Caps { + self.caps_map + .get(&pt) + .and_then(|ssrc_map| ssrc_map.get(&ssrc)) + .cloned() + .unwrap_or( + gst::Caps::builder("application/x-rtp") + .field("payload", pt as i32) + .build(), + ) + } + + fn get_or_create_rtp_recv_src( + &mut self, + rtpbin: &RtpBin2, + pt: u8, + ssrc: u32, + ) -> (gst::Pad, bool) { + if let Some(pad) = self + .rtp_recv_srcpads + .iter() + .find(|&r| r.ssrc == ssrc && r.pt == pt) + { + (pad.pad.clone(), false) + } else { + let src_templ = rtpbin.obj().pad_template("rtp_recv_src_%u_%u_%u").unwrap(); + let srcpad = gst::Pad::builder_from_template(&src_templ) + .iterate_internal_links_function(|pad, parent| 
{ + RtpBin2::catch_panic_pad_function( + parent, + || gst::Iterator::from_vec(vec![]), + |this| this.iterate_internal_links(pad), + ) + }) + .query_function(|pad, parent, query| { + RtpBin2::catch_panic_pad_function( + parent, + || false, + |this| this.src_query(pad, query), + ) + }) + .name(format!("rtp_recv_src_{}_{}_{}", self.id, pt, ssrc)) + .build(); + srcpad.set_active(true).unwrap(); + let recv_pad = RtpRecvSrcPad { + pt, + ssrc, + pad: srcpad.clone(), + }; + + let stream_id = format!("{pt}/{ssrc}"); + let mut stream_start = gst::event::StreamStart::builder(&stream_id); + if let Some(group_id) = self + .rtp_recv_sinkpad + .as_ref() + .unwrap() + .sticky_event::(0) + .and_then(|ss| ss.group_id()) + { + stream_start = stream_start.group_id(group_id); + } + let stream_start = stream_start.build(); + let seqnum = stream_start.seqnum(); + let _ = srcpad.store_sticky_event(&stream_start); + + let caps = self.caps_from_pt_ssrc(pt, ssrc); + let caps = gst::event::Caps::builder(&caps).seqnum(seqnum).build(); + let _ = srcpad.store_sticky_event(&caps); + + let segment = if let Some(segment) = self + .rtp_recv_sinkpad + .as_ref() + .unwrap() + .sticky_event::(0) + .map(|s| s.segment().clone()) + { + segment + } else { + let mut segment = gst::Segment::new(); + segment.set_format(gst::Format::Time); + segment + }; + let segment = gst::event::Segment::new(&segment); + let _ = srcpad.store_sticky_event(&segment); + + self.recv_flow_combiner + .lock() + .unwrap() + .add_pad(&recv_pad.pad); + self.rtp_recv_srcpads.push(recv_pad); + (srcpad, true) + } + } +} + +#[derive(Debug, Default)] +struct State { + sessions: Vec, + rtcp_waker: Option, + max_session_id: usize, + pads_session_id_map: HashMap, +} + +impl State { + fn session_by_id(&self, id: usize) -> Option<&BinSession> { + self.sessions.iter().find(|session| session.id == id) + } + + fn stats(&self) -> gst::Structure { + let mut ret = gst::Structure::builder("application/x-rtpbin2-stats"); + for session in self.sessions.iter() { + let sess_id = session.id; + let session = session.inner.lock().unwrap(); + let mut session_stats = gst::Structure::builder("application/x-rtp-session-stats"); + for ssrc in session.session.ssrcs() { + if let Some(ls) = session.session.local_send_source_by_ssrc(ssrc) { + let mut source_stats = + gst::Structure::builder("application/x-rtp-source-stats") + .field("ssrc", ls.ssrc()) + .field("sender", true) + .field("local", true) + .field("packets-sent", ls.packet_count()) + .field("octets-sent", ls.octet_count()) + .field("bitrate", ls.bitrate() as u64); + if let Some(pt) = ls.payload_type() { + if let Some(clock_rate) = session.session.clock_rate_from_pt(pt) { + source_stats = source_stats.field("clock-rate", clock_rate); + } + } + if let Some(sr) = ls.last_sent_sr() { + source_stats = source_stats + .field("sr-ntptime", sr.ntp_timestamp().as_u64()) + .field("sr-rtptime", sr.rtp_timestamp()) + .field("sr-octet-count", sr.octet_count()) + .field("sr-packet-count", sr.packet_count()); + } + let rbs = gst::List::new(ls.received_report_blocks().map( + |(sender_ssrc, ReceivedRb { rb, .. 
})| { + gst::Structure::builder("application/x-rtcp-report-block") + .field("sender-ssrc", sender_ssrc) + .field("rb-fraction-lost", rb.fraction_lost()) + .field("rb-packets-lost", rb.cumulative_lost()) + .field("rb-extended_sequence_number", rb.extended_sequence_number()) + .field("rb-jitter", rb.jitter()) + .field("rb-last-sr-ntp-time", rb.last_sr_ntp_time()) + .field("rb-delay_since_last-sr-ntp-time", rb.delay_since_last_sr()) + .build() + }, + )); + match rbs.len() { + 0 => (), + 1 => { + source_stats = + source_stats.field("report-blocks", rbs.first().unwrap().clone()); + } + _ => { + source_stats = source_stats.field("report-blocks", rbs); + } + } + // TODO: add jitter, packets-lost + session_stats = + session_stats.field(ls.ssrc().to_string(), source_stats.build()); + } else if let Some(lr) = session.session.local_receive_source_by_ssrc(ssrc) { + let mut source_stats = + gst::Structure::builder("application/x-rtp-source-stats") + .field("ssrc", lr.ssrc()) + .field("sender", false) + .field("local", true); + if let Some(pt) = lr.payload_type() { + if let Some(clock_rate) = session.session.clock_rate_from_pt(pt) { + source_stats = source_stats.field("clock-rate", clock_rate); + } + } + // TODO: add rb stats + session_stats = + session_stats.field(lr.ssrc().to_string(), source_stats.build()); + } else if let Some(rs) = session.session.remote_send_source_by_ssrc(ssrc) { + let mut source_stats = + gst::Structure::builder("application/x-rtp-source-stats") + .field("ssrc", rs.ssrc()) + .field("sender", true) + .field("local", false) + .field("octets-received", rs.octet_count()) + .field("packets-received", rs.packet_count()) + .field("bitrate", rs.bitrate() as u64) + .field("jitter", rs.jitter()) + .field("packets-lost", rs.packets_lost()); + if let Some(pt) = rs.payload_type() { + if let Some(clock_rate) = session.session.clock_rate_from_pt(pt) { + source_stats = source_stats.field("clock-rate", clock_rate); + } + } + if let Some(rtp_from) = rs.rtp_from() { + source_stats = source_stats.field("rtp-from", rtp_from.to_string()); + } + if let Some(rtcp_from) = rs.rtcp_from() { + source_stats = source_stats.field("rtcp-from", rtcp_from.to_string()); + } + if let Some(sr) = rs.last_received_sr() { + source_stats = source_stats + .field("sr-ntptime", sr.ntp_timestamp().as_u64()) + .field("sr-rtptime", sr.rtp_timestamp()) + .field("sr-octet-count", sr.octet_count()) + .field("sr-packet-count", sr.packet_count()); + } + if let Some(rb) = rs.last_sent_rb() { + source_stats = source_stats + .field("sent-rb-fraction-lost", rb.fraction_lost()) + .field("sent-rb-packets-lost", rb.cumulative_lost()) + .field( + "sent-rb-extended-sequence-number", + rb.extended_sequence_number(), + ) + .field("sent-rb-jitter", rb.jitter()) + .field("sent-rb-last-sr-ntp-time", rb.last_sr_ntp_time()) + .field( + "sent-rb-delay-since-last-sr-ntp-time", + rb.delay_since_last_sr(), + ); + } + let rbs = gst::List::new(rs.received_report_blocks().map( + |(sender_ssrc, ReceivedRb { rb, .. 
})| { + gst::Structure::builder("application/x-rtcp-report-block") + .field("sender-ssrc", sender_ssrc) + .field("rb-fraction-lost", rb.fraction_lost()) + .field("rb-packets-lost", rb.cumulative_lost()) + .field("rb-extended_sequence_number", rb.extended_sequence_number()) + .field("rb-jitter", rb.jitter()) + .field("rb-last-sr-ntp-time", rb.last_sr_ntp_time()) + .field("rb-delay_since_last-sr-ntp-time", rb.delay_since_last_sr()) + .build() + }, + )); + match rbs.len() { + 0 => (), + 1 => { + source_stats = + source_stats.field("report-blocks", rbs.first().unwrap().clone()); + } + _ => { + source_stats = source_stats.field("report-blocks", rbs); + } + } + session_stats = + session_stats.field(rs.ssrc().to_string(), source_stats.build()); + } else if let Some(rr) = session.session.remote_receive_source_by_ssrc(ssrc) { + let source_stats = gst::Structure::builder("application/x-rtp-source-stats") + .field("ssrc", rr.ssrc()) + .field("sender", false) + .field("local", false) + .build(); + session_stats = session_stats.field(rr.ssrc().to_string(), source_stats); + } + } + ret = ret.field(sess_id.to_string(), session_stats.build()); + } + ret.build() + } +} + +pub struct RtpBin2 { + settings: Mutex, + state: Arc>, + rtcp_task: Mutex>, +} + +struct RtcpTask { + abort_handle: AbortHandle, +} + +impl RtpBin2 { + fn start_rtcp_task(&self) { + let mut rtcp_task = self.rtcp_task.lock().unwrap(); + + if rtcp_task.is_some() { + return; + } + + // run the runtime from another task to prevent the "start a runtime from within a runtime" panic + // when the plugin is statically linked. + let (abort_handle, abort_registration) = AbortHandle::new_pair(); + let state = self.state.clone(); + RUNTIME.spawn(async move { + let future = Abortable::new(Self::rtcp_task(state), abort_registration); + future.await + }); + + rtcp_task.replace(RtcpTask { abort_handle }); + } + + async fn rtcp_task(state: Arc>) { + let mut stream = RtcpSendStream::new(state.clone()); + while let Some((data, session_id)) = stream.next().await { + let state = state.lock().unwrap(); + let Some(session) = state.session_by_id(session_id) else { + continue; + }; + let Some(rtcp_srcpad) = session.inner.lock().unwrap().rtcp_send_srcpad.clone() else { + continue; + }; + RUNTIME.spawn_blocking(move || { + let buffer = gst::Buffer::from_mut_slice(data); + if let Err(e) = rtcp_srcpad.push(buffer) { + gst::warning!(CAT, obj: rtcp_srcpad, "Failed to send rtcp data: flow return {e:?}"); + } + }); + } + } + + fn stop_rtcp_task(&self) { + let mut rtcp_task = self.rtcp_task.lock().unwrap(); + + if let Some(rtcp) = rtcp_task.take() { + rtcp.abort_handle.abort(); + } + } + + pub fn src_query(&self, pad: &gst::Pad, query: &mut gst::QueryRef) -> bool { + gst::log!(CAT, obj: pad, "Handling query {query:?}"); + + use gst::QueryViewMut::*; + match query.view_mut() { + Latency(q) => { + let mut peer_query = gst::query::Latency::new(); + + let ret = gst::Pad::query_default(pad, Some(&*self.obj()), &mut peer_query); + let our_latency = self.settings.lock().unwrap().latency; + + let min = if ret { + let (_, min, _) = peer_query.result(); + + our_latency + min + } else { + our_latency + }; + + gst::info!(CAT, obj: pad, "Handled latency query, our latency {our_latency}, minimum latency: {min}"); + q.set(true, min, gst::ClockTime::NONE); + + ret + } + _ => gst::Pad::query_default(pad, Some(pad), query), + } + } + + fn iterate_internal_links(&self, pad: &gst::Pad) -> gst::Iterator { + let state = self.state.lock().unwrap(); + if let Some(&id) = 
state.pads_session_id_map.get(pad) { + if let Some(session) = state.session_by_id(id) { + let session = session.inner.lock().unwrap(); + if let Some(ref sinkpad) = session.rtp_recv_sinkpad { + if sinkpad == pad { + let pads = session + .rtp_recv_srcpads + .iter() + .map(|r| r.pad.clone()) + .collect(); + return gst::Iterator::from_vec(pads); + } else if session.rtp_recv_srcpads.iter().any(|r| &r.pad == pad) { + return gst::Iterator::from_vec(vec![sinkpad.clone()]); + } + } + if let Some(ref sinkpad) = session.rtp_send_sinkpad { + if let Some(ref srcpad) = session.rtp_send_srcpad { + if sinkpad == pad { + return gst::Iterator::from_vec(vec![srcpad.clone()]); + } else if srcpad == pad { + return gst::Iterator::from_vec(vec![sinkpad.clone()]); + } + } + } + // nothing to do for rtcp pads + } + } + gst::Iterator::from_vec(vec![]) + } + + fn rtp_recv_sink_chain( + &self, + _pad: &gst::Pad, + id: usize, + buffer: gst::Buffer, + ) -> Result { + let state = self.state.lock().unwrap(); + let Some(session) = state.session_by_id(id) else { + return Err(gst::FlowError::Error); + }; + + let addr: Option = + buffer + .meta::() + .and_then(|net_meta| { + net_meta + .addr() + .dynamic_cast::() + .map(|a| a.into()) + .ok() + }); + let mapped = buffer.map_readable().map_err(|e| { + gst::error!(CAT, imp: self, "Failed to map input buffer {e:?}"); + gst::FlowError::Error + })?; + let rtp = match rtp_types::RtpPacket::parse(&mapped) { + Ok(rtp) => rtp, + Err(e) => { + // TODO: handle if it's a valid rtcp-muxed RTCP packet + gst::error!(CAT, imp: self, "Failed to parse input as valid rtp packet: {e:?}"); + return Ok(gst::FlowSuccess::Ok); + } + }; + + let session = session.clone(); + let mut session = session.inner.lock().unwrap(); + drop(state); + + let now = Instant::now(); + let mut buffers_to_push = vec![]; + loop { + match session.session.handle_recv(&rtp, addr, now) { + RecvReply::SsrcCollision(_ssrc) => (), // TODO: handle ssrc collision + RecvReply::NewSsrc(_ssrc, _pt) => (), // TODO: signal new ssrc externally + RecvReply::Hold(hold_id) => { + let pt = rtp.payload_type(); + let ssrc = rtp.ssrc(); + drop(mapped); + let (srcpad, new_pad) = session.get_or_create_rtp_recv_src(self, pt, ssrc); + session.recv_store.push(HeldRecvBuffer { + hold_id: Some(hold_id), + buffer, + srcpad, + new_pad, + }); + break; + } + RecvReply::Drop(hold_id) => { + if let Some(pos) = session + .recv_store + .iter() + .position(|b| b.hold_id.unwrap() == hold_id) + { + session.recv_store.remove(pos); + } + } + RecvReply::Forward(hold_id) => { + if let Some(pos) = session + .recv_store + .iter() + .position(|b| b.hold_id.unwrap() == hold_id) + { + buffers_to_push.push(session.recv_store.remove(pos)); + } else { + unreachable!(); + } + } + RecvReply::Ignore => break, + RecvReply::Passthrough => { + let pt = rtp.payload_type(); + let ssrc = rtp.ssrc(); + drop(mapped); + let (srcpad, new_pad) = session.get_or_create_rtp_recv_src(self, pt, ssrc); + buffers_to_push.push(HeldRecvBuffer { + hold_id: None, + buffer, + srcpad, + new_pad, + }); + break; + } + } + } + let recv_flow_combiner = session.recv_flow_combiner.clone(); + drop(session); + + let mut recv_flow_combiner = recv_flow_combiner.lock().unwrap(); + for held in buffers_to_push { + // TODO: handle other processing + if held.new_pad { + let mut state = self.state.lock().unwrap(); + state.pads_session_id_map.insert(held.srcpad.clone(), id); + drop(state); + self.obj().add_pad(&held.srcpad).unwrap(); + } + recv_flow_combiner.update_pad_flow(&held.srcpad, 
held.srcpad.push(held.buffer))?; + } + Ok(gst::FlowSuccess::Ok) + } + + fn rtp_send_sink_chain( + &self, + id: usize, + buffer: gst::Buffer, + ) -> Result { + let state = self.state.lock().unwrap(); + let Some(session) = state.session_by_id(id) else { + gst::error!(CAT, "No session?"); + return Err(gst::FlowError::Error); + }; + + let mapped = buffer.map_readable().map_err(|e| { + gst::error!(CAT, imp: self, "Failed to map input buffer {e:?}"); + gst::FlowError::Error + })?; + let rtp = match rtp_types::RtpPacket::parse(&mapped) { + Ok(rtp) => rtp, + Err(e) => { + gst::error!(CAT, imp: self, "Failed to parse input as valid rtp packet: {e:?}"); + return Ok(gst::FlowSuccess::Ok); + } + }; + + let session = session.clone(); + let mut session = session.inner.lock().unwrap(); + drop(state); + + let now = Instant::now(); + loop { + match session.session.handle_send(&rtp, now) { + SendReply::SsrcCollision(_ssrc) => (), // TODO: handle ssrc collision + SendReply::NewSsrc(_ssrc, _pt) => (), // TODO; signal ssrc externally + SendReply::Passthrough => break, + SendReply::Drop => return Ok(gst::FlowSuccess::Ok), + } + } + // TODO: handle other processing + drop(mapped); + let srcpad = session.rtp_send_srcpad.clone().unwrap(); + drop(session); + srcpad.push(buffer) + } + + fn rtcp_recv_sink_chain( + &self, + id: usize, + buffer: gst::Buffer, + ) -> Result { + let state = self.state.lock().unwrap(); + let Some(session) = state.session_by_id(id) else { + return Err(gst::FlowError::Error); + }; + + let addr: Option = + buffer + .meta::() + .and_then(|net_meta| { + net_meta + .addr() + .dynamic_cast::() + .map(|a| a.into()) + .ok() + }); + let mapped = buffer.map_readable().map_err(|e| { + gst::error!(CAT, imp: self, "Failed to map input buffer {e:?}"); + gst::FlowError::Error + })?; + let rtcp = match rtcp_types::Compound::parse(&mapped) { + Ok(rtcp) => rtcp, + Err(e) => { + gst::error!(CAT, imp: self, "Failed to parse input as valid rtcp packet: {e:?}"); + return Ok(gst::FlowSuccess::Ok); + } + }; + + let session = session.clone(); + let mut session = session.inner.lock().unwrap(); + let waker = state.rtcp_waker.clone(); + drop(state); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let replies = session + .session + .handle_rtcp_recv(rtcp, mapped.len(), addr, now, ntp_now); + for reply in replies { + match reply { + RtcpRecvReply::NewSsrc(_ssrc) => (), // TODO: handle new ssrc + RtcpRecvReply::SsrcCollision(_ssrc) => (), // TODO: handle ssrc collision + RtcpRecvReply::TimerReconsideration => { + if let Some(ref waker) = waker { + // reconsider timers means that we wake the rtcp task to get a new timeout + waker.wake_by_ref(); + } + } + } + } + drop(mapped); + + Ok(gst::FlowSuccess::Ok) + } + + fn rtp_send_sink_event(&self, pad: &gst::Pad, event: gst::Event, id: usize) -> bool { + match event.view() { + gst::EventView::Caps(caps) => { + if let Some((pt, clock_rate)) = Self::pt_clock_rate_from_caps(caps.caps()) { + let state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let mut session = session.inner.lock().unwrap(); + session.session.set_pt_clock_rate(pt, clock_rate); + } + } + gst::Pad::event_default(pad, Some(&*self.obj()), event) + } + gst::EventView::Eos(_eos) => { + let now = Instant::now(); + let mut state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let mut session = session.inner.lock().unwrap(); + let ssrcs = session.session.ssrcs().collect::>(); + // We want to bye all relevant ssrc's here. 
+ // Relevant means they will not be used by something else which means that any + // local send ssrc that is not being used for Sr/Rr reports (internal_ssrc) can + // have the Bye state applied. + let mut all_local = true; + let internal_ssrc = session.session.internal_ssrc(); + for ssrc in ssrcs { + let Some(local_send) = session.session.mut_local_send_source_by_ssrc(ssrc) + else { + if let Some(local_recv) = + session.session.local_receive_source_by_ssrc(ssrc) + { + if local_recv.state() != SourceState::Bye + && Some(ssrc) != internal_ssrc + { + all_local = false; + } + } + continue; + }; + if Some(ssrc) != internal_ssrc { + local_send.mark_bye("End of Stream") + } + } + if all_local { + // if there are no non-local send ssrc's, then we can Bye the entire + // session. + session.session.schedule_bye("End of Stream", now); + } + drop(session); + if let Some(waker) = state.rtcp_waker.take() { + waker.wake(); + } + } + drop(state); + gst::Pad::event_default(pad, Some(&*self.obj()), event) + } + _ => gst::Pad::event_default(pad, Some(&*self.obj()), event), + } + } + + fn rtp_recv_sink_event(&self, pad: &gst::Pad, event: gst::Event, id: usize) -> bool { + match event.view() { + gst::EventView::Caps(caps) => { + if let Some((pt, clock_rate)) = Self::pt_clock_rate_from_caps(caps.caps()) { + let state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let mut session = session.inner.lock().unwrap(); + session.session.set_pt_clock_rate(pt, clock_rate); + } + } + true + } + gst::EventView::Eos(_eos) => { + let now = Instant::now(); + let mut state = self.state.lock().unwrap(); + if let Some(session) = state.session_by_id(id) { + let mut session = session.inner.lock().unwrap(); + let ssrcs = session.session.ssrcs().collect::>(); + // we can only Bye the entire session if we do not have any local send sources + // currently sending data + let mut all_remote = true; + let internal_ssrc = session.session.internal_ssrc(); + for ssrc in ssrcs { + let Some(_local_recv) = session.session.local_receive_source_by_ssrc(ssrc) + else { + if let Some(local_send) = + session.session.local_send_source_by_ssrc(ssrc) + { + if local_send.state() != SourceState::Bye + && Some(ssrc) != internal_ssrc + { + all_remote = false; + break; + } + } + continue; + }; + } + if all_remote { + session.session.schedule_bye("End of stream", now); + } + drop(session); + if let Some(waker) = state.rtcp_waker.take() { + waker.wake(); + } + } + // FIXME: may need to delay sending eos under some circumstances + true + } + _ => gst::Pad::event_default(pad, Some(&*self.obj()), event), + } + } + + fn pt_clock_rate_from_caps(caps: &gst::CapsRef) -> Option<(u8, u32)> { + let Some(s) = caps.structure(0) else { + return None; + }; + let Some((clock_rate, pt)) = Option::zip( + s.get::("clock-rate").ok(), + s.get::("payload").ok(), + ) else { + return None; + }; + if (0..=127).contains(&pt) && clock_rate > 0 { + Some((pt as u8, clock_rate as u32)) + } else { + None + } + } +} + +#[glib::object_subclass] +impl ObjectSubclass for RtpBin2 { + const NAME: &'static str = "GstRtpBin2"; + type Type = super::RtpBin2; + type ParentType = gst::Element; + + fn new() -> Self { + GstRustLogger::install(); + Self { + settings: Default::default(), + state: Default::default(), + rtcp_task: Mutex::new(None), + } + } +} + +impl ObjectImpl for RtpBin2 { + fn properties() -> &'static [glib::ParamSpec] { + static PROPERTIES: Lazy> = Lazy::new(|| { + vec![ + glib::ParamSpecUInt::builder("latency") + .nick("Buffer latency in ms") + 
.blurb("Amount of ms to buffer") + .default_value(DEFAULT_LATENCY.mseconds() as u32) + .mutable_ready() + .build(), + glib::ParamSpecUInt::builder("min-rtcp-interval") + .nick("Minimum RTCP interval in ms") + .blurb("Minimum time (in ms) between RTCP reports") + .default_value(DEFAULT_MIN_RTCP_INTERVAL.as_millis() as u32) + .mutable_ready() + .build(), + glib::ParamSpecUInt::builder("stats") + .nick("Statistics") + .blurb("Statistics about the session") + .read_only() + .build(), + ] + }); + + PROPERTIES.as_ref() + } + + fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) { + match pspec.name() { + "latency" => { + let _latency = { + let mut settings = self.settings.lock().unwrap(); + settings.latency = gst::ClockTime::from_mseconds( + value.get::().expect("type checked upstream").into(), + ); + settings.latency + }; + + let _ = self + .obj() + .post_message(gst::message::Latency::builder().src(&*self.obj()).build()); + } + "min-rtcp-interval" => { + let mut settings = self.settings.lock().unwrap(); + settings.min_rtcp_interval = Duration::from_millis( + value.get::().expect("type checked upstream").into(), + ); + } + _ => unimplemented!(), + } + } + + fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value { + match pspec.name() { + "latency" => { + let settings = self.settings.lock().unwrap(); + (settings.latency.mseconds() as u32).to_value() + } + "min-rtcp-interval" => { + let settings = self.settings.lock().unwrap(); + (settings.min_rtcp_interval.as_millis() as u32).to_value() + } + "stats" => { + let state = self.state.lock().unwrap(); + state.stats().to_value() + } + _ => unimplemented!(), + } + } +} + +impl GstObjectImpl for RtpBin2 {} + +impl ElementImpl for RtpBin2 { + fn metadata() -> Option<&'static gst::subclass::ElementMetadata> { + static ELEMENT_METADATA: Lazy = Lazy::new(|| { + gst::subclass::ElementMetadata::new( + "RTP Bin", + "Network/RTP/Filter", + "RTP sessions management", + "Matthew Waters ", + ) + }); + + Some(&*ELEMENT_METADATA) + } + + fn pad_templates() -> &'static [gst::PadTemplate] { + static PAD_TEMPLATES: Lazy> = Lazy::new(|| { + let rtp_caps = gst::Caps::builder_full() + .structure(gst::Structure::builder("application/x-rtp").build()) + .build(); + let rtcp_caps = gst::Caps::builder_full() + .structure(gst::Structure::builder("application/x-rtcp").build()) + .build(); + + vec![ + gst::PadTemplate::new( + "rtp_recv_sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &rtp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtcp_recv_sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &rtcp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtp_recv_src_%u_%u_%u", + gst::PadDirection::Src, + gst::PadPresence::Sometimes, + &rtp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtp_send_sink_%u", + gst::PadDirection::Sink, + gst::PadPresence::Request, + &rtp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtp_send_src_%u", + gst::PadDirection::Src, + gst::PadPresence::Sometimes, + &rtp_caps, + ) + .unwrap(), + gst::PadTemplate::new( + "rtcp_send_src_%u", + gst::PadDirection::Src, + gst::PadPresence::Request, + &rtcp_caps, + ) + .unwrap(), + ] + }); + + PAD_TEMPLATES.as_ref() + } + + fn request_new_pad( + &self, + templ: &gst::PadTemplate, + name: Option<&str>, + _caps: Option<&gst::Caps>, // XXX: do something with caps? 
+ ) -> Option { + let this = self.obj(); + let min_rtcp_interval = self.settings.lock().unwrap().min_rtcp_interval; + let mut state = self.state.lock().unwrap(); + let max_session_id = state.max_session_id; + + // parse the possibly provided name into a session id or use the default + let sess_parse = move |name: Option<&str>, prefix, default_id| -> Option { + if let Some(name) = name { + name.strip_prefix(prefix).and_then(|suffix| { + if suffix.starts_with("%u") { + Some(default_id) + } else { + suffix.parse::().ok() + } + }) + } else { + Some(default_id) + } + }; + + match templ.name_template() { + "rtp_send_sink_%u" => { + sess_parse(name, "rtp_send_sink_", max_session_id).and_then(|id| { + let new_pad = move |session: &mut BinSessionInner| -> Option<(gst::Pad, Option, usize)> { + let sinkpad = gst::Pad::builder_from_template(templ) + .chain_function(move |_pad, parent, buffer| { + RtpBin2::catch_panic_pad_function( + parent, + || Err(gst::FlowError::Error), + |this| this.rtp_send_sink_chain(id, buffer), + ) + }) + .iterate_internal_links_function(|pad, parent| { + RtpBin2::catch_panic_pad_function(parent, || gst::Iterator::from_vec(vec![]), |this| this.iterate_internal_links(pad)) + }) + .event_function(move |pad, parent, event| + RtpBin2::catch_panic_pad_function(parent, || false, |this| this.rtp_send_sink_event(pad, event, id)) + ) + .flags(gst::PadFlags::PROXY_CAPS) + .name(format!("rtp_send_sink_{}", id)) + .build(); + sinkpad.set_active(true).unwrap(); + this.add_pad(&sinkpad).unwrap(); + let src_templ = self.obj().pad_template("rtp_send_src_%u").unwrap(); + let srcpad = gst::Pad::builder_from_template(&src_templ) + .iterate_internal_links_function(|pad, parent| { + RtpBin2::catch_panic_pad_function(parent, || gst::Iterator::from_vec(vec![]), |this| this.iterate_internal_links(pad)) + }) + .name(format!("rtp_send_src_{}", id)) + .build(); + srcpad.set_active(true).unwrap(); + this.add_pad(&srcpad).unwrap(); + session.rtp_send_sinkpad = Some(sinkpad.clone()); + session.rtp_send_srcpad = Some(srcpad.clone()); + Some((sinkpad, Some(srcpad), id)) + }; + + let session = state.session_by_id(id); + if let Some(session) = session { + let mut session = session.inner.lock().unwrap(); + if session.rtp_send_sinkpad.is_some() { + None + } else { + new_pad(&mut session) + } + } else { + let session = BinSession::new(id, min_rtcp_interval); + let mut inner = session.inner.lock().unwrap(); + let ret = new_pad(&mut inner); + drop(inner); + state.sessions.push(session); + ret + } + }) + } + "rtp_recv_sink_%u" => { + sess_parse(name, "rtp_recv_sink_", max_session_id).and_then(|id| { + let new_pad = move |session: &mut BinSessionInner| -> Option<(gst::Pad, Option, usize)> { + let sinkpad = gst::Pad::builder_from_template(templ) + .chain_function(move |pad, parent, buffer| { + RtpBin2::catch_panic_pad_function( + parent, + || Err(gst::FlowError::Error), + |this| this.rtp_recv_sink_chain(pad, id, buffer), + ) + }) + .iterate_internal_links_function(|pad, parent| { + RtpBin2::catch_panic_pad_function(parent, || gst::Iterator::from_vec(vec![]), |this| this.iterate_internal_links(pad)) + }) + .event_function(move |pad, parent, event| + RtpBin2::catch_panic_pad_function(parent, || false, |this| this.rtp_recv_sink_event(pad, event, id)) + ) + .name(format!("rtp_recv_sink_{}", id)) + .build(); + sinkpad.set_active(true).unwrap(); + this.add_pad(&sinkpad).unwrap(); + session.rtp_recv_sinkpad = Some(sinkpad.clone()); + Some((sinkpad, None, id)) + }; + + let session = state.session_by_id(id); + if let 
Some(session) = session { + let mut session = session.inner.lock().unwrap(); + if session.rtp_send_sinkpad.is_some() { + None + } else { + new_pad(&mut session) + } + } else { + let session = BinSession::new(id, min_rtcp_interval); + let mut inner = session.inner.lock().unwrap(); + let ret = new_pad(&mut inner); + drop(inner); + state.sessions.push(session); + ret + } + }) + } + "rtcp_recv_sink_%u" => { + sess_parse(name, "rtcp_recv_sink_", max_session_id).and_then(|id| { + state.session_by_id(id).and_then(|session| { + let mut session = session.inner.lock().unwrap(); + if session.rtcp_recv_sinkpad.is_some() { + None + } else { + let sinkpad = gst::Pad::builder_from_template(templ) + .chain_function(move |_pad, parent, buffer| { + RtpBin2::catch_panic_pad_function( + parent, + || Err(gst::FlowError::Error), + |this| this.rtcp_recv_sink_chain(id, buffer), + ) + }) + .iterate_internal_links_function(|pad, parent| { + RtpBin2::catch_panic_pad_function(parent, || gst::Iterator::from_vec(vec![]), |this| this.iterate_internal_links(pad)) + }) + .name(format!("rtcp_recv_sink_{}", id)) + .build(); + sinkpad.set_active(true).unwrap(); + this.add_pad(&sinkpad).unwrap(); + session.rtcp_recv_sinkpad = Some(sinkpad.clone()); + Some((sinkpad, None, id)) + } + }) + }) + } + "rtcp_send_src_%u" => { + self.start_rtcp_task(); + sess_parse(name, "rtcp_send_src_", max_session_id).and_then(|id| { + state.session_by_id(id).and_then(|session| { + let mut session = session.inner.lock().unwrap(); + + if session.rtcp_send_srcpad.is_some() { + None + } else { + let this = self.obj(); + let srcpad = gst::Pad::builder_from_template(templ) + .iterate_internal_links_function(|pad, parent| { + RtpBin2::catch_panic_pad_function(parent, || gst::Iterator::from_vec(vec![]), |this| this.iterate_internal_links(pad)) + }) + .name(format!("rtcp_send_src_{}", id)) + .build(); + + let stream_id = format!("{}/rtcp", id); + let stream_start = gst::event::StreamStart::builder(&stream_id).build(); + let seqnum = stream_start.seqnum(); + + let caps = gst::Caps::new_empty_simple("application/x-rtcp"); + let caps = gst::event::Caps::builder(&caps).seqnum(seqnum).build(); + + let segment = gst::FormattedSegment::::new(); + let segment = gst::event::Segment::new(&segment); + + srcpad.set_active(true).unwrap(); + + let _ = srcpad.store_sticky_event(&stream_start); + let _ = srcpad.store_sticky_event(&caps); + let _ = srcpad.store_sticky_event(&segment); + + this.add_pad(&srcpad).unwrap(); + session.rtcp_send_srcpad = Some(srcpad.clone()); + Some((srcpad, None, id)) + } + }) + }) + } + _ => None, + } + .map(|(pad, otherpad, id)| { + state.max_session_id = (id + 1).max(state.max_session_id); + state.pads_session_id_map.insert(pad.clone(), id); + if let Some(pad) = otherpad { + state.pads_session_id_map.insert(pad, id); + } + pad + }) + } + + fn release_pad(&self, pad: &gst::Pad) { + let mut state = self.state.lock().unwrap(); + let mut removed_pads = vec![]; + if let Some(&id) = state.pads_session_id_map.get(pad) { + removed_pads.push(pad.clone()); + if let Some(session) = state.session_by_id(id) { + let mut session = session.inner.lock().unwrap(); + + if Some(pad) == session.rtp_recv_sinkpad.as_ref() { + session.rtp_recv_sinkpad = None; + removed_pads.extend(session.rtp_recv_srcpads.iter().map(|r| r.pad.clone())); + session.recv_flow_combiner.lock().unwrap().clear(); + session.rtp_recv_srcpads.clear(); + session.recv_store.clear(); + } + + if Some(pad) == session.rtp_send_sinkpad.as_ref() { + session.rtp_send_sinkpad = None; + if let 
Some(srcpad) = session.rtp_send_srcpad.take() { + removed_pads.push(srcpad); + } + } + + if Some(pad) == session.rtcp_send_srcpad.as_ref() { + session.rtcp_send_srcpad = None; + } + + if Some(pad) == session.rtcp_recv_sinkpad.as_ref() { + session.rtcp_recv_sinkpad = None; + } + + if session.rtp_recv_sinkpad.is_none() + && session.rtp_send_sinkpad.is_none() + && session.rtcp_recv_sinkpad.is_none() + && session.rtcp_send_srcpad.is_none() + { + let id = session.id; + drop(session); + state.sessions.retain(|s| s.id != id); + } + } + + for pad in removed_pads.iter() { + state.pads_session_id_map.remove(pad); + } + } + drop(state); + + for pad in removed_pads { + let _ = pad.set_active(false); + // Pad might not have been added yet if it's a RTP recv srcpad + if pad.has_as_parent(&*self.obj()) { + let _ = self.obj().remove_pad(&pad); + } + } + + self.parent_release_pad(pad) + } + + fn change_state( + &self, + transition: gst::StateChange, + ) -> Result { + let mut success = self.parent_change_state(transition)?; + + match transition { + gst::StateChange::ReadyToNull => { + self.stop_rtcp_task(); + } + gst::StateChange::ReadyToPaused => { + success = gst::StateChangeSuccess::NoPreroll; + } + gst::StateChange::PlayingToPaused => { + success = gst::StateChangeSuccess::NoPreroll; + } + gst::StateChange::PausedToReady => { + let mut state = self.state.lock().unwrap(); + let mut removed_pads = vec![]; + for session in &state.sessions { + let mut session = session.inner.lock().unwrap(); + removed_pads.extend(session.rtp_recv_srcpads.iter().map(|r| r.pad.clone())); + session.recv_flow_combiner.lock().unwrap().clear(); + session.rtp_recv_srcpads.clear(); + session.recv_store.clear(); + + session.rtp_recv_sink_caps = None; + session.rtp_recv_sink_segment = None; + session.rtp_recv_sink_seqnum = None; + session.rtp_recv_sink_group_id = None; + + session.caps_map.clear(); + } + for pad in removed_pads.iter() { + state.pads_session_id_map.remove(pad); + } + drop(state); + + for pad in removed_pads { + let _ = pad.set_active(false); + // Pad might not have been added yet if it's a RTP recv srcpad + if pad.has_as_parent(&*self.obj()) { + let _ = self.obj().remove_pad(&pad); + } + } + } + _ => (), + } + Ok(success) + } +} + +static RUST_CAT: Lazy = Lazy::new(|| { + gst::DebugCategory::new( + "rust-log", + gst::DebugColorFlags::empty(), + Some("Logs from rust crates"), + ) +}); + +static GST_RUST_LOGGER_ONCE: once_cell::sync::OnceCell<()> = once_cell::sync::OnceCell::new(); +static GST_RUST_LOGGER: GstRustLogger = GstRustLogger {}; + +pub(crate) struct GstRustLogger {} + +impl GstRustLogger { + pub fn install() { + GST_RUST_LOGGER_ONCE.get_or_init(|| { + if log::set_logger(&GST_RUST_LOGGER).is_err() { + gst::warning!( + RUST_CAT, + "Cannot install log->gst logger, already installed?" 
+ ); + } else { + log::set_max_level(GstRustLogger::debug_level_to_log_level_filter( + RUST_CAT.threshold(), + )); + gst::info!(RUST_CAT, "installed log->gst logger"); + } + }); + } + + fn debug_level_to_log_level_filter(level: gst::DebugLevel) -> log::LevelFilter { + match level { + gst::DebugLevel::None => log::LevelFilter::Off, + gst::DebugLevel::Error => log::LevelFilter::Error, + gst::DebugLevel::Warning => log::LevelFilter::Warn, + gst::DebugLevel::Fixme | gst::DebugLevel::Info => log::LevelFilter::Info, + gst::DebugLevel::Debug => log::LevelFilter::Debug, + gst::DebugLevel::Log | gst::DebugLevel::Trace | gst::DebugLevel::Memdump => { + log::LevelFilter::Trace + } + _ => log::LevelFilter::Trace, + } + } + + fn log_level_to_debug_level(level: log::Level) -> gst::DebugLevel { + match level { + log::Level::Error => gst::DebugLevel::Error, + log::Level::Warn => gst::DebugLevel::Warning, + log::Level::Info => gst::DebugLevel::Info, + log::Level::Debug => gst::DebugLevel::Debug, + log::Level::Trace => gst::DebugLevel::Trace, + } + } +} + +impl log::Log for GstRustLogger { + fn enabled(&self, metadata: &log::Metadata) -> bool { + RUST_CAT.above_threshold(GstRustLogger::log_level_to_debug_level(metadata.level())) + } + + fn log(&self, record: &log::Record) { + let gst_level = GstRustLogger::log_level_to_debug_level(record.metadata().level()); + let file = record + .file() + .map(glib::GString::from) + .unwrap_or_else(|| glib::GString::from("rust-log")); + let function = record.target(); + let line = record.line().unwrap_or(0); + RUST_CAT.log( + None::<&glib::Object>, + gst_level, + file.as_gstr(), + function, + line, + *record.args(), + ); + } + + fn flush(&self) {} +} diff --git a/net/rtp/src/rtpbin2/mod.rs b/net/rtp/src/rtpbin2/mod.rs new file mode 100644 index 00000000..8b913b17 --- /dev/null +++ b/net/rtp/src/rtpbin2/mod.rs @@ -0,0 +1,30 @@ +// SPDX-License-Identifier: MPL-2.0 + +use gst::glib; +use gst::prelude::*; +use once_cell::sync::Lazy; +mod imp; +mod session; +mod source; +mod time; + +glib::wrapper! { + pub struct RtpBin2(ObjectSubclass) @extends gst::Element, gst::Object; +} + +pub fn register(plugin: &gst::Plugin) -> Result<(), glib::BoolError> { + gst::Element::register( + Some(plugin), + "rtpbin2", + gst::Rank::NONE, + RtpBin2::static_type(), + ) +} + +pub static RUNTIME: Lazy = Lazy::new(|| { + tokio::runtime::Builder::new_multi_thread() + .enable_time() + .worker_threads(1) + .build() + .unwrap() +}); diff --git a/net/rtp/src/rtpbin2/session.rs b/net/rtp/src/rtpbin2/session.rs new file mode 100644 index 00000000..cb51b26c --- /dev/null +++ b/net/rtp/src/rtpbin2/session.rs @@ -0,0 +1,1917 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::collections::{HashMap, VecDeque}; +use std::net::SocketAddr; +use std::time::{Duration, Instant, SystemTime}; + +use rtcp_types::*; +use rtp_types::RtpPacket; + +use rand::prelude::*; + +use crate::rtpbin2::source::SourceRecvReply; + +use super::source::{ + LocalReceiveSource, LocalSendSource, RemoteReceiveSource, RemoteSendSource, SourceState, +}; +use super::time::system_time_to_ntp_time_u64; + +use gst::prelude::MulDiv; + +// TODO: make configurable +pub const RTCP_MIN_REPORT_INTERVAL: Duration = Duration::from_secs(5); +// TODO: reduced minimum interval? 
(360 / session bandwidth) + +const RTCP_SOURCE_TIMEOUT_N_INTERVALS: u32 = 5; +const RTCP_ADDRESS_CONFLICT_TIMEOUT: Duration = RTCP_MIN_REPORT_INTERVAL.saturating_mul(12); +// 5% of 8kB/s +const RTCP_MIN_BANDWIDTH: usize = 400; +const RTCP_MTU: usize = 1200; + +const UDP_IP_OVERHEAD_BYTES: usize = 28; + +#[derive(Debug, Default)] +struct RtcpTimeMembers { + time: Option, + p_members: usize, +} + +#[derive(Debug)] +struct ByeState { + members: usize, + pmembers: usize, +} + +#[derive(Debug)] +pub struct Session { + // settings + min_rtcp_interval: Duration, + // state + local_senders: HashMap, + local_receivers: HashMap, + remote_receivers: HashMap, + remote_senders: HashMap, + last_rtcp_sent_times: VecDeque, + // holds the next rtcp send time and the number of members at the time when the time was + // calculated + next_rtcp_send: RtcpTimeMembers, + average_rtcp_size: usize, + last_sent_data: Option, + hold_buffer_counter: usize, + sdes: HashMap, + pt_map: HashMap, + conflicting_addresses: HashMap, + // used when we have not sent anything but need a ssrc for Rr + internal_rtcp_sender_src: Option, + bye_state: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RecvReply { + /// A new ssrc was discovered. If you want to change things about the new ssrc, then do it now. + /// Call recv() again. + NewSsrc(u32, u8), + /// hold this buffer for later and give it the relevant id. The id will be used in a Drop, or + /// Forward return value + Hold(usize), + /// Drop a buffer by id. Should continue calling with the same input until not Drop or Forward + Drop(usize), + /// Forward a held buffer by id. Should continue calling with the same input until not Drop or Forward. + Forward(usize), + /// Forward the input buffer + Passthrough, + /// Ignore this buffer and do not passthrough + Ignore, + /// A ssrc collision has been detected for the provided ssrc. Sender (us) should change ssrc. + SsrcCollision(u32), +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum SendReply { + /// A new ssrc was discovered. If you want to change things about the new ssrc, then do it now. + /// Call send() again. + NewSsrc(u32, u8), + /// Forward the input buffer + Passthrough, + /// Drop this buffer + Drop, + /// SSRC collision detected, Sender (us) should change our SSRC and this packet must be dropped + SsrcCollision(u32), +} + +#[derive(Debug, Copy, Clone, PartialEq, Eq)] +pub enum RtcpRecvReply { + /// A new ssrc was discovered. If you want to change things about the new ssrc, then do it now + /// before pushing the buffer again + NewSsrc(u32), + /// SSRC collision detected, Sender (us) should change our SSRC and this packet must be dropped + SsrcCollision(u32), + /// RTCP timer needs to be reconsidered. 
Call poll_rtcp_send_timeout() to get the new time + TimerReconsideration, +} + +impl Session { + pub fn new() -> Self { + let cname = generate_cname(); + let mut sdes = HashMap::new(); + sdes.insert(SdesItem::CNAME, cname); + Self { + min_rtcp_interval: RTCP_MIN_REPORT_INTERVAL, + local_senders: HashMap::new(), + // also known as remote_senders + local_receivers: HashMap::new(), + remote_receivers: HashMap::new(), + remote_senders: HashMap::new(), + last_rtcp_sent_times: VecDeque::new(), + next_rtcp_send: RtcpTimeMembers { + time: None, + p_members: 0, + }, + average_rtcp_size: 100, + last_sent_data: None, + hold_buffer_counter: 0, + sdes, + pt_map: HashMap::new(), + conflicting_addresses: HashMap::new(), + internal_rtcp_sender_src: None, + bye_state: None, + } + } + + /// Set the minimum RTCP interval to use for this session + pub fn set_min_rtcp_interval(&mut self, min_rtcp_interval: Duration) { + self.min_rtcp_interval = min_rtcp_interval; + } + + fn n_members(&self) -> usize { + self.bye_state + .as_ref() + .map(|state| state.members) + .unwrap_or_else(|| { + self.local_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .local_receivers + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .remote_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .remote_receivers + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + }) + } + + fn n_senders(&self) -> usize { + self.bye_state.as_ref().map(|_state| 0).unwrap_or_else(|| { + self.local_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + + self + .remote_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .count() + }) + } + + fn p_members(&self) -> usize { + self.bye_state + .as_ref() + .map(|state| state.pmembers) + .unwrap_or(self.next_rtcp_send.p_members) + } + + /// Set the RTP clock rate for a particular payload type + pub fn set_pt_clock_rate(&mut self, pt: u8, clock_rate: u32) { + self.pt_map.insert(pt, clock_rate); + } + + /// Retrieve the RTP clock rate for a particular payload type + pub fn clock_rate_from_pt(&self, pt: u8) -> Option { + self.pt_map.get(&pt).copied() + } + + fn handle_ssrc_conflict(&mut self, addr: SocketAddr, now: Instant) -> bool { + if let Some(time) = self.conflicting_addresses.get_mut(&addr) { + trace!("ignoring looped packet from known collision address {addr:?}"); + *time = now; + false + } else { + trace!("New collision address {addr:?}"); + self.conflicting_addresses.insert(addr, now); + true + } + } + + /// Handle receiving an RTP packet. The [`RecvRecply`] return value outlines what the caller + /// must do with the packet. 
+ pub fn handle_recv( + &mut self, + rtp: &RtpPacket, + from: Option, + now: Instant, + ) -> RecvReply { + trace!( + "receive rtp from:{from:?} at {now:?}, ssrc:{}, pt:{}, seqno:{}, rtp ts:{}, bytes:{}", + rtp.ssrc(), + rtp.payload_type(), + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload().len() + ); + if let Some(addr) = from { + // handle possible collisions + if let Some(_source) = self.local_senders.get(&rtp.ssrc()) { + if self.handle_ssrc_conflict(addr, now) { + return RecvReply::SsrcCollision(rtp.ssrc()); + } else { + return RecvReply::Ignore; + } + } else if let Some(recv) = self.remote_receivers.remove(&rtp.ssrc()) { + let mut sender = recv.into_send(); + sender.set_rtp_from(from); + self.remote_senders.insert(rtp.ssrc(), sender); + } else if let Some(recv) = self.remote_senders.get_mut(&rtp.ssrc()) { + if let Some(from_addr) = recv.rtp_from() { + if addr != from_addr { + // this is favour old source behaviour + return RecvReply::Ignore; + } + } else { + recv.set_rtp_from(from); + } + } + } + + // TODO: handle CSRCs + + let clock_rate = self.clock_rate_from_pt(rtp.payload_type()); + + if let Some(source) = self.remote_senders.get_mut(&rtp.ssrc()) { + match source.recv_packet( + rtp.payload().len() as u32, + now, + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload_type(), + clock_rate, + self.hold_buffer_counter, + ) { + SourceRecvReply::Hold(id) => { + self.hold_buffer_counter += 1; + RecvReply::Hold(id) + } + SourceRecvReply::Drop(id) => RecvReply::Drop(id), + SourceRecvReply::Ignore => RecvReply::Ignore, + SourceRecvReply::Forward(id) => RecvReply::Forward(id), + SourceRecvReply::Passthrough => RecvReply::Passthrough, + } + } else { + let mut source = RemoteSendSource::new(rtp.ssrc()); + source.set_rtp_from(from); + self.remote_senders.insert(rtp.ssrc(), source); + trace!("new receive ssrc:{}, pt:{}", rtp.ssrc(), rtp.payload_type()); + RecvReply::NewSsrc(rtp.ssrc(), rtp.payload_type()) + } + } + + /// Handle sending a RTP packet. The [`SendReply`] return value indicates what the caller + /// must do with this packet. 
+ pub fn handle_send(&mut self, rtp: &RtpPacket, now: Instant) -> SendReply { + trace!( + "sending at {now:?} ssrc:{}, pt:{}, seqno:{}, rtp ts:{}, bytes:{}", + rtp.ssrc(), + rtp.payload_type(), + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload().len() + ); + self.last_sent_data = Some(now); + + // handle possible collision + if let Some(source) = self.remote_senders.get(&rtp.ssrc()) { + if let Some(rtp_from) = source.rtp_from().or(source.rtcp_from()) { + if self.handle_ssrc_conflict(rtp_from, now) { + return SendReply::SsrcCollision(rtp.ssrc()); + } + } + return SendReply::Drop; + } else if let Some(source) = self.remote_receivers.get(&rtp.ssrc()) { + if let Some(rtcp_from) = source.rtcp_from() { + if self.handle_ssrc_conflict(rtcp_from, now) { + return SendReply::SsrcCollision(rtp.ssrc()); + } + } + return SendReply::Drop; + } + + if let Some(source) = self.local_senders.get_mut(&rtp.ssrc()) { + if source.state() != SourceState::Normal { + warn!( + "source {} is in state {:?}, dropping send", + source.ssrc(), + source.state() + ); + return SendReply::Drop; + } + source.set_last_activity(now); + if let Some(_clock_rate) = self.pt_map.get(&rtp.payload_type()) { + source.sent_packet( + rtp.payload().len(), + now, + rtp.sequence_number(), + rtp.timestamp(), + rtp.payload_type(), + ); + SendReply::Passthrough + } else { + trace!("no clock rate for pt:{}, dropping", rtp.payload_type()); + SendReply::Drop + } + } else { + self.local_receivers.remove_entry(&rtp.ssrc()); + let mut source = LocalSendSource::new(rtp.ssrc()); + source.set_last_activity(now); + source.set_state(SourceState::Normal); + for (k, v) in self.sdes.iter() { + source.set_sdes_item(*k, v.as_bytes()); + } + if self.local_senders.is_empty() && self.rtcp_reverse_consideration(0, now) { + // TODO: signal updated timeout + } + self.local_senders.insert(rtp.ssrc(), source); + info!("new send ssrc:{}, pt:{}", rtp.ssrc(), rtp.payload_type()); + SendReply::NewSsrc(rtp.ssrc(), rtp.payload_type()) + } + } + + fn update_rtcp_average(&mut self, additional_size: usize) { + if self.average_rtcp_size == 0 { + self.average_rtcp_size = additional_size; + } else { + self.average_rtcp_size = (additional_size + self.average_rtcp_size * 15) / 16; + } + } + + fn handle_rb( + &mut self, + sender_ssrc: u32, + rb: ReportBlock<'_>, + from: Option, + now: Instant, + ntp_time: SystemTime, + ) -> Option { + let mut ret = None; + if let Some(source) = self.local_senders.get_mut(&rb.ssrc()) { + source.add_last_rb(sender_ssrc, rb, now, ntp_time); + source.set_last_activity(now); + } else { + if let Some(source) = self.remote_receivers.remove(&rb.ssrc()) { + let sender = source.into_send(); + self.remote_senders.insert(rb.ssrc(), sender); + } + + let source = self.remote_senders.entry(rb.ssrc()).or_insert_with(|| { + ret = Some(RtcpRecvReply::NewSsrc(rb.ssrc())); + RemoteSendSource::new(rb.ssrc()) + }); + source.set_rtcp_from(from); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + source.add_last_rb(sender_ssrc, rb, now, ntp_time); + } + ret + } + + fn rtcp_reverse_consideration(&mut self, initial_n_members: usize, now: Instant) -> bool { + let n_members = self.n_members(); + if n_members >= self.p_members() { + trace!("rtcp reverse consideration not applied, n_members >= p_members"); + // this only applies if nmembers is less than pmembers + return false; + } + if let Some(ref mut prev) = self.next_rtcp_send.time { + if now > *prev { + trace!("rtcp reverse consideration not applied, last timeout in the past"); + // timer 
should have fired already, nothing to do + return false; + } + let dur = prev.saturating_duration_since(now); + if self.next_rtcp_send.p_members > 0 { + let member_factor = initial_n_members as f64 / self.next_rtcp_send.p_members as f64; + *prev = now + dur.mul_f64(member_factor); + self.next_rtcp_send.p_members = n_members; + if let Some(last_rtcp) = self.last_rtcp_sent_times.front_mut() { + let dur = last_rtcp.saturating_duration_since(now); + *last_rtcp = now - dur.mul_f64(member_factor); + } + trace!("rtcp reverse consideration applied"); + return true; + } + trace!("rtcp reverse consideration not applied, p_members <= 0"); + } else { + trace!("rtcp reverse consideration not applied, have not sent initial rtcp"); + } + false + } + + /// Handle receiving a RTCP packet. The returned [`RtcpRecvReply`]s indicates anything the + /// caller may need to handle. + pub fn handle_rtcp_recv( + &mut self, + rtcp: Compound, + rtcp_len: usize, + from: Option, + now: Instant, + ntp_time: SystemTime, + ) -> Vec { + trace!("Receive RTCP at {now:?}, ntp:{ntp_time:?}"); + // TODO: handle from: Option + let mut replies = vec![]; + + if self.bye_state.is_none() { + self.update_rtcp_average(rtcp_len + UDP_IP_OVERHEAD_BYTES); + } + + let mut reconsidered_timeout = false; + for (i, p) in rtcp.enumerate() { + trace!("recv rtcp {i}th packet: {p:?}"); + match p { + // TODO: actually handle App packets + Ok(Packet::App(_app)) => (), + Ok(Packet::Bye(bye)) => { + // https://datatracker.ietf.org/doc/html/rfc3550#section-6.3.4 + let n_members = self.n_members(); + let mut check_reconsideration = false; + for ssrc in bye.ssrcs() { + if let Some(source) = self.remote_senders.get_mut(&ssrc) { + source.set_rtcp_from(from); + source.set_last_activity(now); + source.set_state(SourceState::Bye); + check_reconsideration = true; + } else if let Some(source) = self.remote_receivers.get_mut(&ssrc) { + source.set_last_activity(now); + source.set_state(SourceState::Bye); + check_reconsideration = true; + } + // XXX: do we need to handle an unknown ssrc here? 
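+                        // (currently such an ssrc just falls through: no source is created
+                        // and no state is updated)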
+ // TODO: signal rtcp timeout needs recalcuating + } + if let Some(ref mut state) = self.bye_state { + state.members += 1; + let n_members = state.members; + self.update_rtcp_average(rtcp_len + UDP_IP_OVERHEAD_BYTES); + if check_reconsideration + && self.rtcp_reverse_consideration(n_members, now) + && !reconsidered_timeout + { + replies.push(RtcpRecvReply::TimerReconsideration); + reconsidered_timeout = true; + } + } else if check_reconsideration + && self.rtcp_reverse_consideration(n_members, now) + && !reconsidered_timeout + { + replies.push(RtcpRecvReply::TimerReconsideration); + reconsidered_timeout = true; + } + } + Ok(Packet::Rr(rr)) => { + if let Some(source) = self.remote_senders.remove(&rr.ssrc()) { + let receiver = source.into_receive(); + self.remote_receivers.insert(rr.ssrc(), receiver); + } + + let source = self.remote_receivers.entry(rr.ssrc()).or_insert_with(|| { + replies.push(RtcpRecvReply::NewSsrc(rr.ssrc())); + RemoteReceiveSource::new(rr.ssrc()) + }); + source.set_rtcp_from(from); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + + for rb in rr.report_blocks() { + if let Some(reply) = self.handle_rb(rr.ssrc(), rb, from, now, ntp_time) { + replies.push(reply); + } + } + } + Ok(Packet::Sr(sr)) => { + if let Some(addr) = from { + if self.local_senders.contains_key(&sr.ssrc()) + || self.local_receivers.contains_key(&sr.ssrc()) + { + if self.handle_ssrc_conflict(addr, now) { + replies.push(RtcpRecvReply::SsrcCollision(sr.ssrc())); + } + continue; + } + } + + if let Some(source) = self.remote_receivers.remove(&sr.ssrc()) { + let sender = source.into_send(); + self.remote_senders.insert(sr.ssrc(), sender); + } + + let source = self.remote_senders.entry(sr.ssrc()).or_insert_with(|| { + replies.push(RtcpRecvReply::NewSsrc(sr.ssrc())); + RemoteSendSource::new(sr.ssrc()) + }); + source.set_rtcp_from(from); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + source.set_last_received_sr( + ntp_time, + sr.ntp_timestamp().into(), + sr.rtp_timestamp(), + sr.octet_count(), + sr.packet_count(), + ); + + for rb in sr.report_blocks() { + if let Some(reply) = self.handle_rb(sr.ssrc(), rb, from, now, ntp_time) { + replies.push(reply); + } + } + } + Ok(Packet::Sdes(sdes)) => { + for chunk in sdes.chunks() { + for item in chunk.items() { + if let Some(addr) = from { + if self.local_senders.contains_key(&chunk.ssrc()) + || self.local_receivers.contains_key(&chunk.ssrc()) + { + if self.handle_ssrc_conflict(addr, now) { + replies.push(RtcpRecvReply::SsrcCollision(chunk.ssrc())); + } + continue; + } + } + if !matches!( + item.type_(), + SdesItem::CNAME + | SdesItem::NAME + | SdesItem::EMAIL + | SdesItem::PHONE + | SdesItem::LOC + | SdesItem::TOOL + | SdesItem::NOTE + ) { + // FIXME: handle unknown sdes items + continue; + } + if let Some(source) = self.remote_senders.get_mut(&chunk.ssrc()) { + source.set_rtcp_from(from); + source.received_sdes(item.type_(), item.value()); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + } else { + let source = self + .remote_receivers + .entry(chunk.ssrc()) + .or_insert_with(|| { + replies.push(RtcpRecvReply::NewSsrc(chunk.ssrc())); + RemoteReceiveSource::new(chunk.ssrc()) + }); + source.set_rtcp_from(from); + source.received_sdes(item.type_(), item.value()); + source.set_state(SourceState::Normal); + source.set_last_activity(now); + } + } + } + } + Ok(Packet::Unknown(_unk)) => (), + Err(_) => (), + } + } + replies + } + + fn generate_sr<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, 
+ now: Instant, + ntp_now: SystemTime, + ) -> CompoundBuilder<'a> { + let ntp_time = system_time_to_ntp_time_u64(ntp_now); + if self + .local_senders + .values() + .any(|source| match source.state() { + SourceState::Normal => true, + SourceState::Probation(_) => false, + SourceState::Bye => source.bye_sent_time().is_none(), + }) + { + let mut sender_srs = vec![]; + for sender in self.local_senders.values() { + if sender.state() != SourceState::Normal { + continue; + } + if sender.state() == SourceState::Bye && sender.bye_sent_time().is_some() { + continue; + } + // pick one of the sender ssrc's if we are going to + if self.internal_rtcp_sender_src.is_none() { + self.internal_rtcp_sender_src = Some(sender.ssrc()); + } + // get last rtp sent timestamp + let rtp_timestamp = sender + .last_rtp_sent_timestamp() + .map(|(last_rtp_ts, instant)| { + let dur_since_last_rtp = now.duration_since(instant); + trace!("last_rtp_ts: {last_rtp_ts}, dur since last rtp: {dur_since_last_rtp:?}"); + // get the clock-rate for this source + last_rtp_ts + sender + .payload_type() + .and_then(|pt| self.clock_rate_from_pt(pt)) + .and_then(|clock_rate| { + // assume that the rtp times and clock times advance at a rate + // close to 1.0 and do a direct linear extrapolation to get the rtp + // time for 'now' + trace!("clock-rate {clock_rate}"); + (dur_since_last_rtp.as_nanos() as u64).mul_div_round( + clock_rate as u64, + gst::ClockTime::SECOND.nseconds(), + ).map(|v| ((v & 0xffff_ffff) as u32)) + }) + .unwrap_or(0) + }) + .unwrap_or(0); + + let mut sr = SenderReport::builder(sender.ssrc()) + .packet_count((sender.packet_count() & 0xffff_ffff) as u32) + .octet_count((sender.octet_count() & 0xffff_ffff) as u32) + .ntp_timestamp(ntp_time.as_u64()) + .rtp_timestamp(rtp_timestamp); + + sender_srs.push((sender.ssrc(), ntp_now, ntp_time, rtp_timestamp)); + + for sender in self.remote_senders.values() { + if sender.state() != SourceState::Normal { + continue; + } + let rb = sender.generate_report_block(ntp_now); + sr = sr.add_report_block(rb.into()); + } + rtcp = rtcp.add_packet(sr); + } + for (ssrc, ntp_now, ntp_time, rtp_timestamp) in sender_srs { + self.local_senders + .entry(ssrc) + .and_modify(|sender| sender.take_sr_snapshot(ntp_now, ntp_time, rtp_timestamp)); + } + } + rtcp + } + + fn have_ssrc(&self, ssrc: u32) -> bool { + self.local_senders.contains_key(&ssrc) + || self.local_receivers.contains_key(&ssrc) + || self.remote_senders.contains_key(&ssrc) + || self.remote_receivers.contains_key(&ssrc) + } + + pub fn internal_ssrc(&self) -> Option { + self.internal_rtcp_sender_src + } + + fn ensure_internal_send_src(&mut self) -> u32 { + match self.internal_rtcp_sender_src { + Some(ssrc) => ssrc, + None => loop { + let ssrc = generate_ssrc(); + if !self.have_ssrc(ssrc) { + let mut source = LocalReceiveSource::new(ssrc); + source.set_state(SourceState::Normal); + for (k, v) in self.sdes.iter() { + source.set_sdes_item(*k, v.as_bytes()); + } + self.local_receivers.insert(ssrc, source); + self.internal_rtcp_sender_src = Some(ssrc); + return ssrc; + } + }, + } + } + + fn generate_rr<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, + now: Instant, + ntp_now: SystemTime, + ) -> CompoundBuilder<'a> { + if self + .local_senders + .values() + .all(|source| match source.state() { + SourceState::Normal => false, + SourceState::Probation(_) => true, + SourceState::Bye => source.bye_sent_time().is_some(), + }) + { + let ssrc = self.ensure_internal_send_src(); + self.local_senders + .entry(ssrc) + .and_modify(|source| 
source.set_last_activity(now)); + self.local_receivers + .entry(ssrc) + .and_modify(|source| source.set_last_activity(now)); + let mut rr = ReceiverReport::builder(ssrc); + for sender in self.remote_senders.values() { + if sender.state() != SourceState::Normal { + continue; + } + let rb = sender.generate_report_block(ntp_now); + rr = rr.add_report_block(rb.into()); + } + rtcp = rtcp.add_packet(rr); + } + rtcp + } + + fn generate_sdes<'a>(&self, rtcp: CompoundBuilder<'a>) -> CompoundBuilder<'a> { + let mut sdes = Sdes::builder(); + let mut have_chunk = false; + if !self.local_senders.is_empty() { + for sender in self.local_senders.values() { + let sdes_map = sender.sdes(); + if !sdes_map.is_empty() { + let mut chunk = SdesChunk::builder(sender.ssrc()); + for (ty, val) in sdes_map { + chunk = chunk.add_item_owned(SdesItem::builder(*ty, val)); + } + have_chunk = true; + sdes = sdes.add_chunk(chunk); + } + } + } + for receiver in self.local_receivers.values() { + let sdes_map = receiver.sdes(); + if !sdes_map.is_empty() { + let mut chunk = SdesChunk::builder(receiver.ssrc()); + for (ty, val) in sdes_map { + chunk = chunk.add_item_owned(SdesItem::builder(*ty, val)); + } + have_chunk = true; + sdes = sdes.add_chunk(chunk); + } + } + if have_chunk { + rtcp.add_packet(sdes) + } else { + rtcp + } + } + + fn find_bye_sources(&mut self) -> HashMap> { + let mut reason_ssrcs = HashMap::new(); + for source in self + .local_senders + .values_mut() + .filter(|source| source.state() == SourceState::Bye) + { + if source.bye_sent_time().is_none() { + let reason = source + .bye_reason() + .cloned() + .unwrap_or_else(|| String::from("Bye")); + let ssrcs = reason_ssrcs.entry(reason).or_insert_with(Vec::new); + ssrcs.push(source.ssrc()); + } + } + for source in self + .local_receivers + .values_mut() + .filter(|source| source.state() == SourceState::Bye) + { + if source.bye_sent_time().is_none() { + let reason = source + .bye_reason() + .cloned() + .unwrap_or_else(|| String::from("Bye")); + let ssrcs = reason_ssrcs.entry(reason).or_insert_with(Vec::new); + ssrcs.push(source.ssrc()); + } + } + reason_ssrcs + } + + fn generate_bye<'a>( + &mut self, + mut rtcp: CompoundBuilder<'a>, + now: Instant, + ) -> CompoundBuilder<'a> { + let bye_reason_ssrcs = self.find_bye_sources(); + if !bye_reason_ssrcs.is_empty() { + for (reason, ssrcs) in bye_reason_ssrcs.iter() { + let mut bye = Bye::builder().reason_owned(reason); + for ssrc in ssrcs.iter() { + bye = bye.add_source(*ssrc); + if let Some(source) = self.local_senders.get_mut(ssrc) { + source.bye_sent_at(now); + } else if let Some(source) = self.local_receivers.get_mut(ssrc) { + source.bye_sent_at(now); + } + } + rtcp = rtcp.add_packet(bye); + } + } + rtcp + } + + // RFC 3550 6.3.5 + fn handle_timeouts(&mut self, now: Instant) { + trace!("handling rtcp timeouts"); + let td = RTCP_SOURCE_TIMEOUT_N_INTERVALS * self.deterministic_rtcp_duration(false); + + // delete all sources that are too old + self.local_receivers + .retain(|_ssrc, source| now - source.last_activity() < td); + self.remote_senders + .retain(|_ssrc, source| now - source.last_activity() < td); + self.remote_receivers + .retain(|_ssrc, source| now - source.last_activity() < td); + + // There is a SHOULD about performing RTCP reverse timer consideration here if any sources + // were timed out, however we are here before calculating the next rtcp timeout so are + // covered already with a changing number of members. 
+ // If we call this outside of rtcp handling, then rtcp_reverse_consideration() would need to + // be called. + + // switch senders that haven't sent in a while to receivers + if self.last_rtcp_sent_times.len() >= 2 { + let two_rtcp_ago = *self.last_rtcp_sent_times.back().unwrap(); + let removed_senders = self + .local_senders + .iter() + .filter_map(|(&ssrc, source)| { + trace!( + "now: {now:?}, last activity: {:?} two_rtcp_ago: {:?}", + source.last_activity(), + two_rtcp_ago + ); + if source.last_activity() < two_rtcp_ago { + Some(ssrc) + } else { + None + } + }) + .inspect(|source| trace!("ssrc {source} has become a receiver")) + .collect::>(); + + for ssrc in removed_senders { + if let Some(source) = self.local_senders.remove(&ssrc) { + let new_source = source.into_receive(); + self.local_receivers.insert(new_source.ssrc(), new_source); + } + } + } + + // remove outdated conflicting addresses + self.conflicting_addresses + .retain(|_addr, time| now - *time < RTCP_ADDRESS_CONFLICT_TIMEOUT); + } + + /// Produce a RTCP packet (or None if it is too early to send a RTCP packet). After this call returns + /// a packet, the next time to send a RTCP packet can be retrieved from `poll_rtcp_send_timeout` + // TODO: return RtcpPacketBuilder thing + pub fn poll_rtcp_send(&mut self, now: Instant, ntp_now: SystemTime) -> Option> { + let Some(next_rtcp_send) = self.next_rtcp_send.time else { + return None; + }; + if now < next_rtcp_send { + return None; + } + + trace!("generating rtcp packet at {now:?}, ntp:{ntp_now:?}"); + + let data = { + let mut rtcp = Compound::builder(); + + // TODO: implement round robin of sr/rrs + rtcp = self.generate_sr(rtcp, now, ntp_now); + rtcp = self.generate_rr(rtcp, now, ntp_now); + rtcp = self.generate_sdes(rtcp); + rtcp = self.generate_bye(rtcp, now); + + let size = rtcp.calculate_size().unwrap(); + // TODO: handle dropping data + assert!(size < RTCP_MTU); + let mut data = vec![0; size]; + rtcp.write_into(&mut data).unwrap(); + data + }; + + for receiver in self.remote_senders.values_mut() { + receiver.update_last_rtcp(); + } + + self.update_rtcp_average(data.len() + UDP_IP_OVERHEAD_BYTES); + + self.handle_timeouts(now); + + self.next_rtcp_send = RtcpTimeMembers { + time: Some(self.next_rtcp_time(now)), + p_members: self.n_members(), + }; + self.last_rtcp_sent_times.push_front(now); + while self.last_rtcp_sent_times.len() > 2 { + self.last_rtcp_sent_times.pop_back(); + } + self.bye_state = None; + Some(data) + } + + /// Returns the next time to send a RTCP packet. 
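+    ///
+    /// A rough sketch (illustrative only) of driving the two poll calls together; a real
+    /// caller would use its own timer instead of a blocking `sleep` and re-query the
+    /// deadline whenever a timer reconsideration is signalled:
+    ///
+    /// ```ignore
+    /// loop {
+    ///     let now = Instant::now();
+    ///     if let Some(deadline) = session.poll_rtcp_send_timeout(now) {
+    ///         std::thread::sleep(deadline.saturating_duration_since(now));
+    ///     }
+    ///     if let Some(data) = session.poll_rtcp_send(Instant::now(), SystemTime::now()) {
+    ///         // push `data` out on the RTCP send path
+    ///     }
+    /// }
+    /// ```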
+ pub fn poll_rtcp_send_timeout(&mut self, now: Instant) -> Option { + if self.next_rtcp_send.time.is_none() { + self.next_rtcp_send = RtcpTimeMembers { + time: Some(self.next_rtcp_time(now)), + p_members: self.n_members(), + }; + } + self.next_rtcp_send.time + } + + fn deterministic_rtcp_duration(&self, we_sent: bool) -> Duration { + let n_senders = self.n_senders() as u64; + let n_members = self.n_members() as u64; + let session_bandwidth = self.session_bandwidth(); + // 5% of the session bandwidth, or the minimum of 400B/s + let rtcp_bw = (session_bandwidth / 20).max(RTCP_MIN_BANDWIDTH); + + let (n, rtcp_bw) = if n_senders * 4 <= n_members { + if we_sent { + (n_senders, rtcp_bw / 4) + } else { + (n_members - n_senders, rtcp_bw / 4 * 3) + } + } else { + (n_members, rtcp_bw) + }; + + let min_rtcp_interval = if !self.last_rtcp_sent_times.is_empty() && self.bye_state.is_none() + { + self.min_rtcp_interval + } else { + self.min_rtcp_interval / 2 + }; + + // 1_000_000_000 / (e-1.5) + let compensation_ns = 820_829_366; + let t_nanos = (compensation_ns + .mul_div_round(self.average_rtcp_size as u64 * n, rtcp_bw as u64)) + .unwrap() + .max(min_rtcp_interval.as_nanos() as u64); + trace!("deterministic rtcp interval {t_nanos}ns"); + Duration::from_nanos(t_nanos) + } + + fn session_bandwidth(&self) -> usize { + // TODO: allow to be externally provided + self.local_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .map(|source| source.bitrate()) + .sum::() + + self + .remote_senders + .values() + .filter(|source| source.state() == SourceState::Normal) + .map(|source| source.bitrate()) + .sum::() + } + + fn calculated_rtcp_duration(&self, we_sent: bool) -> Duration { + let dur = self.deterministic_rtcp_duration(we_sent); + + let mut rng = rand::thread_rng(); + // need a factor in [0.5, 1.5] + let factor = rng.gen::(); + dur.mul_f64(factor + 0.5) + } + + pub fn schedule_bye(&mut self, reason: &str, now: Instant) { + if self.bye_state.is_some() { + return; + } + + if self.n_members() <= 50 { + return; + } + + for source in self.local_senders.values_mut() { + source.mark_bye(reason); + } + for source in self.local_receivers.values_mut() { + source.mark_bye(reason); + } + + self.bye_state = Some(ByeState { + members: 1, + pmembers: 1, + }); + // tp is reset to tc + self.last_rtcp_sent_times = VecDeque::new(); + self.last_rtcp_sent_times.push_front(now); + // FIXME: use actual BYE packet size + self.average_rtcp_size = 100; + self.next_rtcp_send = RtcpTimeMembers { + time: Some(self.next_rtcp_time(now)), + p_members: self.n_members(), + }; + } + + fn next_rtcp_time(&self, now: Instant) -> Instant { + now + self + .calculated_rtcp_duration(!self.local_senders.is_empty() && self.bye_state.is_none()) + } + + /// Retrieve a list of all ssrc's currently handled by this session + pub fn ssrcs(&self) -> impl Iterator + '_ { + self.local_senders + .keys() + .chain(self.remote_senders.keys()) + .chain(self.local_receivers.keys()) + .chain(self.remote_receivers.keys()) + .cloned() + } + + /// Retrieve a local send source by ssrc + pub fn local_send_source_by_ssrc(&self, ssrc: u32) -> Option<&LocalSendSource> { + self.local_senders.get(&ssrc) + } + + /// Retrieve a local receive source by ssrc + pub fn local_receive_source_by_ssrc(&self, ssrc: u32) -> Option<&LocalReceiveSource> { + self.local_receivers.get(&ssrc) + } + + /// Retrieve a remote send source by ssrc + pub fn remote_send_source_by_ssrc(&self, ssrc: u32) -> Option<&RemoteSendSource> { + self.remote_senders.get(&ssrc) + } + + 
/// Retrieve a remote receive source by ssrc + pub fn remote_receive_source_by_ssrc(&self, ssrc: u32) -> Option<&RemoteReceiveSource> { + self.remote_receivers.get(&ssrc) + } + + pub fn mut_local_send_source_by_ssrc(&mut self, ssrc: u32) -> Option<&mut LocalSendSource> { + self.local_senders.get_mut(&ssrc) + } + + #[cfg(test)] + fn mut_remote_sender_source_by_ssrc(&mut self, ssrc: u32) -> Option<&mut RemoteSendSource> { + self.remote_senders.get_mut(&ssrc) + } +} + +fn generate_cname() -> String { + let mut rng = rand::thread_rng(); + let user = rng.gen::(); + let host = rng.gen::(); + format!("user{user}@{host:#}") +} + +fn generate_ssrc() -> u32 { + let mut rng = rand::thread_rng(); + rng.gen::() +} + +#[cfg(test)] +pub(crate) mod tests { + use rtp_types::RtpPacketBuilder; + + use crate::rtpbin2::time::NtpTime; + + use super::*; + + pub(crate) fn init_logs() { + let _ = gst::init(); + use crate::rtpbin2::imp::GstRustLogger; + GstRustLogger::install(); + } + + const TEST_PT: u8 = 96; + const TEST_CLOCK_RATE: u32 = 90000; + + #[test] + fn receive_probation() { + init_logs(); + let mut session = Session::new(); + let from = "127.0.0.1:1000".parse().unwrap(); + let now = Instant::now(); + let mut held = vec![]; + for seq_no in 0..5 { + let mut rtp_data = [0; 128]; + let len = RtpPacketBuilder::new() + .payload_type(TEST_PT) + .ssrc(0x12345678) + .sequence_number(seq_no) + .write_into(&mut rtp_data) + .unwrap(); + let rtp_data = &rtp_data[..len]; + let packet = RtpPacket::parse(rtp_data).unwrap(); + let mut ret = session.handle_recv(&packet, Some(from), now); + match seq_no { + // probation + 0 => { + if let RecvReply::NewSsrc(ssrc, pt) = ret { + assert_eq!(ssrc, 0x12345678); + assert_eq!(pt, TEST_PT); + if let RecvReply::Hold(id) = session.handle_recv(&packet, Some(from), now) { + held.push(id); + } else { + unreachable!(); + } + } else { + unreachable!(); + } + } + 1 => { + while let RecvReply::Forward(id) = ret { + let pos = held.iter().position(|&held_id| held_id == id).unwrap(); + held.remove(pos); + ret = session.handle_recv(&packet, Some(from), now); + } + assert!(held.is_empty()); + assert_eq!(ret, RecvReply::Passthrough); + } + 2..=4 => { + assert_eq!(ret, RecvReply::Passthrough) + } + _ => unreachable!(), + } + } + } + + fn generate_rtp_packet(ssrc: u32, seq_no: u16, rtp_ts: u32, payload_len: usize) -> Vec { + init_logs(); + let mut rtp_data = [0; 128]; + let payload = vec![1; payload_len]; + let len = RtpPacketBuilder::new() + .payload_type(TEST_PT) + .ssrc(ssrc) + .sequence_number(seq_no) + .timestamp(rtp_ts) + .payload(&payload) + .write_into(&mut rtp_data) + .unwrap(); + rtp_data[..len].to_vec() + } + + fn increment_rtcp_times( + old_now: Instant, + new_now: Instant, + ntp_now: SystemTime, + ) -> (Instant, SystemTime) { + (new_now, ntp_now + new_now.duration_since(old_now)) + } + + #[test] + fn send_new_ssrc() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let rtp_data = generate_rtp_packet(0x12345678, 100, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(0x12345678, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + } + + fn session_recv_first_packet_disable_probation( + session: &mut Session, + packet: &RtpPacket<'_>, + now: Instant, + ) { + assert_eq!( + session.handle_recv(packet, None, now), + RecvReply::NewSsrc(packet.ssrc(), packet.payload_type()) + ); + let src 
= session + .mut_remote_sender_source_by_ssrc(packet.ssrc()) + .unwrap(); + src.set_probation_packets(0); + } + + #[test] + fn receive_disable_probation() { + init_logs(); + let mut session = Session::new(); + let now = Instant::now(); + let rtp_data = generate_rtp_packet(0x12345678, 100, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + } + + #[test] + fn receive_two_ssrc_rr() { + init_logs(); + let mut session = Session::new(); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let rtp_data = generate_rtp_packet(ssrcs[1], 200, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + assert_eq!( + session.handle_recv( + &RtpPacket::parse(&generate_rtp_packet(ssrcs[1], 207, 0, 4)).unwrap(), + None, + now + ), + RecvReply::Passthrough + ); + + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_rb_ssrcs = 0; + let mut found_sdes_cname = false; + let mut sdes_ssrc = None; + let mut rr_ssrc = None; + + for p in rtcp { + match p { + Ok(Packet::Rr(rr)) => { + rr_ssrc = Some(rr.ssrc()); + for rb in rr.report_blocks() { + if ssrcs.contains(&rb.ssrc()) { + n_rb_ssrcs += 1; + } + match rb.ssrc() { + 0x12345678 => { + assert_eq!(rb.extended_sequence_number() & 0xffff, 100); + assert_eq!(rb.cumulative_lost(), 0xFFFFFF); // -1 in 24-bit + assert_eq!(rb.fraction_lost(), 0); + } + 0x87654321 => { + assert_eq!(rb.extended_sequence_number() & 0xffff, 207); + assert_eq!(rb.cumulative_lost(), 6); + assert_eq!(rb.fraction_lost(), 182); + } + _ => unreachable!(), + } + } + } + Ok(Packet::Sdes(sdes)) => { + for chunk in sdes.chunks() { + sdes_ssrc = Some(chunk.ssrc()); + for item in chunk.items() { + if item.type_() == SdesItem::CNAME { + found_sdes_cname = true; + } else { + unreachable!(); + } + } + } + } + _ => unreachable!("{p:?}"), + } + } + assert_eq!(n_rb_ssrcs, ssrcs.len()); + assert!(found_sdes_cname); + assert_eq!(sdes_ssrc, rr_ssrc); + } + + #[test] + fn send_two_ssrc_sr() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + // generate packets at the 'same time' as rtcp so some calculated timestamps will match + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 4, 8); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[0], 96) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtp_data = 
generate_rtp_packet(ssrcs[1], 200, 4, 8); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[1], TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_rb_ssrcs = 0; + for p in rtcp { + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + if ssrcs.contains(&sr.ssrc()) { + n_rb_ssrcs += 1; + } + // we sent 1 packet on each ssrc, rtcp should reflect that + assert_eq!(sr.packet_count(), 1); + assert_eq!(sr.octet_count() as usize, 8); + assert_eq!( + sr.ntp_timestamp(), + system_time_to_ntp_time_u64(ntp_now).as_u64() + ); + assert_eq!(sr.rtp_timestamp(), 4); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + assert_eq!(n_rb_ssrcs, ssrcs.len()); + } + + #[test] + fn receive_two_ssrc_sr() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 4, 8); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let rtp_data = generate_rtp_packet(ssrcs[1], 200, 20, 12); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet( + SenderReport::builder(ssrcs[0]) + .ntp_timestamp(system_time_to_ntp_time_u64(ntp_now).as_u64()) + .packet_count(1) + .octet_count(8) + .rtp_timestamp(4), + ) + .add_packet( + SenderReport::builder(ssrcs[1]) + .ntp_timestamp(system_time_to_ntp_time_u64(ntp_now).as_u64()) + .packet_count(2) + .octet_count(24) + .rtp_timestamp(20), + ) + .write_into(&mut data) + .unwrap(); + let data = &data[..len]; + let rtcp = Compound::parse(data).unwrap(); + + assert_eq!( + session.handle_rtcp_recv(rtcp, len, None, now, ntp_now), + vec![] + ); + + // generate packets at the 'same time' as rtcp so some calculated timestamps will match + let (new_now, new_ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtcp_data = session.poll_rtcp_send(new_now, new_ntp_now).unwrap(); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + match p { + Ok(Packet::Rr(rr)) => { + assert_eq!(rr.n_reports(), 2); + let mut rb_ssrcs = rr.report_blocks().map(|rb| rb.ssrc()).collect::>(); + rb_ssrcs.sort(); + assert_eq!(rb_ssrcs, &ssrcs); + for rb in rr.report_blocks() { + assert_eq!( + rb.last_sender_report_timestamp(), + system_time_to_ntp_time_u64(ntp_now).as_u32() + ); + assert_eq!( + rb.delay_since_last_sender_report_timestamp(), + NtpTime::from_duration(new_ntp_now.duration_since(ntp_now).unwrap()) + .as_u32() + ); + if rb.ssrc() == ssrcs[0] { + assert_eq!(rb.extended_sequence_number() & 0xffff, 100); + } else if rb.ssrc() == ssrcs[1] { + assert_eq!(rb.extended_sequence_number() & 0xffff, 200); + } else { + unreachable!() + } + } + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } 
+ } + + #[test] + fn send_receiver_two_ssrc_sr_rr() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + session.set_min_rtcp_interval(Duration::from_secs(1)); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrcs = [0x12345678, 0x87654321]; + + // get the next rtcp packet times and send at the same time + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + // send from two ssrcs + let rtp_data = generate_rtp_packet(ssrcs[0], 100, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[0], TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtp_data = generate_rtp_packet(ssrcs[1], 200, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrcs[1], TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + trace!("rtcp data {rtcp_data:?}"); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_sr_ssrcs = 0; + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + // no reports as there are no receivers + assert_eq!(sr.n_reports(), 0); + if ssrcs.contains(&sr.ssrc()) { + n_sr_ssrcs += 1; + } + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + assert_eq!(n_sr_ssrcs, ssrcs.len()); + + let recv_ssrcs = [0x11223344, 0xFFEEDDCC]; + + let rtp_data = generate_rtp_packet(recv_ssrcs[0], 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + let rtp_data = generate_rtp_packet(recv_ssrcs[1], 600, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + // get the next rtcp packet + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + trace!("rtcp data {rtcp_data:?}"); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut n_sr_ssrcs = 0; + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 2); + if ssrcs.contains(&sr.ssrc()) { + n_sr_ssrcs += 1; + } + let mut rb_ssrcs = sr.report_blocks().map(|rb| rb.ssrc()).collect::>(); + rb_ssrcs.sort(); + assert_eq!(rb_ssrcs, recv_ssrcs); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + assert_eq!(n_sr_ssrcs, ssrcs.len()); + } + + #[test] + fn session_internal_sender_ssrc() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let recv_ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(recv_ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + // get the next rtcp packet + let (now, ntp_now) = + increment_rtcp_times(now, 
session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Rr(rr)) => { + // no reports as there are no receivers + assert_eq!(rr.n_reports(), 1); + let mut rb_ssrcs = rr.report_blocks().map(|rb| rb.ssrc()).collect::>(); + rb_ssrcs.sort(); + assert_eq!(rb_ssrcs, &[recv_ssrc]); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + } + + #[test] + fn sender_source_timeout() { + init_logs(); + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x12345678; + + let rtp_data = generate_rtp_packet(ssrc, 200, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + // get the next rtcp packet + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + assert_eq!(sr.ssrc(), ssrc); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + + let mut seen_rr = false; + for _ in 0..=5 { + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + assert_eq!(sr.ssrc(), ssrc); + } + Ok(Packet::Rr(rr)) => { + assert_eq!(rr.ssrc(), ssrc); + seen_rr |= true; + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + } + assert!(seen_rr); + } + + #[test] + fn ignore_recv_bye_for_local_sender() { + // test that receiving a BYE for our (local) senders is ignored + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + // get the next rtcp packet + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + let rtcp = Compound::parse(&rtcp_data).unwrap(); + + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Sr(sr)) => { + assert_eq!(sr.n_reports(), 0); + assert_eq!(sr.ssrc(), ssrc); + } + Ok(Packet::Sdes(_)) => (), + _ => unreachable!(), + } + } + + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Bye::builder().add_source(ssrc)) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + + assert_eq!( + session.handle_rtcp_recv(rtcp, len, None, now, ntp_now), + vec![] + ); + let source = session.mut_local_send_source_by_ssrc(ssrc).unwrap(); + assert_eq!(source.state(), SourceState::Normal); + } + + #[test] + fn 
ssrc_collision_on_send() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + let from = "127.0.0.1:8080".parse().unwrap(); + + // add remote ssrc + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Sdes::builder().add_chunk( + SdesChunk::builder(ssrc).add_item(SdesItem::builder(SdesItem::CNAME, "cname")), + )) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![RtcpRecvReply::NewSsrc(ssrc)] + ); + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::SsrcCollision(ssrc) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Drop); + + // add ssrc as if our packets are being looped. As we have already discovered the + // conflicting address, these looped packets should be dropped. + let new_ssrc = 0x44332211; + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Sdes::builder().add_chunk( + SdesChunk::builder(new_ssrc).add_item(SdesItem::builder(SdesItem::CNAME, "cname")), + )) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![RtcpRecvReply::NewSsrc(new_ssrc)] + ); + + let rtp_data = generate_rtp_packet(new_ssrc, 510, 10, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!(session.handle_send(&packet, now), SendReply::Drop); + } + + #[test] + fn ssrc_collision_on_recv() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + let from = "127.0.0.1:8080".parse().unwrap(); + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + let mut data = vec![0; 128]; + let len = Compound::builder() + .add_packet(Sdes::builder().add_chunk( + SdesChunk::builder(ssrc).add_item(SdesItem::builder(SdesItem::CNAME, "cname")), + )) + .write_into(&mut data) + .unwrap(); + let rtcp = Compound::parse(&data[..len]).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, Some(from), now, ntp_now), + vec![RtcpRecvReply::SsrcCollision(ssrc)] + ); + } + + #[test] + fn ssrc_collision_third_party() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ssrc = 0x11223344; + let from1 = "127.0.0.1:8080".parse().unwrap(); + let from2 = "127.0.0.2:8080".parse().unwrap(); + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, Some(from1), now), + RecvReply::Passthrough + ); + + // packet from a different address should be dropped as a third party collision + assert_eq!( + session.handle_recv(&packet, Some(from2), now), + RecvReply::Ignore + ); + + // packet from a original address should still succeed + assert_eq!( + session.handle_recv(&packet, Some(from1), now), + 
RecvReply::Passthrough + ); + } + + #[test] + fn bye_remote_sender() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + session_recv_first_packet_disable_probation(&mut session, &packet, now); + assert_eq!( + session.handle_recv(&packet, None, now), + RecvReply::Passthrough + ); + + // send initial rtcp + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + let _rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + + let rtcp = Compound::builder().add_packet(Bye::builder().add_source(ssrc)); + let mut data = vec![0; 128]; + let len = rtcp.write_into(&mut data).unwrap(); + let data = &data[..len]; + + let rtcp = Compound::parse(data).unwrap(); + assert_eq!( + session.handle_rtcp_recv(rtcp, len, None, now, ntp_now), + vec![RtcpRecvReply::TimerReconsideration] + ); + let source = session.mut_remote_sender_source_by_ssrc(ssrc).unwrap(); + assert_eq!(source.state(), SourceState::Bye); + } + + #[test] + fn bye_local_sender() { + let mut session = Session::new(); + session.set_pt_clock_rate(TEST_PT, TEST_CLOCK_RATE); + let now = Instant::now(); + let ntp_now = SystemTime::now(); + let ssrc = 0x11223344; + + let rtp_data = generate_rtp_packet(ssrc, 500, 0, 4); + let packet = RtpPacket::parse(&rtp_data).unwrap(); + assert_eq!( + session.handle_send(&packet, now), + SendReply::NewSsrc(ssrc, TEST_PT) + ); + assert_eq!(session.handle_send(&packet, now), SendReply::Passthrough); + + // send initial rtcp + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + let _rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + + let source = session.mut_local_send_source_by_ssrc(ssrc).unwrap(); + source.mark_bye("Cya"); + assert_eq!(source.state(), SourceState::Bye); + + // data after bye should be dropped + assert_eq!(session.handle_send(&packet, now), SendReply::Drop); + + let (now, ntp_now) = + increment_rtcp_times(now, session.poll_rtcp_send_timeout(now).unwrap(), ntp_now); + let rtcp_data = session.poll_rtcp_send(now, ntp_now).unwrap(); + + let rtcp = Compound::parse(&rtcp_data).unwrap(); + let mut received_bye = false; + for p in rtcp { + trace!("{p:?}"); + match p { + Ok(Packet::Bye(bye)) => { + assert_eq!(bye.reason(), Some(b"Cya".as_ref())); + assert_eq!(bye.ssrcs().next(), Some(ssrc)); + // bye must not be followed by any other packets + received_bye = true; + } + Ok(Packet::Sdes(_sdes)) => { + assert!(!received_bye); + } + _ => unreachable!(), + } + } + assert!(received_bye); + } +} diff --git a/net/rtp/src/rtpbin2/source.rs b/net/rtp/src/rtpbin2/source.rs new file mode 100644 index 00000000..9f8c33ab --- /dev/null +++ b/net/rtp/src/rtpbin2/source.rs @@ -0,0 +1,1203 @@ +// SPDX-License-Identifier: MPL-2.0 + +use std::{ + collections::{HashMap, VecDeque}, + net::SocketAddr, + time::{Duration, Instant, SystemTime}, +}; + +use rtcp_types::{ReportBlock, ReportBlockBuilder}; + +use super::time::{system_time_to_ntp_time_u64, NtpTime}; + +use gst::prelude::MulDiv; + +pub const DEFAULT_PROBATION_N_PACKETS: usize = 2; +pub const DEFAULT_MAX_DROPOUT: u32 = 3000; +pub const DEFAULT_MAX_MISORDER: u32 = 100; + +const BITRATE_WINDOW: Duration = Duration::from_secs(3); + +#[derive(Debug, Clone, Copy)] +pub struct Rb { + ssrc: u32, + /// fraction out of 256 of 
packets lost since the last Rb + fraction_lost: u8, + /// signed 24-bit number of expected packets - received packets (including duplicates and late + /// packets) + cumulative_lost: u32, + extended_sequence_number: u32, + /// jitter in clock rate units + jitter: u32, + /// 16.16 fixed point ntp time + last_sr: u32, + /// 16.16 fixed point ntp duration + delay_since_last_sr: u32, +} + +impl Rb { + pub fn fraction_lost(&self) -> u8 { + self.fraction_lost + } + + pub fn cumulative_lost(&self) -> i32 { + if self.cumulative_lost & 0x800000 > 0 { + -((self.cumulative_lost & 0x7fffff) as i32) + } else { + self.cumulative_lost as i32 + } + } + + pub fn extended_sequence_number(&self) -> u32 { + self.extended_sequence_number + } + + pub fn jitter(&self) -> u32 { + self.jitter + } + + pub fn last_sr_ntp_time(&self) -> u32 { + self.last_sr + } + + pub fn delay_since_last_sr(&self) -> u32 { + self.delay_since_last_sr + } +} + +impl<'a> From> for Rb { + fn from(value: ReportBlock) -> Self { + Self { + ssrc: value.ssrc(), + fraction_lost: value.fraction_lost(), + cumulative_lost: value.cumulative_lost(), + extended_sequence_number: value.extended_sequence_number(), + jitter: value.interarrival_jitter(), + last_sr: value.last_sender_report_timestamp(), + delay_since_last_sr: value.delay_since_last_sender_report_timestamp(), + } + } +} + +impl From for ReportBlockBuilder { + fn from(value: Rb) -> Self { + ReportBlock::builder(value.ssrc) + .fraction_lost(value.fraction_lost) + .cumulative_lost(value.cumulative_lost) + .extended_sequence_number(value.extended_sequence_number) + .interarrival_jitter(value.jitter) + .last_sender_report_timestamp(value.last_sr) + .delay_since_last_sender_report_timestamp(value.delay_since_last_sr) + } +} + +#[derive(Debug)] +pub(crate) struct Source { + ssrc: u32, + state: SourceState, + sdes: HashMap, + last_activity: Instant, + payload_type: Option, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SourceState { + Probation(usize), + Normal, + Bye, +} + +impl Source { + fn new(ssrc: u32) -> Self { + Self { + ssrc, + state: SourceState::Probation(DEFAULT_PROBATION_N_PACKETS), + sdes: HashMap::new(), + last_activity: Instant::now(), + payload_type: None, + } + } + + fn set_state(&mut self, state: SourceState) { + self.state = state; + } +} + +#[derive(Debug)] +pub struct ReceivedRb { + pub rb: Rb, + pub receive_time: Instant, + pub receive_ntp_time: NtpTime, +} + +impl ReceivedRb { + #[allow(unused)] + fn round_trip_time(&self) -> Duration { + let rb_send_ntp_time = self.rb.last_sr as u64 + self.rb.delay_since_last_sr as u64; + + // Can't calculate any round trip time + if rb_send_ntp_time == 0 { + return Duration::ZERO; + } + + let mut rb_recv_ntp_time = self.receive_ntp_time.as_u32() as u64; + + if rb_send_ntp_time > rb_recv_ntp_time { + // 16.16 bit fixed point NTP time wrapped around + if rb_send_ntp_time - rb_recv_ntp_time > 0x7fff_ffff { + rb_recv_ntp_time += u32::MAX as u64; + } + } + + let diff = rb_recv_ntp_time.saturating_sub(rb_send_ntp_time); + // Bogus RTT of more than 2*5 seconds, return 1s as a fallback + if (diff >> 16) > 5 { + return Duration::from_secs(1); + } + + let rtt = 2 * diff; + let rtt_ns = rtt * 1_000_000_000 / 65_536; + Duration::from_nanos(rtt_ns) + } +} + +#[derive(Debug)] +pub struct LocalSendSource { + source: Source, + ext_seqnum: u64, + last_rtp_sent: Option<(u32, Instant)>, + sent_bytes: u64, + sent_packets: u64, + bitrate: Bitrate, + bye_sent_time: Option, + bye_reason: Option, + last_sent_sr: Option, + last_received_rb: 
HashMap, +} + +impl LocalSendSource { + pub(crate) fn new(ssrc: u32) -> Self { + Self { + source: Source::new(ssrc), + ext_seqnum: 0, + last_rtp_sent: None, + sent_bytes: 0, + sent_packets: 0, + bitrate: Bitrate::new(BITRATE_WINDOW), + bye_sent_time: None, + bye_reason: None, + last_sent_sr: None, + last_received_rb: HashMap::new(), + } + } + + pub(crate) fn set_state(&mut self, state: SourceState) { + self.source.set_state(state); + } + + pub(crate) fn state(&self) -> SourceState { + self.source.state + } + + pub(crate) fn sent_packet( + &mut self, + bytes: usize, + time: Instant, + seqnum: u16, + rtp_time: u32, + payload_type: u8, + ) { + self.bitrate.add_entry(bytes, time); + + let mut ext_seqnum = seqnum as u64 + (self.ext_seqnum & !0xffff); + + if ext_seqnum < self.ext_seqnum { + let diff = self.ext_seqnum - ext_seqnum; + if diff > 0x7fff { + ext_seqnum += 1 << 16; + } + } else { + let diff = ext_seqnum - self.ext_seqnum; + if diff > 0x7fff { + ext_seqnum -= 1 << 16; + } + } + self.ext_seqnum = ext_seqnum; + + self.source.payload_type = Some(payload_type); + + self.sent_bytes = self.sent_bytes.wrapping_add(bytes as u64); + self.sent_packets += 1; + self.last_rtp_sent = Some((rtp_time, time)); + } + + /// Retrieve the last rtp timestamp (and time) that data was sent for this source + pub fn last_rtp_sent_timestamp(&self) -> Option<(u32, Instant)> { + self.last_rtp_sent + } + + /// Retrieve the last seen payload type for this source + pub fn payload_type(&self) -> Option { + self.source.payload_type + } + + pub(crate) fn bitrate(&self) -> usize { + self.bitrate.bitrate() + } + + pub(crate) fn packet_count(&self) -> u64 { + self.sent_packets + } + + pub(crate) fn octet_count(&self) -> u64 { + self.sent_bytes + } + + /// Retrieve the ssrc for this source + pub fn ssrc(&self) -> u32 { + self.source.ssrc + } + + /// Set an sdes item for this source + pub fn set_sdes_item(&mut self, type_: u8, value: &[u8]) { + if let Ok(s) = std::str::from_utf8(value) { + self.source.sdes.insert(type_, s.to_owned()); + } + } + + /// Retrieve the sdes for this source + pub fn sdes(&self) -> &HashMap { + &self.source.sdes + } + + /// Set the last time when activity was seen for this source + pub fn set_last_activity(&mut self, time: Instant) { + self.source.last_activity = time; + } + + /// The last time when activity was seen for this source + pub fn last_activity(&self) -> Instant { + self.source.last_activity + } + + pub(crate) fn take_sr_snapshot( + &mut self, + ntp_now: SystemTime, + ntp_time: NtpTime, + rtp_timestamp: u32, + ) { + self.last_sent_sr = Some(Sr { + local_time: ntp_now, + remote_time: ntp_time, + rtp_time: rtp_timestamp, + octet_count: (self.sent_bytes & 0xffff_ffff) as u32, + packet_count: (self.sent_packets & 0xffff_ffff) as u32, + }); + } + + pub fn last_sent_sr(&self) -> Option { + self.last_sent_sr + } + + pub(crate) fn bye_sent_at(&mut self, time: Instant) { + self.bye_sent_time = Some(time); + } + + pub(crate) fn bye_sent_time(&self) -> Option { + self.bye_sent_time + } + + pub(crate) fn mark_bye(&mut self, reason: &str) { + if self.source.state == SourceState::Bye { + return; + } + self.set_state(SourceState::Bye); + self.bye_reason = Some(reason.to_string()); + } + + pub(crate) fn bye_reason(&self) -> Option<&String> { + self.bye_reason.as_ref() + } + + pub(crate) fn into_receive(self) -> LocalReceiveSource { + LocalReceiveSource { + source: self.source, + bye_sent_time: self.bye_sent_time, + bye_reason: self.bye_reason, + } + } + + pub fn add_last_rb( + &mut self, + sender_ssrc: 
u32, + rb: ReportBlock<'_>, + now: Instant, + ntp_now: SystemTime, + ) { + let ntp_now = system_time_to_ntp_time_u64(ntp_now); + let owned_rb = rb.into(); + self.last_received_rb + .entry(sender_ssrc) + .and_modify(|entry| { + *entry = ReceivedRb { + rb: owned_rb, + receive_time: now, + receive_ntp_time: ntp_now, + } + }) + .or_insert_with(|| ReceivedRb { + rb: owned_rb, + receive_time: now, + receive_ntp_time: ntp_now, + }); + } + + pub fn received_report_blocks(&self) -> impl Iterator + '_ { + self.last_received_rb.iter().map(|(&k, v)| (k, v)) + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum SourceRecvReply { + /// hold this buffer for later and give it the relevant id. The id will be used in a Drop, or + /// Forward return value + Hold(usize), + /// drop a buffer by id. Should continue calling with the same input until not Drop or Forward + Drop(usize), + /// forward a held buffer by id. Should continue calling with the same input until not Drop or Forward. + Forward(usize), + /// forward the input buffer + Passthrough, + /// Ignore this buffer and do not passthrough + Ignore, +} + +#[derive(Debug)] +struct HeldRecvBuffer { + id: usize, + time: Instant, + seqnum: u16, + bytes: u32, +} + +#[derive(Debug, Clone, Copy)] +pub struct Sr { + local_time: SystemTime, + remote_time: NtpTime, + rtp_time: u32, + octet_count: u32, + packet_count: u32, +} + +impl Sr { + pub fn ntp_timestamp(&self) -> NtpTime { + self.remote_time + } + + pub fn rtp_timestamp(&self) -> u32 { + self.rtp_time + } + + pub fn octet_count(&self) -> u32 { + self.octet_count + } + + pub fn packet_count(&self) -> u32 { + self.packet_count + } +} + +#[derive(Debug)] +pub struct RemoteSendSource { + source: Source, + probation_packets: usize, + last_received_sr: Option, + rtp_from: Option, + rtcp_from: Option, + initial_seqnum: Option, + ext_seqnum: Option, + recv_bytes: u64, + recv_packets: u64, + recv_packets_at_last_rtcp: u64, + ext_seqnum_at_last_rtcp: u64, + jitter: u32, + transit: Option, + // any held buffers. Used when source is on probation. 
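+    // New entries are pushed to the front; Drop/Forward always take the oldest entry from
+    // the back.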
+ held_buffers: VecDeque, + bitrate: Bitrate, + last_sent_rb: Option, + last_received_rb: HashMap, +} + +// The first time we recev a packet for jitter calculations +static INITIAL_RECV_TIME: once_cell::sync::OnceCell = once_cell::sync::OnceCell::new(); + +impl RemoteSendSource { + pub fn new(ssrc: u32) -> Self { + Self { + source: Source::new(ssrc), + probation_packets: DEFAULT_PROBATION_N_PACKETS, + last_received_sr: None, + rtp_from: None, + rtcp_from: None, + initial_seqnum: None, + ext_seqnum: None, + recv_bytes: 0, + recv_packets: 0, + recv_packets_at_last_rtcp: 0, + ext_seqnum_at_last_rtcp: 0, + held_buffers: VecDeque::new(), + jitter: 0, + transit: None, + bitrate: Bitrate::new(BITRATE_WINDOW), + last_sent_rb: None, + last_received_rb: HashMap::new(), + } + } + + /// Retrieve the ssrc for this source + pub fn ssrc(&self) -> u32 { + self.source.ssrc + } + + pub(crate) fn set_state(&mut self, state: SourceState) { + self.source.set_state(state); + } + + pub(crate) fn state(&self) -> SourceState { + self.source.state + } + + pub(crate) fn set_rtp_from(&mut self, from: Option) { + self.rtp_from = from; + } + + pub(crate) fn rtp_from(&self) -> Option { + self.rtp_from + } + + pub(crate) fn set_rtcp_from(&mut self, from: Option) { + self.rtcp_from = from; + } + + pub(crate) fn rtcp_from(&self) -> Option { + self.rtcp_from + } + + pub(crate) fn set_last_received_sr( + &mut self, + ntp_time: SystemTime, + remote_time: NtpTime, + rtp_time: u32, + octet_count: u32, + packet_count: u32, + ) { + self.last_received_sr = Some(Sr { + local_time: ntp_time, + remote_time, + rtp_time, + octet_count, + packet_count, + }); + } + + /// Retrieve the last received Sr for this source + pub fn last_received_sr(&self) -> Option { + self.last_received_sr + } + + /// Get the last sent RTCP report block for this source + pub fn last_sent_rb(&self) -> Option { + self.last_sent_rb + } + + fn init_sequence(&mut self, seqnum: u16) { + self.last_received_sr = None; + self.recv_bytes = 0; + self.recv_packets = 0; + self.recv_packets_at_last_rtcp = 0; + self.initial_seqnum = self.ext_seqnum; + self.ext_seqnum_at_last_rtcp = match self.ext_seqnum { + Some(ext) => ext, + None => 0x10000 + seqnum as u64, + }; + self.bitrate.reset(); + } + + #[allow(clippy::too_many_arguments)] + pub(crate) fn recv_packet( + &mut self, + bytes: u32, + time: Instant, + seqnum: u16, + rtp_timestamp: u32, + payload_type: u8, + clock_rate: Option, + hold_buffer_id: usize, + ) -> SourceRecvReply { + let initial_time = *INITIAL_RECV_TIME.get_or_init(|| time); + + if matches!(self.state(), SourceState::Bye) { + return SourceRecvReply::Ignore; + } + + let (max_seq, mut ext_seqnum) = match self.ext_seqnum { + Some(ext) => ((ext & 0xffff) as u16, seqnum as u64 + (ext & !0xffff)), + None => (seqnum.wrapping_sub(1), 0x10000 + seqnum as u64), + }; + trace!( + "source {} max seq {max_seq}, ext_seqnum {ext_seqnum}", + self.ssrc() + ); + + let diff = if seqnum < max_seq { + let mut diff = max_seq - seqnum; + + if diff > 0x7fff { + ext_seqnum += 1 << 16; + diff = u16::MAX - diff; + } + -(diff as i32 - 1) + } else { + let mut diff = seqnum - max_seq; + if diff > 0x7fff { + ext_seqnum -= 1 << 16; + diff = u16::MAX - diff; + } + diff as i32 + }; + + trace!("source {} in state {:?} received seqnum {seqnum} with a difference of {diff} from the previous seqnum", self.ssrc(), self.state()); + + let ret = if let SourceState::Probation(n_probation) = self.state() { + // consecutive packets are good + if diff == 1 { + if (0..=1).contains(&n_probation) { + 
info!("source {} leaving probation", self.ssrc()); + self.init_sequence(seqnum); + self.set_state(SourceState::Normal); + SourceRecvReply::Passthrough + } else { + debug!( + "source {} holding seqnum {seqnum} on probation", + self.ssrc() + ); + self.held_buffers.push_front(HeldRecvBuffer { + id: hold_buffer_id, + seqnum, + bytes, + time, + }); + while self.held_buffers.len() > self.probation_packets { + if let Some(held) = self.held_buffers.pop_back() { + debug!( + "source {} dropping seqnum {seqnum} on probation", + self.ssrc() + ); + return SourceRecvReply::Drop(held.id); + } + } + self.set_state(SourceState::Probation(n_probation - 1)); + SourceRecvReply::Hold(hold_buffer_id) + } + } else if self.probation_packets > 0 { + debug!( + "source {} resetting probation counter to {} at seqnum {seqnum}", + self.ssrc(), + self.probation_packets + ); + self.set_state(SourceState::Probation(self.probation_packets - 1)); + if let Some(held) = self.held_buffers.pop_back() { + return SourceRecvReply::Drop(held.id); + } + self.held_buffers.push_front(HeldRecvBuffer { + id: hold_buffer_id, + seqnum, + bytes, + time, + }); + while self.held_buffers.len() > self.probation_packets { + if let Some(held) = self.held_buffers.pop_back() { + return SourceRecvReply::Drop(held.id); + } + } + SourceRecvReply::Hold(hold_buffer_id) + } else { + info!( + "source {} leaving probation (no probation configured)", + self.ssrc() + ); + self.init_sequence(seqnum); + self.set_state(SourceState::Normal); + SourceRecvReply::Passthrough + } + } else if diff >= 1 && diff < DEFAULT_MAX_DROPOUT as i32 { + SourceRecvReply::Passthrough + } else if diff < -(DEFAULT_MAX_MISORDER as i32) || diff >= DEFAULT_MAX_DROPOUT as i32 { + debug!("non-consecutive packet outside of configured limits, dropping"); + // FIXME: should be a harder error? 
+            return SourceRecvReply::Ignore;
+        } else {
+            // duplicate or reordered packet
+            // downstream jitterbuffer will deal with this
+            SourceRecvReply::Passthrough
+        };
+
+        if matches!(ret, SourceRecvReply::Passthrough) {
+            if let Some(held) = self.held_buffers.pop_back() {
+                info!(
+                    "source {} pushing stored seqnum {}",
+                    self.ssrc(),
+                    held.seqnum
+                );
+                self.recv_packet_add_to_stats(
+                    rtp_timestamp,
+                    held.time,
+                    initial_time,
+                    payload_type,
+                    clock_rate,
+                    ext_seqnum,
+                    held.bytes,
+                );
+                return SourceRecvReply::Forward(held.id);
+            }
+        }
+
+        trace!("setting ext seqnum to {ext_seqnum}");
+        self.ext_seqnum = Some(ext_seqnum);
+        self.recv_packet_add_to_stats(
+            rtp_timestamp,
+            time,
+            initial_time,
+            payload_type,
+            clock_rate,
+            ext_seqnum,
+            bytes,
+        );
+
+        ret
+    }
+
+    #[allow(clippy::too_many_arguments)]
+    fn recv_packet_add_to_stats(
+        &mut self,
+        rtp_timestamp: u32,
+        now: Instant,
+        initial_time: Instant,
+        payload_type: u8,
+        clock_rate: Option<u32>,
+        ext_seqnum: u64,
+        bytes: u32,
+    ) {
+        /* calculate jitter */
+        if let Some(clock_rate) = clock_rate {
+            let rtparrival =
+                ((now.duration_since(initial_time).as_micros() & 0xffff_ffff_ffff_ffff) as u32)
+                    .mul_div_round(clock_rate, 1_000_000)
+                    .unwrap();
+            let transit = rtparrival.wrapping_sub(rtp_timestamp);
+            let diff = if let Some(existing_transit) = self.transit {
+                existing_transit.abs_diff(transit)
+            } else {
+                0
+            };
+            self.transit = Some(transit);
+            trace!("jitter {} diff {diff}", self.jitter);
+            self.jitter += diff.saturating_sub((self.jitter + 8) >> 4);
+        }
+        self.source.payload_type = Some(payload_type);
+
+        if self.initial_seqnum.is_none() {
+            self.initial_seqnum = Some(ext_seqnum);
+        }
+
+        self.bitrate.add_entry(bytes as usize, now);
+        self.recv_bytes = self.recv_bytes.wrapping_add(bytes as u64);
+        self.recv_packets += 1;
+    }
+
+    pub(crate) fn received_sdes(&mut self, type_: u8, value: &[u8]) {
+        if let Ok(s) = std::str::from_utf8(value) {
+            self.source.sdes.insert(type_, s.to_owned());
+        }
+    }
+
+    pub(crate) fn set_last_activity(&mut self, time: Instant) {
+        self.source.last_activity = time;
+    }
+
+    /// Retrieve the last time that activity was seen on this source
+    pub fn last_activity(&self) -> Instant {
+        self.source.last_activity
+    }
+
+    pub(crate) fn bitrate(&self) -> usize {
+        self.bitrate.bitrate()
+    }
+
+    pub fn payload_type(&self) -> Option<u8> {
+        self.source.payload_type
+    }
+
+    fn extended_sequence_number(&self) -> u32 {
+        (self.ext_seqnum.unwrap_or(0) & 0xffff_ffff) as u32
+    }
+
+    pub(crate) fn generate_report_block(&self, ntp_time: SystemTime) -> Rb {
+        let (last_sr, delay_since_last_sr) = self
+            .last_received_sr
+            .as_ref()
+            .map(|t| {
+                (
+                    t.remote_time,
+                    NtpTime::from_duration(
+                        ntp_time
+                            .duration_since(t.local_time)
+                            .unwrap_or(Duration::from_secs(0)),
+                    ),
+                )
+            })
+            .unwrap_or((
+                NtpTime::from_duration(Duration::from_secs(0)),
+                NtpTime::from_duration(Duration::from_secs(0)),
+            ));
+
+        let lost = self.packets_lost();
+
+        let expected_since_last_rtcp = self
+            .ext_seqnum
+            .unwrap_or(0)
+            .saturating_sub(self.ext_seqnum_at_last_rtcp);
+        let recv_packets_since_last_rtcp = self.recv_packets - self.recv_packets_at_last_rtcp;
+        let lost_packets_since_last_rtcp =
+            expected_since_last_rtcp as i64 - recv_packets_since_last_rtcp as i64;
+        let fraction_lost = if expected_since_last_rtcp == 0 || lost_packets_since_last_rtcp <= 0 {
+            0
+        } else {
+            (((lost_packets_since_last_rtcp as u64) << 8) / expected_since_last_rtcp) as u8
+        };
+        let cumulative_lost = if lost < 0 {
+            0x800000 | (lost & 0x7fffff) as u32
+        } else {
+            (lost & 0x7fffff) as u32
+        };
+
+        trace!(
+            "ssrc {} current packet counts ext_seqnum {:?} recv_packets {}",
+            self.source.ssrc,
+            self.ext_seqnum,
+            self.recv_packets
+        );
+        trace!(
+            "ssrc {} previous rtcp values ext_seqnum {:?} recv_packets {}",
+            self.source.ssrc,
+            self.ext_seqnum_at_last_rtcp,
+            self.recv_packets_at_last_rtcp
+        );
+        trace!("ssrc {} fraction expected {expected_since_last_rtcp} lost {lost_packets_since_last_rtcp} fraction lost {fraction_lost}", self.source.ssrc);
+
+        Rb {
+            ssrc: self.source.ssrc,
+            fraction_lost,
+            cumulative_lost,
+            extended_sequence_number: self.extended_sequence_number(),
+            jitter: self.jitter >> 4,
+            last_sr: last_sr.as_u32(),
+            delay_since_last_sr: delay_since_last_sr.as_u32(),
+        }
+    }
+
+    pub(crate) fn update_last_rtcp(&mut self) {
+        self.recv_packets_at_last_rtcp = self.recv_packets;
+        if let Some(ext) = self.ext_seqnum {
+            self.ext_seqnum_at_last_rtcp = ext;
+        }
+    }
+
+    /// The amount of jitter (in clock-rate units)
+    pub fn jitter(&self) -> u32 {
+        self.jitter >> 4
+    }
+
+    /// The total number of packets lost over the lifetime of this source
+    pub fn packets_lost(&self) -> i64 {
+        let expected = self.ext_seqnum.unwrap_or(0) - self.initial_seqnum.unwrap_or(0) + 1;
+        expected as i64 - self.recv_packets as i64
+    }
+
+    #[cfg(test)]
+    /// Set the number of probation packets before validating this source
+    pub fn set_probation_packets(&mut self, n_packets: usize) {
+        info!("source {} setting probation to {n_packets}", self.ssrc());
+        self.probation_packets = n_packets;
+        match self.state() {
+            SourceState::Bye | SourceState::Normal => (),
+            SourceState::Probation(existing) => {
+                if n_packets < existing {
+                    self.set_state(SourceState::Probation(n_packets));
+                }
+            }
+        }
+    }
+
+    pub fn packet_count(&self) -> u64 {
+        self.recv_packets
+    }
+
+    pub fn octet_count(&self) -> u64 {
+        self.recv_bytes
+    }
+
+    pub fn add_last_rb(
+        &mut self,
+        sender_ssrc: u32,
+        rb: ReportBlock<'_>,
+        now: Instant,
+        ntp_now: SystemTime,
+    ) {
+        let ntp_now = system_time_to_ntp_time_u64(ntp_now);
+        let owned_rb = rb.into();
+        self.last_received_rb
+            .entry(sender_ssrc)
+            .and_modify(|entry| {
+                *entry = ReceivedRb {
+                    rb: owned_rb,
+                    receive_time: now,
+                    receive_ntp_time: ntp_now,
+                }
+            })
+            .or_insert_with(|| ReceivedRb {
+                rb: owned_rb,
+                receive_time: now,
+                receive_ntp_time: ntp_now,
+            });
+    }
+
+    pub fn received_report_blocks(&self) -> impl Iterator<Item = (u32, &ReceivedRb)> + '_ {
+        self.last_received_rb.iter().map(|(&k, v)| (k, v))
+    }
+
+    pub(crate) fn into_receive(self) -> RemoteReceiveSource {
+        RemoteReceiveSource {
+            source: self.source,
+            rtcp_from: self.rtcp_from,
+        }
+    }
+}
+
+#[derive(Debug)]
+pub struct LocalReceiveSource {
+    source: Source,
+    bye_sent_time: Option<Instant>,
+    bye_reason: Option<String>,
+}
+
+impl LocalReceiveSource {
+    pub(crate) fn new(ssrc: u32) -> Self {
+        Self {
+            source: Source::new(ssrc),
+            bye_sent_time: None,
+            bye_reason: None,
+        }
+    }
+
+    pub fn ssrc(&self) -> u32 {
+        self.source.ssrc
+    }
+
+    pub(crate) fn set_state(&mut self, state: SourceState) {
+        self.source.set_state(state);
+    }
+
+    pub(crate) fn state(&self) -> SourceState {
+        self.source.state
+    }
+
+    pub(crate) fn payload_type(&self) -> Option<u8> {
+        self.source.payload_type
+    }
+
+    /// Set an sdes item for this source
+    pub fn set_sdes_item(&mut self, type_: u8, value: &[u8]) {
+        if let Ok(s) = std::str::from_utf8(value) {
+            self.source.sdes.insert(type_, s.to_owned());
+        }
+    }
+
+    /// Retrieve the sdes for this source
+    pub fn sdes(&self) -> &HashMap<u8, String> {
+        &self.source.sdes
+    }
+
+    /// Set the last time when activity was seen for this source
+    pub fn set_last_activity(&mut self, time: Instant) {
+        self.source.last_activity = time;
+    }
+
+    /// Retrieve the last time that activity was seen on this source
+    pub fn last_activity(&self) -> Instant {
+        self.source.last_activity
+    }
+
+    pub(crate) fn bye_sent_at(&mut self, time: Instant) {
+        self.bye_sent_time = Some(time);
+    }
+
+    pub(crate) fn bye_sent_time(&self) -> Option<Instant> {
+        self.bye_sent_time
+    }
+
+    pub(crate) fn mark_bye(&mut self, reason: &str) {
+        if self.source.state == SourceState::Bye {
+            return;
+        }
+        self.set_state(SourceState::Bye);
+        self.bye_reason = Some(reason.to_string());
+    }
+
+    pub(crate) fn bye_reason(&self) -> Option<&String> {
+        self.bye_reason.as_ref()
+    }
+}
+
+#[derive(Debug)]
+pub struct RemoteReceiveSource {
+    source: Source,
+    rtcp_from: Option<SocketAddr>,
+}
+
+impl RemoteReceiveSource {
+    pub(crate) fn new(ssrc: u32) -> Self {
+        Self {
+            source: Source::new(ssrc),
+            rtcp_from: None,
+        }
+    }
+
+    pub fn ssrc(&self) -> u32 {
+        self.source.ssrc
+    }
+
+    pub(crate) fn set_state(&mut self, state: SourceState) {
+        self.source.set_state(state);
+    }
+
+    pub(crate) fn state(&self) -> SourceState {
+        self.source.state
+    }
+
+    pub(crate) fn set_rtcp_from(&mut self, from: Option<SocketAddr>) {
+        self.rtcp_from = from;
+    }
+
+    pub(crate) fn rtcp_from(&self) -> Option<SocketAddr> {
+        self.rtcp_from
+    }
+
+    pub(crate) fn set_last_activity(&mut self, time: Instant) {
+        self.source.last_activity = time;
+    }
+
+    pub(crate) fn received_sdes(&mut self, type_: u8, value: &[u8]) {
+        if let Ok(s) = std::str::from_utf8(value) {
+            self.source.sdes.insert(type_, s.to_owned());
+        }
+    }
+
+    /// Retrieve the last time that activity was seen on this source
+    pub fn last_activity(&self) -> Instant {
+        self.source.last_activity
+    }
+
+    pub(crate) fn into_send(self) -> RemoteSendSource {
+        RemoteSendSource {
+            source: self.source,
+            probation_packets: DEFAULT_PROBATION_N_PACKETS,
+            last_received_sr: None,
+            rtp_from: None,
+            rtcp_from: self.rtcp_from,
+            initial_seqnum: None,
+            ext_seqnum: None,
+            recv_bytes: 0,
+            recv_packets: 0,
+            recv_packets_at_last_rtcp: 0,
+            ext_seqnum_at_last_rtcp: 0,
+            held_buffers: VecDeque::new(),
+            jitter: 0,
+            transit: None,
+            bitrate: Bitrate::new(BITRATE_WINDOW),
+            last_sent_rb: None,
+            last_received_rb: HashMap::new(),
+        }
+    }
+}
+
+#[derive(Debug)]
+struct Bitrate {
+    max_time: Duration,
+    entries: VecDeque<(usize, Instant)>,
+}
+
+impl Bitrate {
+    fn new(max_time: Duration) -> Self {
+        Self {
+            max_time,
+            entries: VecDeque::new(),
+        }
+    }
+
+    fn add_entry(&mut self, bytes: usize, time: Instant) {
+        self.entries.push_back((bytes, time));
+        while let Some((bytes, latest_time)) = self.entries.pop_front() {
+            if time.duration_since(latest_time) < self.max_time {
+                self.entries.push_front((bytes, latest_time));
+                break;
+            }
+        }
+    }
+
+    fn bitrate(&self) -> usize {
+        if let Some(front) = self.entries.front() {
+            let back = self.entries.back().unwrap();
+            let dur_micros = (back.1 - front.1).as_micros();
+            if dur_micros == 0 {
+                return front.0;
+            }
+            let bytes = self.entries.iter().map(|entry| entry.0).sum::<usize>();
+
+            (bytes as u64)
+                .mul_div_round(1_000_000, dur_micros as u64)
+                .unwrap_or(front.0 as u64) as usize
+        } else {
+            0
+        }
+    }
+
+    fn reset(&mut self) {
+        self.entries.clear()
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::rtpbin2::session::tests::init_logs;
+
+    const TEST_PT: u8 = 96;
+
+    #[test]
+    fn bitrate_single_value() {
+        init_logs();
+        // the bitrate of a single entry is the entry itself
+        let mut bitrate = Bitrate::new(BITRATE_WINDOW);
+        bitrate.add_entry(100, Instant::now());
+        assert_eq!(bitrate.bitrate(), 100);
+    }
+
+    #[test]
+    fn bitrate_two_values_over_half_second() {
+        init_logs();
+        let mut bitrate = Bitrate::new(Duration::from_secs(1));
+        let now = Instant::now();
+        bitrate.add_entry(100, now);
+        bitrate.add_entry(300, now + Duration::from_millis(500));
+        assert_eq!(bitrate.bitrate(), (100 + 300) * 2);
+    }
+
+    #[test]
+    fn receive_probation() {
+        init_logs();
+        let mut source = RemoteSendSource::new(100);
+        let now = Instant::now();
+        let mut hold_buffer_id = 0;
+        assert_eq!(
+            source.state(),
+            SourceState::Probation(DEFAULT_PROBATION_N_PACKETS)
+        );
+        assert_eq!(
+            SourceRecvReply::Hold(0),
+            source.recv_packet(16, now, 500, 100, TEST_PT, None, hold_buffer_id)
+        );
+        hold_buffer_id += 1;
+        assert_eq!(
+            SourceRecvReply::Forward(0),
+            source.recv_packet(16, now, 501, 100, TEST_PT, None, hold_buffer_id)
+        );
+        assert_eq!(source.state(), SourceState::Normal);
+        assert_eq!(
+            SourceRecvReply::Passthrough,
+            source.recv_packet(16, now, 501, 100, TEST_PT, None, hold_buffer_id)
+        );
+        assert_eq!(source.state(), SourceState::Normal);
+    }
+
+    #[test]
+    fn receive_probation_gap() {
+        init_logs();
+        let mut source = RemoteSendSource::new(100);
+        let now = Instant::now();
+        let mut hold_buffer_id = 0;
+        assert_eq!(
+            source.state(),
+            SourceState::Probation(DEFAULT_PROBATION_N_PACKETS)
+        );
+        assert_eq!(
+            SourceRecvReply::Hold(0),
+            source.recv_packet(100, now, 500, 100, TEST_PT, None, hold_buffer_id)
+        );
+        hold_buffer_id += 1;
+        // push a buffer with a sequence gap and reset the probation counter
+        assert_eq!(
+            SourceRecvReply::Drop(0),
+            source.recv_packet(101, now, 502, 100, TEST_PT, None, hold_buffer_id)
+        );
+        assert_eq!(
+            SourceRecvReply::Hold(1),
+            source.recv_packet(100, now, 502, 100, TEST_PT, None, hold_buffer_id)
+        );
+        hold_buffer_id += 1;
+        assert_eq!(
+            SourceRecvReply::Forward(1),
+            source.recv_packet(101, now, 503, 100, TEST_PT, None, hold_buffer_id)
+        );
+        assert_eq!(source.state(), SourceState::Normal);
+        assert_eq!(
+            SourceRecvReply::Passthrough,
+            source.recv_packet(101, now, 503, 100, TEST_PT, None, hold_buffer_id)
+        );
+    }
+
+    #[test]
+    fn receive_no_probation() {
+        init_logs();
+        let mut source = RemoteSendSource::new(100);
+        let now = Instant::now();
+        assert_eq!(
+            source.state(),
+            SourceState::Probation(DEFAULT_PROBATION_N_PACKETS)
+        );
+        source.set_probation_packets(0);
+        assert_eq!(source.state(), SourceState::Probation(0));
+        assert_eq!(
+            SourceRecvReply::Passthrough,
+            source.recv_packet(100, now, 500, 100, TEST_PT, None, 0)
+        );
+        assert_eq!(source.state(), SourceState::Normal);
+    }
+
+    #[test]
+    fn receive_wraparound() {
+        init_logs();
+        let mut source = RemoteSendSource::new(100);
+        source.set_probation_packets(0);
+        let now = Instant::now();
+        assert_eq!(
+            SourceRecvReply::Passthrough,
+            source.recv_packet(16, now, u16::MAX, u32::MAX, TEST_PT, None, 0)
+        );
+        assert_eq!(
+            SourceRecvReply::Passthrough,
+            source.recv_packet(16, now, 0, 0, TEST_PT, None, 0)
+        );
+    }
+}
diff --git a/net/rtp/src/rtpbin2/time.rs b/net/rtp/src/rtpbin2/time.rs
new file mode 100644
index 00000000..7373e21a
--- /dev/null
+++ b/net/rtp/src/rtpbin2/time.rs
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: MPL-2.0
+
+use std::{
+    ops::{Add, Sub},
+    time::{Duration, SystemTime},
+};
+
+// time between the NTP time at 1900-01-01 and the unix EPOCH (1970-01-01)
+const NTP_OFFSET: Duration = Duration::from_secs((365 * 70 + 17) * 24 * 60 * 60);
+
+// 2^32
+const F32: f64 = 4_294_967_296.0;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
+pub struct NtpTime(u64);
+
+impl NtpTime {
+    pub fn from_duration(dur: Duration) -> Self {
+        Self((dur.as_secs_f64() * F32) as u64)
+    }
+
+    pub fn as_u32(self) -> u32 {
+        ((self.0 >> 16) & 0xffffffff) as u32
+    }
+
+    pub fn as_u64(self) -> u64 {
+        self.0
+    }
+}
+
+impl Sub for NtpTime {
+    type Output = NtpTime;
+    fn sub(self, rhs: Self) -> Self::Output {
+        NtpTime(self.0 - rhs.0)
+    }
+}
+
+impl Add for NtpTime {
+    type Output = NtpTime;
+    fn add(self, rhs: Self) -> Self::Output {
+        NtpTime(self.0 + rhs.0)
+    }
+}
+
+pub fn system_time_to_ntp_time_u64(time: SystemTime) -> NtpTime {
+    let dur = time
+        .duration_since(SystemTime::UNIX_EPOCH)
+        .expect("time is before unix epoch?!")
+        + NTP_OFFSET;
+
+    NtpTime::from_duration(dur)
+}
+
+impl From<u64> for NtpTime {
+    fn from(value: u64) -> Self {
+        NtpTime(value)
+    }
+}
diff --git a/net/rtp/tests/rtpbin2.rs b/net/rtp/tests/rtpbin2.rs
new file mode 100644
index 00000000..4b911658
--- /dev/null
+++ b/net/rtp/tests/rtpbin2.rs
@@ -0,0 +1,159 @@
+//
+// Copyright (C) 2023 Matthew Waters
+//
+// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
+// If a copy of the MPL was not distributed with this file, You can obtain one at
+// <https://mozilla.org/MPL/2.0/>.
+//
+// SPDX-License-Identifier: MPL-2.0
+
+use std::sync::{Arc, Mutex};
+
+use gst::{prelude::*, Caps};
+use gst_check::Harness;
+use rtp_types::*;
+
+fn init() {
+    use std::sync::Once;
+    static INIT: Once = Once::new();
+
+    INIT.call_once(|| {
+        gst::init().unwrap();
+        gstrsrtp::plugin_register_static().expect("rtpbin2 test");
+    });
+}
+
+const TEST_SSRC: u32 = 0x12345678;
+const TEST_PT: u8 = 96;
+const TEST_CLOCK_RATE: u32 = 48000;
+
+fn generate_rtp_buffer(seqno: u16, rtpts: u32, payload_len: usize) -> gst::Buffer {
+    let payload = vec![4; payload_len];
+    let packet = RtpPacketBuilder::new()
+        .ssrc(TEST_SSRC)
+        .payload_type(TEST_PT)
+        .sequence_number(seqno)
+        .timestamp(rtpts)
+        .payload(&payload);
+    let size = packet.calculate_size().unwrap();
+    let mut data = vec![0; size];
+    packet.write_into(&mut data).unwrap();
+    gst::Buffer::from_mut_slice(data)
+}
+
+#[test]
+fn test_send() {
+    init();
+
+    let mut h = Harness::with_padnames("rtpbin2", Some("rtp_send_sink_0"), Some("rtp_send_src_0"));
+    h.play();
+
+    let caps = Caps::builder("application/x-rtp")
+        .field("media", "audio")
+        .field("payload", TEST_PT as i32)
+        .field("clock-rate", TEST_CLOCK_RATE as i32)
+        .field("encoding-name", "custom-test")
+        .build();
+    h.set_src_caps(caps);
+
+    h.push(generate_rtp_buffer(500, 20, 9)).unwrap();
+    h.push(generate_rtp_buffer(501, 30, 11)).unwrap();
+
+    let buffer = h.pull().unwrap();
+    let mapped = buffer.map_readable().unwrap();
+    let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap();
+    assert_eq!(rtp.sequence_number(), 500);
+
+    let buffer = h.pull().unwrap();
+    let mapped = buffer.map_readable().unwrap();
+    let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap();
+    assert_eq!(rtp.sequence_number(), 501);
+
+    let stats = h.element().unwrap().property::<gst::Structure>("stats");
+    let session_stats = stats.get::<gst::Structure>("0").unwrap();
+    let source_stats = session_stats
+        .get::<gst::Structure>(TEST_SSRC.to_string())
+        .unwrap();
+    assert_eq!(source_stats.get::<u32>("ssrc").unwrap(), TEST_SSRC);
+    assert_eq!(
+        source_stats.get::<u32>("clock-rate").unwrap(),
+        TEST_CLOCK_RATE
+    );
+    assert!(source_stats.get::<bool>("sender").unwrap());
+    assert!(source_stats.get::<bool>("local").unwrap());
+    assert_eq!(source_stats.get::<u64>("packets-sent").unwrap(), 2);
+    assert_eq!(source_stats.get::<u64>("octets-sent").unwrap(), 20);
+}
+
+#[test]
+fn test_receive() {
+    init();
+
+    let h = Arc::new(Mutex::new(Harness::with_padnames(
+        "rtpbin2",
+        Some("rtp_recv_sink_0"),
+        None,
+    )));
+    let weak_h = Arc::downgrade(&h);
+    let mut inner = h.lock().unwrap();
+    inner
+        .element()
+        .unwrap()
+        .connect_pad_added(move |_elem, pad| {
+            weak_h
+                .upgrade()
+                .unwrap()
+                .lock()
+                .unwrap()
+                .add_element_src_pad(pad)
+        });
+    inner.play();
+
+    let caps = Caps::builder("application/x-rtp")
+        .field("media", "audio")
+        .field("payload", TEST_PT as i32)
+        .field("clock-rate", TEST_CLOCK_RATE as i32)
+        .field("encoding-name", "custom-test")
+        .build();
+    inner.set_src_caps(caps);
+
+    // Cannot push with the harness lock held, as the 'pad-added' handler needs to add the newly
+    // created pad to the harness and must also take the harness lock. Work around this by pushing
+    // on the internal harness pad directly.
+    let push_pad = inner
+        .element()
+        .unwrap()
+        .static_pad("rtp_recv_sink_0")
+        .unwrap()
+        .peer()
+        .unwrap();
+    drop(inner);
+    push_pad.push(generate_rtp_buffer(500, 20, 9)).unwrap();
+    push_pad.push(generate_rtp_buffer(501, 30, 11)).unwrap();
+    let mut inner = h.lock().unwrap();
+
+    let buffer = inner.pull().unwrap();
+    let mapped = buffer.map_readable().unwrap();
+    let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap();
+    assert_eq!(rtp.sequence_number(), 500);
+
+    let buffer = inner.pull().unwrap();
+    let mapped = buffer.map_readable().unwrap();
+    let rtp = rtp_types::RtpPacket::parse(&mapped).unwrap();
+    assert_eq!(rtp.sequence_number(), 501);
+
+    let stats = inner.element().unwrap().property::<gst::Structure>("stats");
+    let session_stats = stats.get::<gst::Structure>("0").unwrap();
+    let source_stats = session_stats
+        .get::<gst::Structure>(TEST_SSRC.to_string())
+        .unwrap();
+    assert_eq!(source_stats.get::<u32>("ssrc").unwrap(), TEST_SSRC);
+    assert_eq!(
+        source_stats.get::<u32>("clock-rate").unwrap(),
+        TEST_CLOCK_RATE
+    );
+    assert!(source_stats.get::<bool>("sender").unwrap());
+    assert!(!source_stats.get::<bool>("local").unwrap());
+    assert_eq!(source_stats.get::<u64>("packets-received").unwrap(), 2);
+    assert_eq!(source_stats.get::<u64>("octets-received").unwrap(), 20);
+}
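
For reference, the NtpTime type added in net/rtp/src/rtpbin2/time.rs stores timestamps as 64-bit 32.32 fixed point (whole seconds since 1900-01-01 in the upper 32 bits), and as_u32() extracts the middle 32 bits used for the last_sr/delay_since_last_sr fields of the generated report blocks. A minimal standalone sketch of the same conversion, using only std; the names and the main() wrapper here are illustrative and not part of the patch:

    use std::time::{Duration, SystemTime, UNIX_EPOCH};

    // Seconds between the NTP era (1900-01-01) and the Unix epoch (1970-01-01):
    // 70 years plus 17 leap days, matching NTP_OFFSET above (2_208_988_800 s).
    const NTP_OFFSET_SECS: u64 = (365 * 70 + 17) * 24 * 60 * 60;

    fn to_ntp_u64(t: SystemTime) -> u64 {
        let dur = t.duration_since(UNIX_EPOCH).unwrap() + Duration::from_secs(NTP_OFFSET_SECS);
        // 32.32 fixed point: whole seconds in the upper word, fraction in the lower word
        (dur.as_secs_f64() * 4_294_967_296.0) as u64
    }

    fn main() {
        let ntp64 = to_ntp_u64(SystemTime::now());
        // Middle 32 bits (16.16 fixed point), the compact form carried in RTCP report blocks
        let ntp_short = ((ntp64 >> 16) & 0xffff_ffff) as u32;
        println!("ntp64 {ntp64:#018x} short {ntp_short:#010x}");
    }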