2021-10-18 06:42:42 +00:00
|
|
|
// Copyright (C) 2021 Sebastian Dröge <sebastian@centricular.com>
|
|
|
|
//
|
|
|
|
// This Source Code Form is subject to the terms of the Mozilla Public License, v2.0.
|
|
|
|
// If a copy of the MPL was not distributed with this file, You can obtain one at
|
|
|
|
// <https://mozilla.org/MPL/2.0/>.
|
|
|
|
//
|
|
|
|
// SPDX-License-Identifier: MPL-2.0
|
|
|
|
|
|
|
|
use gst::glib;
|
|
|
|
use gst::prelude::*;
|
|
|
|
use gst::subclass::prelude::*;
|
2022-05-05 12:09:19 +00:00
|
|
|
use gst_base::prelude::*;
|
|
|
|
use gst_base::subclass::prelude::*;
|
2021-10-18 06:42:42 +00:00
|
|
|
|
|
|
|
use std::collections::VecDeque;
|
2023-01-23 18:43:26 +00:00
|
|
|
use std::mem;
|
2021-10-18 06:42:42 +00:00
|
|
|
use std::sync::Mutex;
|
|
|
|
|
|
|
|
use once_cell::sync::Lazy;
|
|
|
|
|
|
|
|
use super::boxes;
|
|
|
|
use super::Buffer;
|
2022-10-26 09:16:42 +00:00
|
|
|
use super::DeltaFrames;
|
2021-10-18 06:42:42 +00:00
|
|
|
|
2022-05-12 10:44:20 +00:00
|
|
|
/// Offset for the segment in non-single-stream variants.
///
/// This is 1000 hours; it is added to all output timestamps in multi-stream
/// configurations so that buffers with negative DTS can still be represented
/// in the output segment (see `peek_buffer`).
const SEGMENT_OFFSET: gst::ClockTime = gst::ClockTime::from_seconds(60 * 60 * 1000);

/// Offset between UNIX epoch and Jan 1 1601 epoch in seconds.
/// 1601 = UNIX + UNIX_1601_OFFSET.
const UNIX_1601_OFFSET: u64 = 11_644_473_600;

/// Offset between NTP and UNIX epoch in seconds.
/// NTP = UNIX + NTP_UNIX_OFFSET.
const NTP_UNIX_OFFSET: u64 = 2_208_988_800;

/// Reference timestamp meta caps for NTP timestamps.
static NTP_CAPS: Lazy<gst::Caps> = Lazy::new(|| gst::Caps::builder("timestamp/x-ntp").build());

/// Reference timestamp meta caps for UNIX timestamps.
static UNIX_CAPS: Lazy<gst::Caps> = Lazy::new(|| gst::Caps::builder("timestamp/x-unix").build());
|
|
|
|
|
|
|
|
/// Returns the UTC time of the buffer in the UNIX epoch.
|
|
|
|
fn get_utc_time_from_buffer(buffer: &gst::BufferRef) -> Option<gst::ClockTime> {
|
|
|
|
buffer
|
|
|
|
.iter_meta::<gst::ReferenceTimestampMeta>()
|
|
|
|
.find_map(|meta| {
|
|
|
|
if meta.reference().can_intersect(&UNIX_CAPS) {
|
|
|
|
Some(meta.timestamp())
|
|
|
|
} else if meta.reference().can_intersect(&NTP_CAPS) {
|
2022-10-17 17:48:43 +00:00
|
|
|
meta.timestamp().checked_sub(NTP_UNIX_OFFSET.seconds())
|
2022-05-27 10:27:10 +00:00
|
|
|
} else {
|
|
|
|
None
|
|
|
|
}
|
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2022-11-17 17:53:48 +00:00
|
|
|
/// Converts a running time to an UTC time.
|
|
|
|
/// Converts a running time to an UTC time.
///
/// The mapping is a `(running time, UTC time)` reference point; the result is
/// `mapped UTC time - mapped running time + running_time`, or `None` if any
/// step overflows or the final value is negative.
fn running_time_to_utc_time(
    running_time: gst::Signed<gst::ClockTime>,
    running_time_utc_time_mapping: (gst::Signed<gst::ClockTime>, gst::ClockTime),
) -> Option<gst::ClockTime> {
    let (mapping_running_time, mapping_utc_time) = running_time_utc_time_mapping;

    let delta = gst::Signed::Positive(mapping_utc_time).checked_sub(mapping_running_time)?;
    let utc_time = delta.checked_add(running_time)?;

    utc_time.positive()
}
|
|
|
|
|
|
|
|
/// Converts an UTC time to a running time.
|
|
|
|
/// Converts an UTC time to a running time.
///
/// Inverse of [`running_time_to_utc_time`]: the result is
/// `mapped running time - mapped UTC time + utc_time`, or `None` if any step
/// overflows or the final value is negative.
fn utc_time_to_running_time(
    utc_time: gst::ClockTime,
    running_time_utc_time_mapping: (gst::Signed<gst::ClockTime>, gst::ClockTime),
) -> Option<gst::ClockTime> {
    let (mapping_running_time, mapping_utc_time) = running_time_utc_time_mapping;

    let delta = mapping_running_time.checked_sub(gst::Signed::Positive(mapping_utc_time))?;
    let running_time = delta.checked_add_unsigned(utc_time)?;

    running_time.positive()
}
|
|
|
|
|
2021-10-18 06:42:42 +00:00
|
|
|
/// Debug category used for all logging of this element.
static CAT: Lazy<gst::DebugCategory> = Lazy::new(|| {
    gst::DebugCategory::new(
        "fmp4mux",
        gst::DebugColorFlags::empty(),
        Some("FMP4Mux Element"),
    )
});

// Default values of the element's configurable settings (see `Settings`).
const DEFAULT_FRAGMENT_DURATION: gst::ClockTime = gst::ClockTime::from_seconds(10);
const DEFAULT_CHUNK_DURATION: Option<gst::ClockTime> = gst::ClockTime::NONE;
const DEFAULT_HEADER_UPDATE_MODE: super::HeaderUpdateMode = super::HeaderUpdateMode::None;
const DEFAULT_WRITE_MFRA: bool = false;
const DEFAULT_WRITE_MEHD: bool = false;
const DEFAULT_INTERLEAVE_BYTES: Option<u64> = None;
const DEFAULT_INTERLEAVE_TIME: Option<gst::ClockTime> = Some(gst::ClockTime::from_mseconds(250));
|
2021-10-18 06:42:42 +00:00
|
|
|
|
|
|
|
/// Configurable element settings.
///
/// Kept behind a mutex in [`FMP4Mux`]; defaults come from the `DEFAULT_*`
/// constants above.
#[derive(Debug, Clone)]
struct Settings {
    /// Target duration of one fragment.
    fragment_duration: gst::ClockTime,
    /// Target duration of one chunk inside a fragment, if chunking is enabled.
    chunk_duration: Option<gst::ClockTime>,
    /// How the headers (ftyp / moov box) are updated at the end of the stream.
    header_update_mode: super::HeaderUpdateMode,
    /// Whether an `mfra` box is written.
    write_mfra: bool,
    /// Whether a `mehd` box is written.
    write_mehd: bool,
    /// Interleave streams in chunks of at most this many bytes, if set.
    interleave_bytes: Option<u64>,
    /// Interleave streams in chunks of at most this duration, if set.
    interleave_time: Option<gst::ClockTime>,
    /// Timescale to use for the movie header (default 0).
    movie_timescale: u32,
    /// Whether timestamps are offset to start at zero (default false).
    offset_to_zero: bool,
}
|
|
|
|
|
|
|
|
impl Default for Settings {
|
|
|
|
fn default() -> Self {
|
|
|
|
Settings {
|
|
|
|
fragment_duration: DEFAULT_FRAGMENT_DURATION,
|
2023-01-23 18:43:26 +00:00
|
|
|
chunk_duration: DEFAULT_CHUNK_DURATION,
|
2021-10-18 06:42:42 +00:00
|
|
|
header_update_mode: DEFAULT_HEADER_UPDATE_MODE,
|
|
|
|
write_mfra: DEFAULT_WRITE_MFRA,
|
|
|
|
write_mehd: DEFAULT_WRITE_MEHD,
|
2022-05-12 10:44:20 +00:00
|
|
|
interleave_bytes: DEFAULT_INTERLEAVE_BYTES,
|
|
|
|
interleave_time: DEFAULT_INTERLEAVE_TIME,
|
2022-11-07 17:47:31 +00:00
|
|
|
movie_timescale: 0,
|
2023-01-25 09:59:52 +00:00
|
|
|
offset_to_zero: false,
|
2021-10-18 06:42:42 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-11-17 17:53:48 +00:00
|
|
|
/// A buffer waiting in a stream's pre-queue together with its resolved timestamps.
#[derive(Debug, Clone)]
struct PreQueuedBuffer {
    /// Buffer
    ///
    /// Buffer PTS/DTS are updated to the output segment in multi-stream configurations.
    buffer: gst::Buffer,

    /// PTS
    ///
    /// In ONVIF mode this is the UTC time, otherwise it is the PTS running time.
    pts: gst::ClockTime,

    /// End PTS
    ///
    /// In ONVIF mode this is the UTC time, otherwise it is the PTS running time.
    end_pts: gst::ClockTime,

    /// DTS
    ///
    /// In ONVIF mode this is the UTC time, otherwise it is the DTS running time.
    dts: Option<gst::Signed<gst::ClockTime>>,

    /// End DTS
    ///
    /// In ONVIF mode this is the UTC time, otherwise it is the DTS running time.
    end_dts: Option<gst::Signed<gst::ClockTime>>,
}
|
|
|
|
|
2022-10-01 17:52:18 +00:00
|
|
|
/// A single buffer stored inside a [`Gop`], with its timestamps.
#[derive(Debug)]
struct GopBuffer {
    /// The buffer itself.
    buffer: gst::Buffer,
    // NOTE(review): presumably the same time base as `PreQueuedBuffer::pts`
    // (running time, or UTC time in ONVIF mode) — confirm against queueing code.
    /// PTS of the buffer.
    pts: gst::ClockTime,
    /// PTS position (the buffer's original PTS) — TODO confirm against caller.
    pts_position: gst::ClockTime,
    /// DTS of the buffer, if any.
    dts: Option<gst::ClockTime>,
}
|
|
|
|
|
2022-10-01 17:52:18 +00:00
|
|
|
/// A queued group of pictures with aggregated timestamps of its buffers.
#[derive(Debug)]
struct Gop {
    /// Start PTS.
    start_pts: gst::ClockTime,
    /// Start DTS.
    start_dts: Option<gst::ClockTime>,
    /// Earliest PTS.
    earliest_pts: gst::ClockTime,
    /// Once this is known to be the final earliest PTS/DTS
    final_earliest_pts: bool,
    /// PTS plus duration of last buffer, or start of next GOP
    end_pts: gst::ClockTime,
    /// Once this is known to be the final end PTS/DTS
    final_end_pts: bool,
    /// DTS plus duration of last buffer, or start of next GOP
    end_dts: Option<gst::ClockTime>,

    /// Earliest PTS buffer position
    earliest_pts_position: gst::ClockTime,

    /// Buffer, PTS running time, DTS running time
    buffers: Vec<GopBuffer>,
}
|
|
|
|
|
2022-05-12 10:44:20 +00:00
|
|
|
/// Per-sinkpad muxing state.
struct Stream {
    /// Sink pad for this stream.
    sinkpad: super::FMP4MuxPad,

    /// Pre-queue for ONVIF variant to timestamp all buffers with their UTC time.
    ///
    /// In non-ONVIF mode this just collects the PTS/DTS and the corresponding running
    /// times for later usage.
    pre_queue: VecDeque<PreQueuedBuffer>,

    /// Currently configured caps for this stream.
    caps: gst::Caps,
    /// Whether this stream is intra-only and has frame reordering.
    delta_frames: DeltaFrames,

    /// Currently queued GOPs, including incomplete ones.
    queued_gops: VecDeque<Gop>,
    /// Whether the fully queued GOPs are filling a whole fragment.
    fragment_filled: bool,
    /// Whether a whole chunk is queued.
    chunk_filled: bool,

    /// Difference between the first DTS and 0 in case of negative DTS
    dts_offset: Option<gst::ClockTime>,

    /// Current position (DTS, or PTS for intra-only) to prevent
    /// timestamps from going backwards when queueing new buffers
    current_position: gst::ClockTime,

    /// Mapping between running time and UTC time in ONVIF mode.
    running_time_utc_time_mapping: Option<(gst::Signed<gst::ClockTime>, gst::ClockTime)>,
}
|
|
|
|
|
|
|
|
/// Muxer-wide state shared across all streams.
#[derive(Default)]
struct State {
    /// Currently configured streams.
    streams: Vec<Stream>,

    /// Stream header with ftyp and moov box.
    ///
    /// Created once we received caps and kept up to date with the caps,
    /// sent as part of the buffer list for the first fragment.
    stream_header: Option<gst::Buffer>,

    /// Sequence number of the current fragment.
    sequence_number: u32,

    /// Fragment tracking for mfra box
    current_offset: u64,
    /// Fragment tracking for mfra box
    fragment_offsets: Vec<super::FragmentOffset>,

    /// Earliest PTS of the whole stream
    earliest_pts: Option<gst::ClockTime>,
    /// Current end PTS of the whole stream
    end_pts: Option<gst::ClockTime>,
    /// Start DTS of the whole stream
    start_dts: Option<gst::ClockTime>,

    /// Start PTS of the current fragment
    fragment_start_pts: Option<gst::ClockTime>,
    /// Start PTS of the current chunk
    ///
    /// This is equal to `fragment_start_pts` if the current chunk is the first of a fragment,
    /// and always equal to `fragment_start_pts` if no `chunk_duration` is set.
    chunk_start_pts: Option<gst::ClockTime>,
    /// Additional timeout delay in case GOPs are bigger than the fragment duration
    timeout_delay: gst::ClockTime,

    /// If headers (ftyp / moov box) were sent.
    sent_headers: bool,
}
|
|
|
|
|
2022-05-12 10:44:20 +00:00
|
|
|
/// Instance struct of the fragmented-MP4 muxer element.
#[derive(Default)]
pub(crate) struct FMP4Mux {
    /// Mutable muxing state, locked independently of the settings.
    state: Mutex<State>,
    /// Element settings as configured through properties.
    settings: Mutex<Settings>,
}
|
|
|
|
|
|
|
|
impl FMP4Mux {
|
2022-11-17 17:53:48 +00:00
|
|
|
/// Checks if a buffer is valid according to the stream configuration.
|
|
|
|
fn check_buffer(
|
|
|
|
buffer: &gst::BufferRef,
|
|
|
|
sinkpad: &super::FMP4MuxPad,
|
|
|
|
delta_frames: super::DeltaFrames,
|
|
|
|
) -> Result<(), gst::FlowError> {
|
|
|
|
if delta_frames.requires_dts() && buffer.dts().is_none() {
|
|
|
|
gst::error!(CAT, obj: sinkpad, "Require DTS for video streams");
|
|
|
|
return Err(gst::FlowError::Error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if buffer.pts().is_none() {
|
|
|
|
gst::error!(CAT, obj: sinkpad, "Require timestamped buffers");
|
|
|
|
return Err(gst::FlowError::Error);
|
|
|
|
}
|
|
|
|
|
|
|
|
if delta_frames.intra_only() && buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) {
|
|
|
|
gst::error!(CAT, obj: sinkpad, "Intra-only stream with delta units");
|
|
|
|
return Err(gst::FlowError::Error);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
/// Peek the currently queued buffer on this stream.
|
|
|
|
///
|
|
|
|
/// This also determines the PTS/DTS that is finally going to be used, including
|
|
|
|
/// timestamp conversion to the UTC times in ONVIF mode.
|
2022-11-17 17:53:48 +00:00
|
|
|
/// Peek the currently queued buffer on this stream.
///
/// This also determines the PTS/DTS that is finally going to be used, including
/// timestamp conversion to the UTC times in ONVIF mode.
///
/// Returns `Ok(None)` when no buffer is available on the pad, and
/// `Err(gst_base::AGGREGATOR_FLOW_NEED_DATA)` in ONVIF mode while waiting for
/// the first buffer that carries a UTC reference timestamp.
fn peek_buffer(
    &self,
    stream: &mut Stream,
    fragment_duration: gst::ClockTime,
) -> Result<Option<PreQueuedBuffer>, gst::FlowError> {
    // If not in ONVIF mode or the mapping is already known and there is a pre-queued buffer
    // then we can directly return it from here.
    if self.obj().class().as_ref().variant != super::Variant::ONVIF
        || stream.running_time_utc_time_mapping.is_some()
    {
        if let Some(pre_queued_buffer) = stream.pre_queue.front() {
            return Ok(Some(pre_queued_buffer.clone()));
        }
    }

    // Pop buffer here, it will be stored in the pre-queue after calculating its timestamps
    let mut buffer = match stream.sinkpad.pop_buffer() {
        None => return Ok(None),
        Some(buffer) => buffer,
    };

    Self::check_buffer(&buffer, &stream.sinkpad, stream.delta_frames)?;

    // Only time segments are supported; anything else is an error.
    let segment = match stream.sinkpad.segment().downcast::<gst::ClockTime>().ok() {
        Some(segment) => segment,
        None => {
            gst::error!(CAT, obj: stream.sinkpad, "Got buffer before segment");
            return Err(gst::FlowError::Error);
        }
    };

    // PTS is guaranteed to be set here by check_buffer() above.
    let pts_position = buffer.pts().unwrap();
    let duration = buffer.duration();
    let end_pts_position = duration.opt_add(pts_position).unwrap_or(pts_position);

    // Convert PTS / end PTS to running time; negative running times are
    // clamped to zero with a warning.
    let pts = segment
        .to_running_time_full(pts_position)
        .ok_or_else(|| {
            gst::error!(CAT, obj: stream.sinkpad, "Couldn't convert PTS to running time");
            gst::FlowError::Error
        })?
        .positive()
        .unwrap_or_else(|| {
            gst::warning!(CAT, obj: stream.sinkpad, "Negative PTSs are not supported");
            gst::ClockTime::ZERO
        });

    let end_pts = segment
        .to_running_time_full(end_pts_position)
        .ok_or_else(|| {
            gst::error!(
                CAT,
                obj: stream.sinkpad,
                "Couldn't convert end PTS to running time"
            );
            gst::FlowError::Error
        })?
        .positive()
        .unwrap_or_else(|| {
            gst::warning!(CAT, obj: stream.sinkpad, "Negative PTSs are not supported");
            gst::ClockTime::ZERO
        });

    let (dts, end_dts) = if !stream.delta_frames.requires_dts() {
        (None, None)
    } else {
        // Negative DTS are handled via the dts_offset and by having negative composition time
        // offsets in the `trun` box. The smallest DTS here is shifted to zero.
        let dts_position = buffer.dts().expect("not DTS");
        let end_dts_position = duration.opt_add(dts_position).unwrap_or(dts_position);

        let dts = segment.to_running_time_full(dts_position).ok_or_else(|| {
            gst::error!(CAT, obj: stream.sinkpad, "Couldn't convert DTS to running time");
            gst::FlowError::Error
        })?;

        let end_dts = segment
            .to_running_time_full(end_dts_position)
            .ok_or_else(|| {
                gst::error!(
                    CAT,
                    obj: stream.sinkpad,
                    "Couldn't convert end DTS to running time"
                );
                gst::FlowError::Error
            })?;

        // Make sure the end DTS never goes backwards relative to the DTS.
        let end_dts = std::cmp::max(end_dts, dts);

        (Some(dts), Some(end_dts))
    };

    // If this is a multi-stream element then we need to update the PTS/DTS positions according
    // to the output segment, specifically to re-timestamp them with the running time and
    // adjust for the segment shift to compensate for negative DTS.
    if !self.obj().class().as_ref().variant.is_single_stream() {
        let pts_position = pts + SEGMENT_OFFSET;
        let dts_position = dts.map(|dts| {
            dts.checked_add_unsigned(SEGMENT_OFFSET)
                .and_then(|dts| dts.positive())
                .unwrap_or(gst::ClockTime::ZERO)
        });

        let buffer = buffer.make_mut();
        buffer.set_pts(pts_position);
        buffer.set_dts(dts_position);
    }

    if self.obj().class().as_ref().variant != super::Variant::ONVIF {
        // Store in the queue so we don't have to recalculate this all the time
        stream.pre_queue.push_back(PreQueuedBuffer {
            buffer,
            pts,
            end_pts,
            dts,
            end_dts,
        });
    } else if let Some(running_time_utc_time_mapping) = stream.running_time_utc_time_mapping {
        // For ONVIF we need to re-timestamp the buffer with its UTC time.
        //
        // After re-timestamping, put the buffer into the pre-queue so re-timestamping only has to
        // happen once.
        let utc_time = match get_utc_time_from_buffer(&buffer) {
            None => {
                // Calculate from the mapping
                running_time_to_utc_time(
                    gst::Signed::Positive(pts),
                    running_time_utc_time_mapping,
                )
                .ok_or_else(|| {
                    gst::error!(CAT, obj: stream.sinkpad, "Stream has negative PTS UTC time");
                    gst::FlowError::Error
                })?
            }
            Some(utc_time) => utc_time,
        };
        gst::trace!(
            CAT,
            obj: stream.sinkpad,
            "Mapped PTS running time {pts} to UTC time {utc_time}"
        );

        // End PTS and DTS are mapped relative to this buffer's own
        // (running time, UTC time) pair so drift within the buffer is avoided.
        let end_pts_utc_time = running_time_to_utc_time(
            gst::Signed::Positive(end_pts),
            (gst::Signed::Positive(pts), utc_time),
        )
        .ok_or_else(|| {
            gst::error!(CAT, obj: stream.sinkpad, "Stream has negative end PTS UTC time");
            gst::FlowError::Error
        })?;

        let (dts_utc_time, end_dts_utc_time) = if let Some(dts) = dts {
            let dts_utc_time = running_time_to_utc_time(
                dts,
                (gst::Signed::Positive(pts), utc_time),
            )
            .ok_or_else(|| {
                gst::error!(CAT, obj: stream.sinkpad, "Stream has negative DTS UTC time");
                gst::FlowError::Error
            })?;
            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "Mapped DTS running time {dts} to UTC time {dts_utc_time}"
            );

            let end_dts_utc_time = running_time_to_utc_time(
                end_dts.unwrap(),
                (gst::Signed::Positive(pts), utc_time),
            )
            .ok_or_else(|| {
                gst::error!(CAT, obj: stream.sinkpad, "Stream has negative end DTS UTC time");
                gst::FlowError::Error
            })?;
            (
                Some(gst::Signed::Positive(dts_utc_time)),
                Some(gst::Signed::Positive(end_dts_utc_time)),
            )
        } else {
            (None, None)
        };

        stream.pre_queue.push_back(PreQueuedBuffer {
            buffer,
            pts: utc_time,
            end_pts: end_pts_utc_time,
            dts: dts_utc_time,
            end_dts: end_dts_utc_time,
        });
    } else {
        // In ONVIF mode we need to get UTC times for each buffer and synchronize based on that.
        // Queue up to min(6s, fragment_duration) of data in the very beginning to get the first UTC time and then backdate.
        if let Some((last, first)) =
            Option::zip(stream.pre_queue.back(), stream.pre_queue.front())
        {
            // Existence of PTS/DTS checked below
            let (last, first) = if stream.delta_frames.requires_dts() {
                (last.end_dts.unwrap(), first.end_dts.unwrap())
            } else {
                (
                    gst::Signed::Positive(last.end_pts),
                    gst::Signed::Positive(first.end_pts),
                )
            };

            let limit = std::cmp::min(gst::ClockTime::from_seconds(6), fragment_duration);
            if last.saturating_sub(first) > gst::Signed::Positive(limit) {
                gst::error!(
                    CAT,
                    obj: stream.sinkpad,
                    "Got no UTC time in the first {limit} of the stream"
                );
                return Err(gst::FlowError::Error);
            }
        }

        let utc_time = match get_utc_time_from_buffer(&buffer) {
            Some(utc_time) => utc_time,
            None => {
                // No UTC time yet: keep queueing and ask the aggregator for
                // more data instead of producing output.
                stream.pre_queue.push_back(PreQueuedBuffer {
                    buffer,
                    pts,
                    end_pts,
                    dts,
                    end_dts,
                });
                return Err(gst_base::AGGREGATOR_FLOW_NEED_DATA);
            }
        };

        let mapping = (gst::Signed::Positive(pts), utc_time);
        stream.running_time_utc_time_mapping = Some(mapping);

        // Push the buffer onto the pre-queue and re-timestamp it and all other buffers
        // based on the mapping above once we have an UTC time.
        stream.pre_queue.push_back(PreQueuedBuffer {
            buffer,
            pts,
            end_pts,
            dts,
            end_dts,
        });

        // Backdate everything queued so far to UTC times using the mapping.
        for pre_queued_buffer in stream.pre_queue.iter_mut() {
            let pts_utc_time = running_time_to_utc_time(
                gst::Signed::Positive(pre_queued_buffer.pts),
                mapping,
            )
            .ok_or_else(|| {
                gst::error!(CAT, obj: stream.sinkpad, "Stream has negative PTS UTC time");
                gst::FlowError::Error
            })?;
            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "Mapped PTS running time {} to UTC time {pts_utc_time}",
                pre_queued_buffer.pts,
            );
            pre_queued_buffer.pts = pts_utc_time;

            let end_pts_utc_time = running_time_to_utc_time(
                gst::Signed::Positive(pre_queued_buffer.end_pts),
                mapping,
            )
            .ok_or_else(|| {
                gst::error!(CAT, obj: stream.sinkpad, "Stream has negative end PTS UTC time");
                gst::FlowError::Error
            })?;
            pre_queued_buffer.end_pts = end_pts_utc_time;

            if let Some(dts) = pre_queued_buffer.dts {
                let dts_utc_time = running_time_to_utc_time(dts, mapping).ok_or_else(|| {
                    gst::error!(CAT, obj: stream.sinkpad, "Stream has negative DTS UTC time");
                    gst::FlowError::Error
                })?;
                gst::trace!(
                    CAT,
                    obj: stream.sinkpad,
                    "Mapped DTS running time {dts} to UTC time {dts_utc_time}"
                );
                pre_queued_buffer.dts = Some(gst::Signed::Positive(dts_utc_time));

                let end_dts_utc_time = running_time_to_utc_time(
                    pre_queued_buffer.end_dts.unwrap(),
                    mapping,
                )
                .ok_or_else(|| {
                    gst::error!(CAT, obj: stream.sinkpad, "Stream has negative DTS UTC time");
                    gst::FlowError::Error
                })?;
                pre_queued_buffer.end_dts = Some(gst::Signed::Positive(end_dts_utc_time));
            }
        }

        // Fall through and return the front of the queue
    }

    Ok(Some(stream.pre_queue.front().unwrap().clone()))
}
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
/// Pop the currently queued buffer from this stream.
|
|
|
|
fn pop_buffer(&self, stream: &mut Stream) -> PreQueuedBuffer {
|
2022-11-17 17:53:48 +00:00
|
|
|
// Only allowed to be called after peek was successful so there must be a buffer now
|
|
|
|
// or in ONVIF mode we must also know the mapping now.
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
assert!(!stream.pre_queue.is_empty());
|
2022-11-17 17:53:48 +00:00
|
|
|
if self.obj().class().as_ref().variant == super::Variant::ONVIF {
|
2023-01-30 14:27:46 +00:00
|
|
|
assert!(stream.running_time_utc_time_mapping.is_some());
|
2022-11-17 17:53:48 +00:00
|
|
|
}
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
stream.pre_queue.pop_front().unwrap()
|
2022-11-17 17:53:48 +00:00
|
|
|
}
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
/// Finds the stream that has the earliest buffer queued.
|
2022-08-17 10:19:08 +00:00
|
|
|
/// Finds the stream that has the earliest buffer queued.
///
/// Returns `Ok(None)` when all streams are filled and need draining first, or
/// when no timeout is pending and not every stream has data or is EOS yet.
/// Otherwise returns the stream whose next buffer has the smallest running
/// time (DTS for streams that require DTS, PTS otherwise).
fn find_earliest_stream<'a>(
    &self,
    state: &'a mut State,
    timeout: bool,
    fragment_duration: gst::ClockTime,
) -> Result<Option<&'a mut Stream>, gst::FlowError> {
    // Nothing to select while every stream is waiting to be drained.
    if state
        .streams
        .iter()
        .all(|s| s.fragment_filled || s.chunk_filled)
    {
        gst::trace!(
            CAT,
            imp: self,
            "All streams are currently filled and have to be drained"
        );
        return Ok(None);
    }

    let mut earliest_stream = None;
    let mut all_have_data_or_eos = true;

    for stream in state.streams.iter_mut() {
        let pre_queued_buffer = match Self::peek_buffer(self, stream, fragment_duration) {
            Ok(Some(buffer)) => buffer,
            // Both "no buffer" and "need more data" (ONVIF pre-queueing) mean
            // this stream can't be selected right now.
            Ok(None) | Err(gst_base::AGGREGATOR_FLOW_NEED_DATA) => {
                if stream.sinkpad.is_eos() {
                    gst::trace!(CAT, obj: stream.sinkpad, "Stream is EOS");
                } else {
                    all_have_data_or_eos = false;
                    gst::trace!(CAT, obj: stream.sinkpad, "Stream has no buffer");
                }
                continue;
            }
            Err(err) => return Err(err),
        };

        gst::trace!(
            CAT,
            obj: stream.sinkpad,
            "Stream has running time PTS {} / DTS {} queued",
            pre_queued_buffer.pts,
            pre_queued_buffer.dts.display(),
        );

        // Ordering key: DTS for streams with frame reordering, PTS otherwise.
        let running_time = if stream.delta_frames.requires_dts() {
            pre_queued_buffer.dts.unwrap()
        } else {
            gst::Signed::Positive(pre_queued_buffer.pts)
        };

        if earliest_stream
            .as_ref()
            .map_or(true, |(_stream, earliest_running_time)| {
                *earliest_running_time > running_time
            })
        {
            earliest_stream = Some((stream, running_time));
        }
    }

    if !timeout && !all_have_data_or_eos {
        gst::trace!(
            CAT,
            imp: self,
            "No timeout and not all streams have a buffer or are EOS"
        );
        Ok(None)
    } else if let Some((stream, earliest_running_time)) = earliest_stream {
        gst::trace!(
            CAT,
            imp: self,
            "Stream {} is earliest stream with running time {}",
            stream.sinkpad.name(),
            earliest_running_time
        );
        Ok(Some(stream))
    } else {
        gst::trace!(CAT, imp: self, "No streams have data queued currently");
        Ok(None)
    }
}
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
    /// Queue incoming buffer as individual GOPs.
    ///
    /// Sanitizes the timestamps of `pre_queued_buffer` (monotonicity enforcement and
    /// DTS-offset handling for negative DTS), then either starts a new GOP at the
    /// front of `stream.queued_gops` (when the buffer is not a delta unit) or appends
    /// the buffer to the currently-filling GOP. Also promotes the
    /// `final_earliest_pts` / `final_end_pts` flags of older GOPs once enough
    /// information has arrived to know they can't change anymore.
    fn queue_gops(
        &self,
        stream: &mut Stream,
        mut pre_queued_buffer: PreQueuedBuffer,
    ) -> Result<(), gst::FlowError> {
        gst::trace!(CAT, obj: stream.sinkpad, "Handling buffer {:?}", pre_queued_buffer);

        let delta_frames = stream.delta_frames;

        // Enforce monotonically increasing PTS for intra-only streams, and DTS otherwise
        if !delta_frames.requires_dts() {
            if pre_queued_buffer.pts < stream.current_position {
                gst::warning!(
                    CAT,
                    obj: stream.sinkpad,
                    "Decreasing PTS {} < {}",
                    pre_queued_buffer.pts,
                    stream.current_position,
                );
                // Clamp the PTS forward instead of failing.
                pre_queued_buffer.pts = stream.current_position;
            } else {
                stream.current_position = pre_queued_buffer.pts;
            }
            // end_pts must never be before pts.
            pre_queued_buffer.end_pts =
                std::cmp::max(pre_queued_buffer.end_pts, pre_queued_buffer.pts);
        } else {
            // Negative DTS are handled via the dts_offset and by having negative composition time
            // offsets in the `trun` box. The smallest DTS here is shifted to zero.
            let dts = match pre_queued_buffer.dts.unwrap() {
                gst::Signed::Positive(dts) => {
                    if let Some(dts_offset) = stream.dts_offset {
                        dts + dts_offset
                    } else {
                        dts
                    }
                }
                gst::Signed::Negative(dts) => {
                    // First negative DTS observed defines the offset that shifts it to zero.
                    if stream.dts_offset.is_none() {
                        stream.dts_offset = Some(dts);
                    }

                    let dts_offset = stream.dts_offset.unwrap();
                    if dts > dts_offset {
                        // NOTE: both values are magnitudes of negative timestamps here, so
                        // `dts > dts_offset` means this DTS is *earlier* than the first one.
                        gst::warning!(CAT, obj: stream.sinkpad, "DTS before first DTS");
                        gst::ClockTime::ZERO
                    } else {
                        dts_offset - dts
                    }
                }
            };

            // Same shifting for the end DTS.
            let end_dts = match pre_queued_buffer.end_dts.unwrap() {
                gst::Signed::Positive(dts) => {
                    if let Some(dts_offset) = stream.dts_offset {
                        dts + dts_offset
                    } else {
                        dts
                    }
                }
                gst::Signed::Negative(dts) => {
                    let dts_offset = stream.dts_offset.unwrap();
                    if dts > dts_offset {
                        gst::warning!(CAT, obj: stream.sinkpad, "End DTS before first DTS");
                        gst::ClockTime::ZERO
                    } else {
                        dts_offset - dts
                    }
                }
            };

            // Enforce monotonically increasing DTS for streams that require DTS
            // NOTE: PTS stays the same so this will cause a bigger PTS/DTS difference
            // FIXME: Is this correct?
            if dts < stream.current_position {
                gst::warning!(
                    CAT,
                    obj: stream.sinkpad,
                    "Decreasing DTS {} < {}",
                    dts,
                    stream.current_position,
                );
                pre_queued_buffer.dts = Some(gst::Signed::Positive(stream.current_position));
            } else {
                pre_queued_buffer.dts = Some(gst::Signed::Positive(dts));
                stream.current_position = dts;
            }
            // end_dts must never be before dts.
            pre_queued_buffer.end_dts = Some(gst::Signed::Positive(std::cmp::max(end_dts, dts)));
        }

        let PreQueuedBuffer {
            buffer,
            pts,
            end_pts,
            dts,
            end_dts,
        } = pre_queued_buffer;

        // After the shifting above all DTS are non-negative by construction.
        let dts = dts.map(|v| v.positive().unwrap());
        let end_dts = end_dts.map(|v| v.positive().unwrap());

        let pts_position = buffer.pts().unwrap();

        if !buffer.flags().contains(gst::BufferFlags::DELTA_UNIT) {
            // Keyframe: open a new GOP at the front of the queue.
            gst::debug!(
                CAT,
                obj: stream.sinkpad,
                "Starting new GOP at PTS {} DTS {} (DTS offset {})",
                pts,
                dts.display(),
                stream.dts_offset.display(),
            );

            let gop = Gop {
                start_pts: pts,
                start_dts: dts,
                earliest_pts: pts,
                earliest_pts_position: pts_position,
                // Without B frames (no DTS needed) the first PTS is already the earliest.
                final_earliest_pts: !delta_frames.requires_dts(),
                end_pts,
                end_dts,
                final_end_pts: false,
                buffers: vec![GopBuffer {
                    buffer,
                    pts,
                    pts_position,
                    dts,
                }],
            };
            stream.queued_gops.push_front(gop);

            if let Some(prev_gop) = stream.queued_gops.get_mut(1) {
                gst::debug!(
                    CAT,
                    obj: stream.sinkpad,
                    "Updating previous GOP starting at PTS {} to end PTS {} DTS {}",
                    prev_gop.earliest_pts,
                    pts,
                    dts.display(),
                );

                // The new keyframe bounds the previous GOP's end timestamps.
                prev_gop.end_pts = std::cmp::max(prev_gop.end_pts, pts);
                prev_gop.end_dts = std::cmp::max(prev_gop.end_dts, dts);

                if !delta_frames.requires_dts() {
                    prev_gop.final_end_pts = true;
                }

                if !prev_gop.final_earliest_pts {
                    // Don't bother logging this for intra-only streams as it would be for every
                    // single buffer.
                    if delta_frames.requires_dts() {
                        gst::debug!(
                            CAT,
                            obj: stream.sinkpad,
                            "Previous GOP has final earliest PTS at {}",
                            prev_gop.earliest_pts
                        );
                    }

                    prev_gop.final_earliest_pts = true;
                    if let Some(prev_prev_gop) = stream.queued_gops.get_mut(2) {
                        prev_prev_gop.final_end_pts = true;
                    }
                }
            }
        } else if let Some(gop) = stream.queued_gops.front_mut() {
            // Delta frame: append to the currently-filling GOP.
            assert!(!delta_frames.intra_only());

            gop.end_pts = std::cmp::max(gop.end_pts, end_pts);
            gop.end_dts = gop.end_dts.opt_max(end_dts);
            gop.buffers.push(GopBuffer {
                buffer,
                pts,
                pts_position,
                dts,
            });

            if delta_frames.requires_dts() {
                let dts = dts.unwrap();

                // With B frames the earliest PTS of the GOP may be on a later buffer.
                if gop.earliest_pts > pts && !gop.final_earliest_pts {
                    gst::debug!(
                        CAT,
                        obj: stream.sinkpad,
                        "Updating current GOP earliest PTS from {} to {}",
                        gop.earliest_pts,
                        pts
                    );
                    gop.earliest_pts = pts;
                    gop.earliest_pts_position = pts_position;

                    if let Some(prev_gop) = stream.queued_gops.get_mut(1) {
                        if prev_gop.end_pts < pts {
                            gst::debug!(
                                CAT,
                                obj: stream.sinkpad,
                                "Updating previous GOP starting PTS {} end time from {} to {}",
                                pts,
                                prev_gop.end_pts,
                                pts
                            );
                            prev_gop.end_pts = pts;
                        }
                    }
                }

                let gop = stream.queued_gops.front_mut().unwrap();

                // The earliest PTS is known when the current DTS is bigger or equal to the first
                // PTS that was observed in this GOP. If there was another frame later that had a
                // lower PTS then it wouldn't be possible to display it in time anymore, i.e. the
                // stream would be invalid.
                if gop.start_pts <= dts && !gop.final_earliest_pts {
                    gst::debug!(
                        CAT,
                        obj: stream.sinkpad,
                        "GOP has final earliest PTS at {}",
                        gop.earliest_pts
                    );
                    gop.final_earliest_pts = true;

                    if let Some(prev_gop) = stream.queued_gops.get_mut(1) {
                        prev_gop.final_end_pts = true;
                    }
                }
            }
        } else {
            // Delta frame but no GOP open yet: drop until the first keyframe arrives.
            gst::warning!(
                CAT,
                obj: stream.sinkpad,
                "Waiting for keyframe at the beginning of the stream"
            );
        }

        // Log the duration covered by fully finalized GOPs, if any.
        if let Some((prev_gop, first_gop)) = Option::zip(
            stream.queued_gops.iter().find(|gop| gop.final_end_pts),
            stream.queued_gops.back(),
        ) {
            gst::debug!(
                CAT,
                obj: stream.sinkpad,
                "Queued full GOPs duration updated to {}",
                prev_gop.end_pts.saturating_sub(first_gop.earliest_pts),
            );
        }

        // Log the total queued duration (front = newest GOP, back = oldest GOP).
        gst::debug!(
            CAT,
            obj: stream.sinkpad,
            "Queued duration updated to {}",
            Option::zip(stream.queued_gops.front(), stream.queued_gops.back())
                .map(|(end, start)| end.end_pts.saturating_sub(start.start_pts))
                .unwrap_or(gst::ClockTime::ZERO)
        );

        Ok(())
    }
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
/// Queue buffers from all streams that are not filled for the current fragment yet
|
|
|
|
fn queue_available_buffers(
|
|
|
|
&self,
|
|
|
|
state: &mut State,
|
|
|
|
settings: &Settings,
|
|
|
|
timeout: bool,
|
|
|
|
) -> Result<(), gst::FlowError> {
|
|
|
|
let fragment_start_pts = state.fragment_start_pts;
|
|
|
|
let chunk_start_pts = state.chunk_start_pts;
|
|
|
|
|
|
|
|
// Always take a buffer from the stream with the earliest queued buffer to keep the
|
|
|
|
// fill-level at all sinkpads in sync.
|
|
|
|
while let Some(stream) =
|
|
|
|
self.find_earliest_stream(state, timeout, settings.fragment_duration)?
|
|
|
|
{
|
|
|
|
let pre_queued_buffer = Self::pop_buffer(self, stream);
|
|
|
|
|
|
|
|
// Queue up the buffer and update GOP tracking state
|
|
|
|
self.queue_gops(stream, pre_queued_buffer)?;
|
|
|
|
|
|
|
|
// Check if this stream is filled enough now.
|
|
|
|
self.check_stream_filled(settings, stream, fragment_start_pts, chunk_start_pts, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
Ok(())
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Check if the stream is filled enough for the current chunk / fragment.
    ///
    /// Sets `stream.fragment_filled` / `stream.chunk_filled` as a side effect; it is
    /// a no-op while the fragment/chunk start PTS are not known yet. `all_eos`
    /// (or the sinkpad itself being EOS) relaxes the "GOP must be final" conditions
    /// since no further data can arrive.
    fn check_stream_filled(
        &self,
        settings: &Settings,
        stream: &mut Stream,
        fragment_start_pts: Option<gst::ClockTime>,
        chunk_start_pts: Option<gst::ClockTime>,
        all_eos: bool,
    ) {
        // Either both are none or neither
        let (chunk_start_pts, fragment_start_pts) = match (chunk_start_pts, fragment_start_pts) {
            (Some(chunk_start_pts), Some(fragment_start_pts)) => {
                (chunk_start_pts, fragment_start_pts)
            }
            _ => return,
        };

        // Check if this stream is filled enough now.
        if let Some(chunk_duration) = settings.chunk_duration {
            // In chunk mode
            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "Current chunk start {}, current fragment start {}",
                chunk_start_pts,
                fragment_start_pts,
            );

            let chunk_end_pts = chunk_start_pts + chunk_duration;
            let fragment_end_pts = fragment_start_pts + settings.fragment_duration;

            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "Current chunk end {}, current fragment end {}",
                chunk_end_pts,
                fragment_end_pts,
            );

            // First check if the next split should be the end of a fragment or the end of a chunk.
            // If both are the same then a fragment split has preference.
            if fragment_end_pts <= chunk_end_pts {
                // We can only finish a fragment if a full GOP with final end PTS is queued and it
                // ends at or after the fragment end PTS.
                if let Some((gop_idx, gop)) = stream
                    .queued_gops
                    .iter()
                    .enumerate()
                    .find(|(_idx, gop)| gop.final_end_pts || all_eos || stream.sinkpad.is_eos())
                {
                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "GOP {gop_idx} start PTS {}, GOP end PTS {}",
                        gop.start_pts,
                        gop.end_pts,
                    );
                    if gop.end_pts >= fragment_end_pts {
                        gst::debug!(CAT, obj: stream.sinkpad, "Stream queued enough data for finishing this fragment");
                        stream.fragment_filled = true;
                        return;
                    }
                }
            }

            // Fragment split not (yet) possible; check whether at least a chunk can
            // be finished.
            if !stream.fragment_filled {
                // NOTE(review): the predicate checks `final_earliest_pts` while the
                // log message below talks about "final end PTS" — presumably the
                // predicate is the intended behavior; confirm against upstream.
                let (gop_idx, gop) = match stream.queued_gops.iter().enumerate().find(
                    |(_idx, gop)| gop.final_earliest_pts || all_eos || stream.sinkpad.is_eos(),
                ) {
                    Some(res) => res,
                    None => {
                        gst::trace!(
                            CAT,
                            obj: stream.sinkpad,
                            "Chunked mode and want to finish fragment but no GOP with final end PTS known yet",
                        );
                        return;
                    }
                };

                gst::trace!(
                    CAT,
                    obj: stream.sinkpad,
                    "GOP {gop_idx} start PTS {}, GOP end PTS {} (final {})",
                    gop.start_pts,
                    gop.end_pts,
                    gop.final_end_pts || all_eos || stream.sinkpad.is_eos(),
                );
                let last_pts = gop.buffers.last().map(|b| b.pts);

                if gop.end_pts >= chunk_end_pts
                    // only if there's another GOP or at least one further buffer
                    && (gop_idx > 0
                        || last_pts.map_or(false, |last_pts| last_pts.saturating_sub(chunk_start_pts) > chunk_duration))
                {
                    gst::debug!(CAT, obj: stream.sinkpad, "Stream queued enough data for this chunk");
                    stream.chunk_filled = true;
                }
            }
        } else {
            // Fragment-only mode: a fragment can be finished once a GOP with final
            // end PTS (or EOS) reaches the fragment end.
            let gop = match stream
                .queued_gops
                .iter()
                .find(|gop| gop.final_end_pts || all_eos || stream.sinkpad.is_eos())
            {
                Some(gop) => gop,
                None => {
                    gst::trace!(CAT, obj: stream.sinkpad, "Fragment mode but no GOP with final end PTS known yet");
                    return;
                }
            };

            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "GOP start PTS {}, GOP end PTS {}",
                gop.start_pts,
                gop.end_pts,
            );

            // Check if the end of the latest finalized GOP is after the fragment end
            let fragment_end_pts = fragment_start_pts + settings.fragment_duration;
            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "Current fragment start {}, current fragment end {}",
                fragment_start_pts,
                fragment_start_pts + settings.fragment_duration,
            );

            if gop.end_pts >= fragment_end_pts {
                gst::debug!(CAT, obj: stream.sinkpad, "Stream queued enough data for this fragment");
                stream.fragment_filled = true;
            }
        }
    }
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
    /// Calculate earliest PTS, i.e. PTS of the very first fragment.
    ///
    /// This also sends a force-keyunit event for the start of the second fragment.
    ///
    /// Runs only once: it returns immediately when `state.earliest_pts` is already
    /// set. The earliest PTS is the minimum over all streams' oldest queued GOPs;
    /// it can only be decided once every stream either has a GOP with final
    /// earliest PTS or we are at EOS / timed out.
    fn calculate_earliest_pts(
        &self,
        settings: &Settings,
        state: &mut State,
        upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>,
        all_eos: bool,
        timeout: bool,
    ) {
        if state.earliest_pts.is_some() {
            return;
        }

        // Snapshot taken before the assignments further below.
        // NOTE(review): these stale values (not the freshly assigned
        // `Some(earliest_pts)`) are what is passed to `check_stream_filled()` at the
        // end of this function — if they were `None` at entry that loop is a no-op.
        // Looks intentional for borrow reasons but worth confirming upstream.
        let fragment_start_pts = state.fragment_start_pts;
        let chunk_start_pts = state.chunk_start_pts;

        // Calculate the earliest PTS after queueing input if we can now.
        let mut earliest_pts = None;
        let mut start_dts = None;
        for stream in &state.streams {
            let (stream_earliest_pts, stream_start_dts) = match stream.queued_gops.back() {
                None => {
                    // A stream with nothing queued blocks the decision unless we are
                    // at EOS or timed out.
                    if !all_eos && !timeout {
                        earliest_pts = None;
                        start_dts = None;
                        break;
                    }
                    continue;
                }
                Some(oldest_gop) => {
                    // The oldest GOP's earliest PTS must be final before we can use it.
                    if !all_eos && !timeout && !oldest_gop.final_earliest_pts {
                        earliest_pts = None;
                        start_dts = None;
                        break;
                    }

                    (oldest_gop.earliest_pts, oldest_gop.start_dts)
                }
            };

            // Track the minimum earliest PTS over all streams.
            if earliest_pts.opt_gt(stream_earliest_pts).unwrap_or(true) {
                earliest_pts = Some(stream_earliest_pts);
            }

            // Track the minimum start DTS over all streams that have one.
            if let Some(stream_start_dts) = stream_start_dts {
                if start_dts.opt_gt(stream_start_dts).unwrap_or(true) {
                    start_dts = Some(stream_start_dts);
                }
            }
        }

        let earliest_pts = match earliest_pts {
            Some(earliest_pts) => earliest_pts,
            None => return,
        };

        // The earliest PTS is known and as such the start of the first and second fragment.
        gst::info!(
            CAT,
            imp: self,
            "Got earliest PTS {}, start DTS {} (timeout: {timeout}, all eos: {all_eos})",
            earliest_pts,
            start_dts.display()
        );
        state.earliest_pts = Some(earliest_pts);
        state.start_dts = start_dts;
        state.fragment_start_pts = Some(earliest_pts);
        state.chunk_start_pts = Some(earliest_pts);

        // Now send force-keyunit events for the second fragment start.
        let fku_time = earliest_pts + settings.fragment_duration;
        for stream in &state.streams {
            let current_position = stream.current_position;

            // In case of ONVIF this needs to be converted back from UTC time to
            // the stream's running time
            let (fku_time, current_position) =
                if self.obj().class().as_ref().variant == super::Variant::ONVIF {
                    (
                        if let Some(fku_time) = utc_time_to_running_time(
                            fku_time,
                            stream.running_time_utc_time_mapping.unwrap(),
                        ) {
                            fku_time
                        } else {
                            // No valid running time for this stream — skip the event.
                            continue;
                        },
                        utc_time_to_running_time(
                            current_position,
                            stream.running_time_utc_time_mapping.unwrap(),
                        ),
                    )
                } else {
                    (fku_time, Some(current_position))
                };

            // If the stream already moved past the target time, send an "immediate"
            // force-keyunit event (running time None) instead.
            let fku_time =
                if current_position.map_or(false, |current_position| current_position > fku_time) {
                    gst::warning!(
                        CAT,
                        obj: stream.sinkpad,
                        "Sending first force-keyunit event late for running time {} at {}",
                        fku_time,
                        current_position.display(),
                    );
                    None
                } else {
                    gst::debug!(
                        CAT,
                        obj: stream.sinkpad,
                        "Sending first force-keyunit event for running time {}",
                        fku_time,
                    );
                    Some(fku_time)
                };

            let fku = gst_video::UpstreamForceKeyUnitEvent::builder()
                .running_time(fku_time)
                .all_headers(true)
                .build();

            upstream_events.push((stream.sinkpad.clone(), fku));
        }

        // Check if any of the streams are already filled enough for the first chunk/fragment.
        for stream in &mut state.streams {
            self.check_stream_filled(
                settings,
                stream,
                fragment_start_pts,
                chunk_start_pts,
                all_eos,
            );
        }
    }
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
    /// Drain buffers from a single stream.
    ///
    /// Pops GOPs (oldest first, i.e. from the back of `stream.queued_gops`) that
    /// belong to the chunk/fragment ending at the computed dequeue end PTS and
    /// returns them. For the first drained stream `chunk_end_pts` is `None` and this
    /// function effectively decides where the chunk/fragment ends; subsequent
    /// streams are drained up to that position. In chunk mode a GOP may be split,
    /// keeping the remainder queued.
    ///
    /// Returns `AGGREGATOR_FLOW_NEED_DATA` if the first stream has no full GOP at a
    /// fragment boundary yet.
    #[allow(clippy::too_many_arguments)]
    fn drain_buffers_one_stream(
        &self,
        settings: &Settings,
        stream: &mut Stream,
        timeout: bool,
        all_eos: bool,
        fragment_start_pts: gst::ClockTime,
        chunk_start_pts: gst::ClockTime,
        chunk_end_pts: Option<gst::ClockTime>,
        fragment_start: bool,
        fragment_filled: bool,
    ) -> Result<Vec<Gop>, gst::FlowError> {
        // Draining is only valid once at least one of these conditions holds.
        assert!(
            timeout
                || all_eos
                || stream.sinkpad.is_eos()
                || stream.queued_gops.get(1).map(|gop| gop.final_earliest_pts) == Some(true)
                || settings.chunk_duration.is_some()
        );

        let mut gops = Vec::with_capacity(stream.queued_gops.len());
        if stream.queued_gops.is_empty() {
            return Ok(gops);
        }

        // For the first stream drain as much as necessary and decide the end of this
        // fragment or chunk, for all other streams drain up to that position.

        if let Some(chunk_duration) = settings.chunk_duration {
            // Chunk mode

            let dequeue_end_pts = if let Some(chunk_end_pts) = chunk_end_pts {
                // Not the first stream
                chunk_end_pts
            } else if fragment_filled {
                // Fragment is filled, so only dequeue everything until the latest GOP
                fragment_start_pts + settings.fragment_duration
            } else {
                // Fragment is not filled and we either have a full chunk or timeout
                chunk_start_pts + chunk_duration
            };

            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "Draining from {} up to end PTS {} / duration {}",
                chunk_start_pts,
                dequeue_end_pts,
                dequeue_end_pts.saturating_sub(chunk_start_pts),
            );

            while let Some(gop) = stream.queued_gops.back() {
                // If this should be the last chunk of a fragment then only drain every
                // finished GOP until the chunk end PTS. If there is no finished GOP for
                // this stream (it would be not the first stream then), then drain
                // everything up to the chunk end PTS.
                //
                // If this chunk is not the last chunk of a fragment then simply dequeue
                // everything up to the chunk end PTS.
                if fragment_filled {
                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "Fragment filled, current GOP start {} end {} (final {})",
                        gop.start_pts, gop.end_pts,
                        gop.final_end_pts || all_eos || stream.sinkpad.is_eos()
                    );

                    // If we have a final GOP then include it as long as it's either
                    // - ending before the dequeue end PTS
                    // - no GOPs were dequeued yet and this is the first stream
                    //
                    // The second case would happen if no GOP ends between the last chunk of the
                    // fragment and the fragment duration.
                    if (gop.final_end_pts || all_eos || stream.sinkpad.is_eos())
                        && (gop.end_pts <= dequeue_end_pts
                            || (gops.is_empty() && chunk_end_pts.is_none()))
                    {
                        gst::trace!(
                            CAT,
                            obj: stream.sinkpad,
                            "Pushing whole GOP",
                        );
                        gops.push(stream.queued_gops.pop_back().unwrap());
                        continue;
                    }
                    // Something was already drained for this chunk: stop here.
                    if !gops.is_empty() {
                        break;
                    }

                    // Otherwise if this is the first stream and no full GOP is queued then we need
                    // to wait for more data.
                    //
                    // If this is not the first stream then take an incomplete GOP.
                    if chunk_end_pts.is_none() {
                        gst::info!(CAT, obj: stream.sinkpad, "Don't have a full GOP at the end of a fragment");
                        return Err(gst_base::AGGREGATOR_FLOW_NEED_DATA);
                    } else {
                        gst::info!(CAT, obj: stream.sinkpad, "Including incomplete GOP");
                    }
                } else {
                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "Chunk filled, current GOP start {} end {} (final {})",
                        gop.start_pts, gop.end_pts,
                        gop.final_end_pts || all_eos || stream.sinkpad.is_eos()
                    );
                }

                if gop.end_pts <= dequeue_end_pts
                    && (gop.final_end_pts || all_eos || stream.sinkpad.is_eos())
                {
                    // Final GOP entirely inside the chunk: take it whole.
                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "Pushing whole GOP",
                    );
                    gops.push(stream.queued_gops.pop_back().unwrap());
                } else if gop.start_pts >= dequeue_end_pts
                    || (!gop.final_earliest_pts && !all_eos && !stream.sinkpad.is_eos())
                {
                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "GOP starts after chunk end",
                    );
                    break;
                } else {
                    // GOP straddles the chunk boundary: split it, keeping the tail queued.
                    let gop = stream.queued_gops.back_mut().unwrap();

                    let start_pts = gop.start_pts;
                    let start_dts = gop.start_dts;
                    let earliest_pts = gop.earliest_pts;
                    let earliest_pts_position = gop.earliest_pts_position;

                    let mut split_index = None;

                    // Index of the last buffer with PTS before the dequeue end.
                    for (idx, buffer) in gop.buffers.iter().enumerate() {
                        if buffer.pts >= dequeue_end_pts {
                            break;
                        }
                        split_index = Some(idx);
                    }
                    let split_index = match split_index {
                        Some(split_index) => split_index,
                        None => {
                            // We have B frames and the first buffer of this GOP is too far
                            // in the future.
                            gst::trace!(
                                CAT,
                                obj: stream.sinkpad,
                                "First buffer of GOP too far in the future",
                            );
                            break;
                        }
                    };

                    // The last buffer of the GOP starts before the chunk end but ends
                    // after the end. We still take it here and remove the whole GOP.
                    if split_index == gop.buffers.len() - 1 {
                        if gop.final_end_pts || all_eos || stream.sinkpad.is_eos() {
                            gst::trace!(
                                CAT,
                                obj: stream.sinkpad,
                                "Pushing whole GOP",
                            );
                            gops.push(stream.queued_gops.pop_back().unwrap());
                        } else {
                            gst::trace!(
                                CAT,
                                obj: stream.sinkpad,
                                "Can't push whole GOP as it's not final yet",
                            );
                        }
                        break;
                    }

                    let mut buffers = mem::take(&mut gop.buffers);
                    // Contains all buffers from `split_index + 1` to the end
                    gop.buffers = buffers.split_off(split_index + 1);

                    // Re-anchor the remaining (queued) part of the GOP on its new
                    // first buffer.
                    gop.start_pts = gop.buffers[0].pts;
                    gop.start_dts = gop.buffers[0].dts;
                    gop.earliest_pts_position = gop.buffers[0].pts_position;
                    gop.earliest_pts = gop.buffers[0].pts;

                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "Splitting GOP and keeping PTS {}",
                        gop.buffers[0].pts,
                    );

                    // The drained head of the split GOP; it ends where the tail starts.
                    let queue_gop = Gop {
                        start_pts,
                        start_dts,
                        earliest_pts,
                        final_earliest_pts: true,
                        end_pts: gop.start_pts,
                        final_end_pts: true,
                        end_dts: gop.start_dts,
                        earliest_pts_position,
                        buffers,
                    };

                    gops.push(queue_gop);
                    break;
                }
            }

            // Sanity check: a fragment must begin with a keyframe.
            if fragment_start {
                if let Some(first_buffer) = gops.first().and_then(|gop| gop.buffers.first()) {
                    if first_buffer
                        .buffer
                        .flags()
                        .contains(gst::BufferFlags::DELTA_UNIT)
                    {
                        gst::error!(CAT, obj: stream.sinkpad, "First buffer of a new fragment is not a keyframe");
                    }
                }
            }
        } else {
            // Non-chunk mode

            let dequeue_end_pts = if let Some(chunk_end_pts) = chunk_end_pts {
                // Not the first stream
                chunk_end_pts
            } else {
                fragment_start_pts + settings.fragment_duration
            };

            gst::trace!(
                CAT,
                obj: stream.sinkpad,
                "Draining from {} up to end PTS {} / duration {}",
                chunk_start_pts,
                dequeue_end_pts,
                dequeue_end_pts.saturating_sub(chunk_start_pts),
            );

            while let Some(gop) = stream.queued_gops.back() {
                gst::trace!(
                    CAT,
                    obj: stream.sinkpad,
                    "Current GOP start {} end {} (final {})",
                    gop.start_pts, gop.end_pts,
                    gop.final_end_pts || all_eos || stream.sinkpad.is_eos()
                );

                // If this GOP is not complete then we can't pop it yet.
                //
                // If there was no complete GOP at all yet then it might be bigger than the
                // fragment duration. In this case we might not be able to handle the latency
                // requirements in a live pipeline.
                if !gop.final_end_pts && !all_eos && !stream.sinkpad.is_eos() {
                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "Not including GOP without final end PTS",
                    );
                    break;
                }

                // If this GOP starts after the fragment end then don't dequeue it yet unless this is
                // the first stream and no GOPs were dequeued at all yet. This would mean that the
                // GOP is bigger than the fragment duration.
                if !all_eos
                    && gop.end_pts > dequeue_end_pts
                    && (chunk_end_pts.is_some() || !gops.is_empty())
                {
                    gst::trace!(
                        CAT,
                        obj: stream.sinkpad,
                        "Not including GOP yet",
                    );
                    break;
                }

                gst::trace!(
                    CAT,
                    obj: stream.sinkpad,
                    "Pushing complete GOP",
                );
                gops.push(stream.queued_gops.pop_back().unwrap());
            }
        }

        Ok(gops)
    }
|
|
|
|
|
|
|
|
/// Flatten all GOPs, remove any gaps and calculate durations.
|
|
|
|
#[allow(clippy::type_complexity)]
|
|
|
|
fn flatten_gops(
|
|
|
|
&self,
|
|
|
|
idx: usize,
|
|
|
|
stream: &Stream,
|
|
|
|
gops: Vec<Gop>,
|
|
|
|
) -> Result<
|
|
|
|
Option<(
|
|
|
|
// All buffers of the GOPs without gaps
|
|
|
|
VecDeque<super::Buffer>,
|
|
|
|
// Earliest PTS
|
|
|
|
gst::ClockTime,
|
|
|
|
// Earliest PTS position
|
|
|
|
gst::ClockTime,
|
|
|
|
// End PTS
|
|
|
|
gst::ClockTime,
|
|
|
|
// Start DTS
|
|
|
|
Option<gst::ClockTime>,
|
|
|
|
// Start DTS position
|
|
|
|
Option<gst::ClockTime>,
|
|
|
|
// End DTS
|
|
|
|
Option<gst::ClockTime>,
|
|
|
|
)>,
|
|
|
|
gst::FlowError,
|
|
|
|
> {
|
|
|
|
let last_gop = gops.last().unwrap();
|
|
|
|
let end_pts = last_gop.end_pts;
|
|
|
|
let end_dts = last_gop.end_dts;
|
|
|
|
|
|
|
|
let mut gop_buffers = Vec::with_capacity(gops.iter().map(|g| g.buffers.len()).sum());
|
|
|
|
gop_buffers.extend(gops.into_iter().flat_map(|gop| gop.buffers.into_iter()));
|
|
|
|
|
|
|
|
// Then calculate durations for all of the buffers and get rid of any GAP buffers in
|
|
|
|
// the process.
|
|
|
|
// Also calculate the earliest PTS / start DTS here, which needs to consider GAP
|
|
|
|
// buffers too.
|
|
|
|
let mut buffers = VecDeque::with_capacity(gop_buffers.len());
|
|
|
|
let mut earliest_pts = None;
|
|
|
|
let mut earliest_pts_position = None;
|
|
|
|
let mut start_dts = None;
|
|
|
|
let mut start_dts_position = None;
|
|
|
|
|
|
|
|
let mut gop_buffers = gop_buffers.into_iter();
|
|
|
|
while let Some(buffer) = gop_buffers.next() {
|
|
|
|
// If this is a GAP buffer then skip it. Its duration was already considered
|
|
|
|
// below for the non-GAP buffer preceding it, and if there was none then the
|
|
|
|
// chunk start would be adjusted accordingly for this stream.
|
|
|
|
if buffer.buffer.flags().contains(gst::BufferFlags::GAP)
|
|
|
|
&& buffer.buffer.flags().contains(gst::BufferFlags::DROPPABLE)
|
|
|
|
&& buffer.buffer.size() == 0
|
|
|
|
{
|
|
|
|
gst::trace!(
|
|
|
|
CAT,
|
|
|
|
obj: stream.sinkpad,
|
|
|
|
"Skipping gap buffer {buffer:?}",
|
|
|
|
);
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if earliest_pts.map_or(true, |earliest_pts| buffer.pts < earliest_pts) {
|
|
|
|
earliest_pts = Some(buffer.pts);
|
|
|
|
}
|
|
|
|
if earliest_pts_position.map_or(true, |earliest_pts_position| {
|
|
|
|
buffer.buffer.pts().unwrap() < earliest_pts_position
|
|
|
|
}) {
|
|
|
|
earliest_pts_position = Some(buffer.buffer.pts().unwrap());
|
|
|
|
}
|
|
|
|
if stream.delta_frames.requires_dts() && start_dts.is_none() {
|
|
|
|
start_dts = Some(buffer.dts.unwrap());
|
|
|
|
}
|
|
|
|
if stream.delta_frames.requires_dts() && start_dts_position.is_none() {
|
|
|
|
start_dts_position = Some(buffer.buffer.dts().unwrap());
|
|
|
|
}
|
|
|
|
|
|
|
|
let timestamp = if !stream.delta_frames.requires_dts() {
|
|
|
|
buffer.pts
|
|
|
|
} else {
|
|
|
|
buffer.dts.unwrap()
|
|
|
|
};
|
|
|
|
|
|
|
|
// Take as end timestamp the timestamp of the next non-GAP buffer
|
|
|
|
let end_timestamp = match gop_buffers.as_slice().iter().find(|buf| {
|
|
|
|
!buf.buffer.flags().contains(gst::BufferFlags::GAP)
|
|
|
|
|| !buf.buffer.flags().contains(gst::BufferFlags::DROPPABLE)
|
|
|
|
|| buf.buffer.size() != 0
|
|
|
|
}) {
|
|
|
|
Some(buffer) => {
|
|
|
|
if !stream.delta_frames.requires_dts() {
|
|
|
|
buffer.pts
|
|
|
|
} else {
|
|
|
|
buffer.dts.unwrap()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
None => {
|
|
|
|
if !stream.delta_frames.requires_dts() {
|
|
|
|
end_pts
|
|
|
|
} else {
|
|
|
|
end_dts.unwrap()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// Timestamps are enforced to monotonically increase when queueing buffers
|
|
|
|
let duration = end_timestamp
|
|
|
|
.checked_sub(timestamp)
|
|
|
|
.expect("Timestamps going backwards");
|
|
|
|
|
|
|
|
let composition_time_offset = if !stream.delta_frames.requires_dts() {
|
|
|
|
None
|
|
|
|
} else {
|
|
|
|
let pts = buffer.pts;
|
|
|
|
let dts = buffer.dts.unwrap();
|
|
|
|
|
|
|
|
Some(i64::try_from((pts - dts).nseconds()).map_err(|_| {
|
|
|
|
gst::error!(CAT, obj: stream.sinkpad, "Too big PTS/DTS difference");
|
|
|
|
gst::FlowError::Error
|
|
|
|
})?)
|
|
|
|
};
|
|
|
|
|
|
|
|
buffers.push_back(Buffer {
|
|
|
|
idx,
|
|
|
|
buffer: buffer.buffer,
|
|
|
|
timestamp,
|
|
|
|
duration,
|
|
|
|
composition_time_offset,
|
|
|
|
});
|
|
|
|
}
|
|
|
|
|
|
|
|
if buffers.is_empty() {
|
|
|
|
return Ok(None);
|
|
|
|
}
|
|
|
|
|
|
|
|
let earliest_pts = earliest_pts.unwrap();
|
|
|
|
let earliest_pts_position = earliest_pts_position.unwrap();
|
|
|
|
if stream.delta_frames.requires_dts() {
|
|
|
|
assert!(start_dts.is_some());
|
|
|
|
assert!(start_dts_position.is_some());
|
|
|
|
}
|
|
|
|
let start_dts = start_dts;
|
|
|
|
let start_dts_position = start_dts_position;
|
|
|
|
|
|
|
|
Ok(Some((
|
|
|
|
buffers,
|
|
|
|
earliest_pts,
|
|
|
|
earliest_pts_position,
|
|
|
|
end_pts,
|
|
|
|
start_dts,
|
|
|
|
start_dts_position,
|
|
|
|
end_dts,
|
|
|
|
)))
|
|
|
|
}
|
|
|
|
|
|
|
|
    /// Drain buffers from all streams for the current chunk.
    ///
    /// Also removes gap buffers, calculates buffer durations and various timestamps relevant for
    /// the current chunk.
    ///
    /// The first stream (or the first non-EOS stream) acts as the reference: its drained GOPs
    /// determine `chunk_end_pts`, up to which all other streams are then drained as well.
    /// Returns `AGGREGATOR_FLOW_NEED_DATA` if the reference stream has nothing drainable yet
    /// (live pipelines on timeout), so the caller can extend the timeout and retry.
    #[allow(clippy::type_complexity)]
    fn drain_buffers(
        &self,
        state: &mut State,
        settings: &Settings,
        timeout: bool,
        all_eos: bool,
    ) -> Result<
        (
            // Drained streams
            Vec<(super::FragmentHeaderStream, VecDeque<Buffer>)>,
            // Minimum earliest PTS position of all streams
            Option<gst::ClockTime>,
            // Minimum earliest PTS of all streams
            Option<gst::ClockTime>,
            // Minimum start DTS position of all streams (if any stream has DTS)
            Option<gst::ClockTime>,
            // End PTS of this drained fragment or chunk, i.e. start PTS of the next fragment or
            // chunk
            Option<gst::ClockTime>,
            // With these drained buffers the current fragment is filled
            bool,
            // These buffers make the start of a new fragment
            bool,
        ),
        gst::FlowError,
    > {
        let mut drained_streams = Vec::with_capacity(state.streams.len());

        let mut min_earliest_pts_position = None;
        let mut min_earliest_pts = None;
        let mut min_start_dts_position = None;
        let mut chunk_end_pts = None;

        // In fragment mode, each chunk is a full fragment. Otherwise, in chunk mode,
        // this fragment is filled if it is filled for the first non-EOS stream
        let fragment_filled = settings.chunk_duration.is_none()
            || state
                .streams
                .iter()
                .find(|s| !s.sinkpad.is_eos())
                .map(|s| s.fragment_filled)
                == Some(true);

        // NOTE(review): both unwraps assume the caller has already initialized the
        // fragment/chunk start PTS before draining — confirm against call sites.
        let fragment_start_pts = state.fragment_start_pts.unwrap();
        let chunk_start_pts = state.chunk_start_pts.unwrap();
        // A chunk that starts exactly at the fragment start is the first chunk of a fragment.
        let fragment_start = fragment_start_pts == chunk_start_pts;

        // The first stream decides how much can be dequeued, if anything at all.
        //
        // In chunk mode:
        // If more than the fragment duration has passed until the latest GOPs earliest PTS then
        // the fragment is considered filled and all GOPs until that GOP are drained. The next
        // chunk would start a new fragment, and would start with the keyframe at the beginning
        // of that latest GOP.
        //
        // Otherwise if more than a chunk duration is currently queued in GOPs of which the
        // earliest PTS is known then drain everything up to that position. If nothing can be
        // drained at all then advance the timeout by 1s until something can be dequeued.
        //
        // Otherwise:
        // All complete GOPs (or at EOS everything) up to the fragment duration will be dequeued
        // but on timeout in live pipelines it might happen that the first stream does not have a
        // complete GOP queued. In that case nothing is dequeued for any of the streams and the
        // timeout is advanced by 1s until at least one complete GOP can be dequeued.
        //
        // If the first stream is already EOS then the next stream that is not EOS yet will be
        // taken in its place.
        gst::info!(
            CAT,
            imp: self,
            "Starting to drain at {} (fragment start {}, fragment end {}, chunk start {}, chunk end {})",
            chunk_start_pts,
            fragment_start_pts,
            fragment_start_pts + settings.fragment_duration,
            chunk_start_pts.display(),
            settings.chunk_duration.map(|duration| chunk_start_pts + duration).display(),
        );

        for (idx, stream) in state.streams.iter_mut().enumerate() {
            // Per-pad settings snapshot; cloned so the lock is not held across draining.
            let stream_settings = stream.sinkpad.imp().settings.lock().unwrap().clone();

            // `chunk_end_pts` is still `None` for the first (reference) stream and set for all
            // following streams, which are drained up to that PTS.
            let gops = self.drain_buffers_one_stream(
                settings,
                stream,
                timeout,
                all_eos,
                fragment_start_pts,
                chunk_start_pts,
                chunk_end_pts,
                fragment_start,
                fragment_filled,
            )?;
            // Reset the fill flags; they are re-evaluated after draining.
            stream.fragment_filled = false;
            stream.chunk_filled = false;

            // If we don't have a next chunk start PTS then this is the first stream as above.
            if chunk_end_pts.is_none() {
                if let Some(last_gop) = gops.last() {
                    // Dequeued something so let's take the end PTS of the last GOP
                    chunk_end_pts = Some(last_gop.end_pts);
                    gst::info!(
                        CAT,
                        obj: stream.sinkpad,
                        "Draining up to PTS {} for this chunk",
                        last_gop.end_pts,
                    );
                } else {
                    // If nothing was dequeued for the first stream then this is OK if we're at
                    // EOS: we just consider the next stream as first stream then.
                    if all_eos || stream.sinkpad.is_eos() {
                        // This is handled below generally if nothing was dequeued
                    } else {
                        if settings.chunk_duration.is_some() {
                            gst::debug!(
                                CAT,
                                obj: stream.sinkpad,
                                "Don't have anything to drain for the first stream on timeout in a live pipeline",
                            );
                        } else {
                            gst::warning!(
                                CAT,
                                obj: stream.sinkpad,
                                "Don't have a complete GOP for the first stream on timeout in a live pipeline",
                            );
                        }

                        // In this case we advance the timeout by 1s and hope that things are
                        // better then.
                        return Err(gst_base::AGGREGATOR_FLOW_NEED_DATA);
                    }
                }
            } else if all_eos {
                // At EOS every stream is drained completely, so the chunk end has to grow to
                // cover the longest stream.
                if let Some(last_gop) = gops.last() {
                    if chunk_end_pts.map_or(true, |chunk_end_pts| chunk_end_pts < last_gop.end_pts)
                    {
                        chunk_end_pts = Some(last_gop.end_pts);
                    }
                }
            }

            if gops.is_empty() {
                gst::info!(
                    CAT,
                    obj: stream.sinkpad,
                    "Draining no buffers",
                );

                // Still push an (empty) entry so stream indices stay aligned with
                // `state.streams` for the fragment header.
                drained_streams.push((
                    super::FragmentHeaderStream {
                        caps: stream.caps.clone(),
                        start_time: None,
                        delta_frames: stream.delta_frames,
                        trak_timescale: stream_settings.trak_timescale,
                    },
                    VecDeque::new(),
                ));

                continue;
            }

            // Something was drained for this stream, so the reference stream must have
            // established a chunk end by now.
            assert!(chunk_end_pts.is_some());

            if let Some((prev_gop, first_gop)) = Option::zip(
                stream.queued_gops.iter().find(|gop| gop.final_end_pts),
                stream.queued_gops.back(),
            ) {
                gst::debug!(
                    CAT,
                    obj: stream.sinkpad,
                    "Queued full GOPs duration updated to {}",
                    prev_gop.end_pts.saturating_sub(first_gop.earliest_pts),
                );
            }

            gst::debug!(
                CAT,
                obj: stream.sinkpad,
                "Queued duration updated to {}",
                Option::zip(stream.queued_gops.front(), stream.queued_gops.back())
                    .map(|(end, start)| end.end_pts.saturating_sub(start.start_pts))
                    .unwrap_or(gst::ClockTime::ZERO)
            );

            // First flatten all GOPs into a single `Vec`
            let buffers = self.flatten_gops(idx, stream, gops)?;
            let (
                buffers,
                earliest_pts,
                earliest_pts_position,
                end_pts,
                start_dts,
                start_dts_position,
                _end_dts,
            ) = match buffers {
                Some(res) => res,
                // `None` means everything drained was gap buffers; treat like an empty stream.
                None => {
                    gst::info!(
                        CAT,
                        obj: stream.sinkpad,
                        "Drained only gap buffers",
                    );

                    drained_streams.push((
                        super::FragmentHeaderStream {
                            caps: stream.caps.clone(),
                            start_time: None,
                            delta_frames: stream.delta_frames,
                            trak_timescale: stream_settings.trak_timescale,
                        },
                        VecDeque::new(),
                    ));

                    continue;
                }
            };

            gst::info!(
                CAT,
                obj: stream.sinkpad,
                "Draining {} worth of buffers starting at PTS {} DTS {}, DTS offset {}",
                end_pts.saturating_sub(earliest_pts),
                earliest_pts,
                start_dts.display(),
                stream.dts_offset.display(),
            );

            // Streams with reordering (B-frames) are timed by DTS, others by PTS.
            let start_time = if !stream.delta_frames.requires_dts() {
                earliest_pts
            } else {
                start_dts.unwrap()
            };

            // Track the minima across all streams for the chunk/fragment header timestamps.
            if min_earliest_pts.opt_gt(earliest_pts).unwrap_or(true) {
                min_earliest_pts = Some(earliest_pts);
            }
            if min_earliest_pts_position
                .opt_gt(earliest_pts_position)
                .unwrap_or(true)
            {
                min_earliest_pts_position = Some(earliest_pts_position);
            }
            if let Some(start_dts_position) = start_dts_position {
                if min_start_dts_position
                    .opt_gt(start_dts_position)
                    .unwrap_or(true)
                {
                    min_start_dts_position = Some(start_dts_position);
                }
            }

            drained_streams.push((
                super::FragmentHeaderStream {
                    caps: stream.caps.clone(),
                    start_time: Some(start_time),
                    delta_frames: stream.delta_frames,
                    trak_timescale: stream_settings.trak_timescale,
                },
                buffers,
            ));
        }

        Ok((
            drained_streams,
            min_earliest_pts_position,
            min_earliest_pts,
            min_start_dts_position,
            chunk_end_pts,
            fragment_filled,
            fragment_start,
        ))
    }
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
    /// Interleave drained buffers of each stream for this chunk according to the settings.
    ///
    /// Buffers are taken in rounds: each round picks the stream whose front buffer has the
    /// smallest timestamp (ties broken by stream index, so stream order is stable) and dequeues
    /// from it until the configured interleave limits (`interleave_bytes` / `interleave_time`)
    /// are exceeded or the stream runs dry. Loops until every stream's queue is empty.
    ///
    /// Returns the flat, interleaved buffer list plus the per-stream fragment header metadata.
    #[allow(clippy::type_complexity)]
    fn interleave_buffers(
        &self,
        settings: &Settings,
        mut drained_streams: Vec<(super::FragmentHeaderStream, VecDeque<Buffer>)>,
    ) -> Result<(Vec<Buffer>, Vec<super::FragmentHeaderStream>), gst::FlowError> {
        let mut interleaved_buffers =
            Vec::with_capacity(drained_streams.iter().map(|(_, bufs)| bufs.len()).sum());
        while let Some((_idx, (_, bufs))) =
            drained_streams
                .iter_mut()
                .enumerate()
                .min_by(|(a_idx, (_, a)), (b_idx, (_, b))| {
                    // Empty queues sort last so a non-empty queue is always selected if one
                    // exists.
                    let (a, b) = match (a.front(), b.front()) {
                        (None, None) => return std::cmp::Ordering::Equal,
                        (None, _) => return std::cmp::Ordering::Greater,
                        (_, None) => return std::cmp::Ordering::Less,
                        (Some(a), Some(b)) => (a, b),
                    };

                    // Equal timestamps fall back to stream index for a deterministic order.
                    match a.timestamp.cmp(&b.timestamp) {
                        std::cmp::Ordering::Equal => a_idx.cmp(b_idx),
                        cmp => cmp,
                    }
                })
        {
            let start_time = match bufs.front() {
                None => {
                    // No more buffers now
                    break;
                }
                Some(buf) => buf.timestamp,
            };
            let mut current_end_time = start_time;
            let mut dequeued_bytes = 0;

            // Keep dequeuing from this stream while BOTH limits allow it; an unset limit
            // (`None`) never constrains (`unwrap_or(true)`).
            while settings
                .interleave_bytes
                .opt_ge(dequeued_bytes)
                .unwrap_or(true)
                && settings
                    .interleave_time
                    .opt_ge(current_end_time.saturating_sub(start_time))
                    .unwrap_or(true)
            {
                if let Some(buffer) = bufs.pop_front() {
                    current_end_time = buffer.timestamp + buffer.duration;
                    dequeued_bytes += buffer.buffer.size() as u64;
                    interleaved_buffers.push(buffer);
                } else {
                    // No buffers left in this stream, go to next stream
                    break;
                }
            }
        }

        // All buffers should be consumed now
        assert!(drained_streams.iter().all(|(_, bufs)| bufs.is_empty()));

        let streams = drained_streams
            .into_iter()
            .map(|(stream, _)| stream)
            .collect::<Vec<_>>();

        Ok((interleaved_buffers, streams))
    }
|
2022-05-12 10:44:20 +00:00
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
    /// Request a force-keyunit event for the start of the next fragment.
    ///
    /// This is called whenever the last chunk of a fragment is pushed out.
    ///
    /// `chunk_end_pts` gives the time of the previously drained chunk, which
    /// ideally should be lower than the next fragment starts PTS.
    ///
    /// Events are only collected into `upstream_events` here; the caller is responsible for
    /// actually pushing them upstream.
    fn request_force_keyunit_event(
        &self,
        state: &State,
        settings: &Settings,
        upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>,
        chunk_end_pts: gst::ClockTime,
    ) {
        // Ask for a keyframe one fragment duration after the chunk that was just drained.
        let fku_time = chunk_end_pts + settings.fragment_duration;

        for stream in &state.streams {
            let current_position = stream.current_position;

            // In case of ONVIF this needs to be converted back from UTC time to
            // the stream's running time
            let (fku_time, current_position) =
                if self.obj().class().as_ref().variant == super::Variant::ONVIF {
                    (
                        if let Some(fku_time) = utc_time_to_running_time(
                            fku_time,
                            // NOTE(review): unwrap assumes the UTC/running-time mapping was
                            // established before any fragment was drained — confirm upstream.
                            stream.running_time_utc_time_mapping.unwrap(),
                        ) {
                            fku_time
                        } else {
                            // No valid running time for this stream; skip it entirely.
                            continue;
                        },
                        utc_time_to_running_time(
                            current_position,
                            stream.running_time_utc_time_mapping.unwrap(),
                        ),
                    )
                } else {
                    (fku_time, Some(current_position))
                };

            // If the stream already advanced past the requested time, send the event without a
            // running time (`None`) and warn, otherwise request the keyframe at `fku_time`.
            let fku_time =
                if current_position.map_or(false, |current_position| current_position > fku_time) {
                    gst::warning!(
                        CAT,
                        obj: stream.sinkpad,
                        "Sending force-keyunit event late for running time {} at {}",
                        fku_time,
                        current_position.display(),
                    );
                    None
                } else {
                    gst::debug!(
                        CAT,
                        obj: stream.sinkpad,
                        "Sending force-keyunit event for running time {}",
                        fku_time,
                    );
                    Some(fku_time)
                };

            let fku = gst_video::UpstreamForceKeyUnitEvent::builder()
                .running_time(fku_time)
                .all_headers(true)
                .build();

            upstream_events.push((stream.sinkpad.clone(), fku));
        }
    }
|
|
|
|
|
2023-01-23 18:43:26 +00:00
|
|
|
    /// Fills upstream events as needed and returns the caps the first time draining can happen.
    ///
    /// If it returns `(_, None)` then there's currently nothing to drain anymore.
    ///
    /// Drains exactly one chunk (or full fragment): collects the drainable buffers from all
    /// streams, interleaves them, prepends the stream header (first time only) and the
    /// moof/chunk header, fixes up buffer flags, records the mfra offset, and advances the
    /// fragment/chunk start state for the next iteration.
    fn drain_one_chunk(
        &self,
        state: &mut State,
        settings: &Settings,
        timeout: bool,
        at_eos: bool,
        upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>,
    ) -> Result<(Option<gst::Caps>, Option<gst::BufferList>), gst::FlowError> {
        // At EOS or on timeout we drain unconditionally; otherwise only when every stream is
        // either filled (chunk or fragment) or EOS.
        if at_eos {
            gst::info!(CAT, imp: self, "Draining at EOS");
        } else if timeout {
            gst::info!(CAT, imp: self, "Draining at timeout");
        } else {
            for stream in &state.streams {
                if !stream.chunk_filled && !stream.fragment_filled && !stream.sinkpad.is_eos() {
                    return Ok((None, None));
                }
            }
            gst::info!(
                CAT,
                imp: self,
                "Draining because all streams have enough data queued"
            );
        }

        // Collect all buffers and their timing information that are to be drained right now.
        let (
            drained_streams,
            min_earliest_pts_position,
            min_earliest_pts,
            min_start_dts_position,
            chunk_end_pts,
            fragment_filled,
            fragment_start,
        ) = self.drain_buffers(state, settings, timeout, at_eos)?;

        // Create header now if it was not created before and return the caps
        let mut caps = None;
        if state.stream_header.is_none() {
            let (_, new_caps) = self.update_header(state, settings, false)?.unwrap();
            caps = Some(new_caps);
        }

        // Interleave buffers according to the settings into a single vec
        let (mut interleaved_buffers, mut streams) =
            self.interleave_buffers(settings, drained_streams)?;

        // Offset stream start time to start at 0 in ONVIF mode, or if 'offset-to-zero' is enabled,
        // instead of using the UTC time verbatim. This would be used for the tfdt box later.
        // FIXME: Should this use the original DTS-or-PTS running time instead?
        // That might be negative though!
        if self.obj().class().as_ref().variant == super::Variant::ONVIF || settings.offset_to_zero {
            // Offset by the overall minimum of start DTS and earliest PTS so no start time
            // goes negative.
            let offset = if let Some(start_dts) = state.start_dts {
                std::cmp::min(start_dts, state.earliest_pts.unwrap())
            } else {
                state.earliest_pts.unwrap()
            };
            for stream in &mut streams {
                if let Some(start_time) = stream.start_time {
                    stream.start_time = Some(start_time.checked_sub(offset).unwrap());
                }
            }
        }

        if interleaved_buffers.is_empty() {
            // drain_buffers() only returns nothing at all once everything is EOS.
            assert!(at_eos);
            return Ok((caps, None));
        }

        // If there are actual buffers to output then create headers as needed and create a
        // bufferlist for all buffers that have to be output.
        let min_earliest_pts_position = min_earliest_pts_position.unwrap();
        let min_earliest_pts = min_earliest_pts.unwrap();
        let chunk_end_pts = chunk_end_pts.unwrap();

        gst::debug!(
            CAT,
            imp: self,
            concat!(
                "Draining chunk (fragment start: {} fragment end: {}) ",
                "from PTS {} to {}"
            ),
            fragment_start,
            fragment_filled,
            min_earliest_pts,
            chunk_end_pts,
        );

        // The initialization segment (ftyp/moov) is sent exactly once, before the first moof.
        let mut fmp4_header = None;
        if !state.sent_headers {
            let mut buffer = state.stream_header.as_ref().unwrap().copy();
            {
                let buffer = buffer.get_mut().unwrap();

                buffer.set_pts(min_earliest_pts_position);
                buffer.set_dts(min_start_dts_position);

                // Header is DISCONT|HEADER
                buffer.set_flags(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER);
            }

            fmp4_header = Some(buffer);

            state.sent_headers = true;
        }

        // TODO: Write prft boxes before moof
        // TODO: Write sidx boxes before moof and rewrite once offsets are known

        // First sequence number must be 1
        if state.sequence_number == 0 {
            state.sequence_number = 1;
        }
        let sequence_number = state.sequence_number;
        // If this is the last chunk of a fragment then increment the sequence number for the
        // start of the next fragment.
        if fragment_filled {
            state.sequence_number += 1;
        }
        let (mut fmp4_fragment_header, moof_offset) =
            boxes::create_fmp4_fragment_header(super::FragmentHeaderConfiguration {
                variant: self.obj().class().as_ref().variant,
                sequence_number,
                chunk: !fragment_start,
                streams: streams.as_slice(),
                buffers: interleaved_buffers.as_slice(),
            })
            .map_err(|err| {
                gst::error!(
                    CAT,
                    imp: self,
                    "Failed to create FMP4 fragment header: {}",
                    err
                );
                gst::FlowError::Error
            })?;

        {
            let buffer = fmp4_fragment_header.get_mut().unwrap();
            buffer.set_pts(min_earliest_pts_position);
            buffer.set_dts(min_start_dts_position);
            buffer.set_duration(chunk_end_pts.checked_sub(min_earliest_pts));

            // Fragment and chunk header is HEADER
            buffer.set_flags(gst::BufferFlags::HEADER);
            // Chunk header is DELTA_UNIT
            if !fragment_start {
                buffer.set_flags(gst::BufferFlags::DELTA_UNIT);
            }

            // Copy metas from the first actual buffer to the fragment header. This allows
            // getting things like the reference timestamp meta or the timecode meta to identify
            // the fragment.
            let _ = interleaved_buffers[0].buffer.copy_into(
                buffer,
                gst::BufferCopyFlags::META,
                0,
                None,
            );
        }

        // Absolute byte offset of the moof box in the output stream, accounting for the
        // optional initialization segment in front of it.
        let moof_offset = state.current_offset
            + fmp4_header.as_ref().map(|h| h.size()).unwrap_or(0) as u64
            + moof_offset;

        let buffers_len = interleaved_buffers.len();
        for (idx, buffer) in interleaved_buffers.iter_mut().enumerate() {
            // Fix up buffer flags, all other buffers are DELTA_UNIT
            let buffer_ref = buffer.buffer.make_mut();
            buffer_ref.unset_flags(gst::BufferFlags::all());
            buffer_ref.set_flags(gst::BufferFlags::DELTA_UNIT);

            // Set the marker flag for the last buffer of the segment
            if idx == buffers_len - 1 {
                buffer_ref.set_flags(gst::BufferFlags::MARKER);
            }
        }

        // Assemble the output: [init header] + fragment/chunk header + media buffers, while
        // accumulating the running byte offset for future mfra entries.
        let buffer_list = fmp4_header
            .into_iter()
            .chain(Some(fmp4_fragment_header))
            .chain(interleaved_buffers.into_iter().map(|buffer| buffer.buffer))
            .inspect(|b| {
                state.current_offset += b.size() as u64;
            })
            .collect::<gst::BufferList>();

        if settings.write_mfra && fragment_start {
            // Write mfra only for the main stream on fragment starts, and if there are no
            // buffers for the main stream in this segment then don't write anything.
            if let Some(super::FragmentHeaderStream {
                start_time: Some(start_time),
                ..
            }) = streams.get(0)
            {
                state.fragment_offsets.push(super::FragmentOffset {
                    time: *start_time,
                    offset: moof_offset,
                });
            }
        }

        state.end_pts = Some(chunk_end_pts);

        // Update for the start PTS of the next fragment / chunk

        if fragment_filled {
            state.fragment_start_pts = Some(chunk_end_pts);
            gst::info!(CAT, imp: self, "Starting new fragment at {}", chunk_end_pts,);
        } else {
            gst::info!(CAT, imp: self, "Starting new chunk at {}", chunk_end_pts,);
        }
        state.chunk_start_pts = Some(chunk_end_pts);

        // If the current fragment is filled we already have the next fragment's start
        // keyframe and can request the following one.
        if fragment_filled {
            self.request_force_keyunit_event(state, settings, upstream_events, chunk_end_pts);
        }

        // Reset timeout delay now that we've output an actual fragment or chunk
        state.timeout_delay = gst::ClockTime::ZERO;

        // TODO: Write edit list at EOS
        // TODO: Rewrite bitrates at EOS

        Ok((caps, Some(buffer_list)))
    }
|
|
|
|
|
|
|
|
    /// Drain all chunks that can currently be drained.
    ///
    /// On error the `caps`, `buffers` or `upstream_events` can contain data of already finished
    /// chunks that were complete before the error.
    ///
    /// Repeatedly calls [`drain_one_chunk`] until it reports nothing more to drain; at full EOS
    /// the mfra footer box is appended as the final output. `AGGREGATOR_FLOW_NEED_DATA` extends
    /// the timeout delay by one second before being propagated.
    #[allow(clippy::too_many_arguments)]
    fn drain(
        &self,
        state: &mut State,
        settings: &Settings,
        all_eos: bool,
        mut timeout: bool,
        caps: &mut Option<gst::Caps>,
        buffers: &mut Vec<gst::BufferList>,
        upstream_events: &mut Vec<(super::FMP4MuxPad, gst::Event)>,
    ) -> Result<(), gst::FlowError> {
        // Loop as long as new chunks can be drained.
        loop {
            // If enough GOPs were queued, drain and create the output fragment or chunk
            let res = self.drain_one_chunk(state, settings, timeout, all_eos, upstream_events);
            let mut buffer_list = match res {
                Ok((new_caps, buffer_list)) => {
                    // Only the first successfully produced caps are kept.
                    if caps.is_none() {
                        *caps = new_caps;
                    }

                    buffer_list
                }
                Err(err) => {
                    if err == gst_base::AGGREGATOR_FLOW_NEED_DATA {
                        // NEED_DATA can't happen once everything is EOS.
                        assert!(!all_eos);
                        gst::debug!(CAT, imp: self, "Need more data");
                        state.timeout_delay += 1.seconds();
                    }

                    return Err(err);
                }
            };

            // If nothing can't be drained anymore then break the loop, and if all streams are
            // EOS add the footers.
            if buffer_list.is_none() {
                if settings.write_mfra && all_eos {
                    gst::debug!(CAT, imp: self, "Writing mfra box");
                    match boxes::create_mfra(&state.streams[0].caps, &state.fragment_offsets) {
                        Ok(mut mfra) => {
                            {
                                let mfra = mfra.get_mut().unwrap();
                                // mfra is DELTA_UNIT like other buffers
                                mfra.set_flags(gst::BufferFlags::DELTA_UNIT);
                            }

                            if buffer_list.is_none() {
                                buffer_list = Some(gst::BufferList::new_sized(1));
                            }
                            buffer_list.as_mut().unwrap().get_mut().unwrap().add(mfra);
                            buffers.extend(buffer_list);
                        }
                        Err(err) => {
                            // mfra is best-effort: failing to create it only loses the index,
                            // not the media, so log instead of erroring out.
                            gst::error!(CAT, imp: self, "Failed to create mfra box: {}", err);
                        }
                    }
                }

                break Ok(());
            }

            // Otherwise extend the list of bufferlists and check again if something can be
            // drained.
            buffers.extend(buffer_list);

            // Only the first iteration is considered a timeout.
            timeout = false;

            let fragment_start_pts = state.fragment_start_pts;
            let chunk_start_pts = state.chunk_start_pts;
            for stream in &mut state.streams {
                // Check if this stream is still filled enough now.
                self.check_stream_filled(
                    settings,
                    stream,
                    fragment_start_pts,
                    chunk_start_pts,
                    all_eos,
                );
            }

            // And try draining a fragment again
        }
    }
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
    /// Create all streams.
    ///
    /// Snapshots all sink pads into `state.streams`: validates each pad's caps
    /// for the supported codecs, derives the delta-frame model for the codec,
    /// and sorts the streams (video first, then audio, then ONVIF metadata;
    /// by pad name within each group).
    ///
    /// Returns `NotNegotiated` for caps missing required fields (codec_data,
    /// colorimetry, valid Opus header) and `Error` if no pad has caps at all.
    fn create_streams(&self, state: &mut State) -> Result<(), gst::FlowError> {
        for pad in self
            .obj()
            .sink_pads()
            .into_iter()
            // Sink pads are always FMP4MuxPad instances by construction.
            .map(|pad| pad.downcast::<super::FMP4MuxPad>().unwrap())
        {
            let caps = match pad.current_caps() {
                Some(caps) => caps,
                None => {
                    // A pad without caps never received data; it is simply ignored.
                    gst::warning!(CAT, obj: pad, "Skipping pad without caps");
                    continue;
                }
            };

            gst::info!(CAT, obj: pad, "Configuring caps {:?}", caps);

            let s = caps.structure(0).unwrap();

            // Default: every frame is a sync point (e.g. JPEG, raw-ish audio).
            let mut delta_frames = DeltaFrames::IntraOnly;
            match s.name() {
                "video/x-h264" | "video/x-h265" => {
                    // codec_data (avcC/hvcC) is required to write the sample entry.
                    if !s.has_field_with_type("codec_data", gst::Buffer::static_type()) {
                        gst::error!(CAT, obj: pad, "Received caps without codec_data");
                        return Err(gst::FlowError::NotNegotiated);
                    }
                    // H.264/H.265 can have B-frames (PTS != DTS).
                    delta_frames = DeltaFrames::Bidirectional;
                }
                "video/x-vp8" => {
                    // VP8 has inter frames but no frame reordering.
                    delta_frames = DeltaFrames::PredictiveOnly;
                }
                "video/x-vp9" => {
                    // Colorimetry is required for the vpcC box.
                    if !s.has_field_with_type("colorimetry", str::static_type()) {
                        gst::error!(CAT, obj: pad, "Received caps without colorimetry");
                        return Err(gst::FlowError::NotNegotiated);
                    }
                    delta_frames = DeltaFrames::PredictiveOnly;
                }
                "video/x-av1" => {
                    delta_frames = DeltaFrames::PredictiveOnly;
                }
                "image/jpeg" => (),
                "audio/mpeg" => {
                    // AudioSpecificConfig is required for the esds box.
                    if !s.has_field_with_type("codec_data", gst::Buffer::static_type()) {
                        gst::error!(CAT, obj: pad, "Received caps without codec_data");
                        return Err(gst::FlowError::NotNegotiated);
                    }
                }
                "audio/x-opus" => {
                    // Prefer the OpusHead streamheader buffer if present,
                    // otherwise fall back to parsing the caps fields directly.
                    if let Some(header) = s
                        .get::<gst::ArrayRef>("streamheader")
                        .ok()
                        .and_then(|a| a.get(0).and_then(|v| v.get::<gst::Buffer>().ok()))
                    {
                        if gst_pbutils::codec_utils_opus_parse_header(&header, None).is_err() {
                            gst::error!(CAT, obj: pad, "Received invalid Opus header");
                            return Err(gst::FlowError::NotNegotiated);
                        }
                    } else if gst_pbutils::codec_utils_opus_parse_caps(&caps, None).is_err() {
                        gst::error!(CAT, obj: pad, "Received invalid Opus caps");
                        return Err(gst::FlowError::NotNegotiated);
                    }
                }
                "audio/x-alaw" | "audio/x-mulaw" => (),
                "audio/x-adpcm" => (),
                "application/x-onvif-metadata" => (),
                // Caps are constrained by the pad templates, so any other
                // media type here is a programming error.
                _ => unreachable!(),
            }

            state.streams.push(Stream {
                sinkpad: pad,
                caps,
                delta_frames,
                pre_queue: VecDeque::new(),
                queued_gops: VecDeque::new(),
                fragment_filled: false,
                chunk_filled: false,
                dts_offset: None,
                current_position: gst::ClockTime::ZERO,
                running_time_utc_time_mapping: None,
            });
        }

        if state.streams.is_empty() {
            gst::error!(CAT, imp: self, "No streams available");
            return Err(gst::FlowError::Error);
        }

        // Sort video streams first and then audio streams and then metadata streams, and each group by pad name.
        state.streams.sort_by(|a, b| {
            // Map a stream's caps to its ordering class: 0 video, 1 audio, 2 metadata.
            let order_of_caps = |caps: &gst::CapsRef| {
                let s = caps.structure(0).unwrap();

                if s.name().starts_with("video/") {
                    0
                } else if s.name().starts_with("audio/") {
                    1
                } else if s.name().starts_with("application/x-onvif-metadata") {
                    2
                } else {
                    unimplemented!();
                }
            };

            let st_a = order_of_caps(&a.caps);
            let st_b = order_of_caps(&b.caps);

            // Same class: order deterministically by pad name.
            if st_a == st_b {
                return a.sinkpad.name().cmp(&b.sinkpad.name());
            }

            st_a.cmp(&st_b)
        });

        Ok(())
    }
|
|
|
|
|
2023-01-30 14:27:46 +00:00
|
|
|
    /// Generate an updated header at the end and the corresponding caps with the new streamheader.
    ///
    /// Builds the `moov` header from the current stream configuration. With
    /// `at_eos` set this is the final header update (includes the total
    /// duration if `write-mehd` is enabled); otherwise it is the initial
    /// header. Returns `None` if no header update is required
    /// (header-update-mode=none at EOS).
    fn update_header(
        &self,
        state: &mut State,
        settings: &Settings,
        at_eos: bool,
    ) -> Result<Option<(gst::BufferList, gst::Caps)>, gst::FlowError> {
        let aggregator = self.obj();
        let class = aggregator.class();
        // The variant (ISO/CMAF/DASH/ONVIF) is fixed per subclass via the class struct.
        let variant = class.as_ref().variant;

        if settings.header_update_mode == super::HeaderUpdateMode::None && at_eos {
            return Ok(None);
        }

        // At EOS everything must have been drained already.
        assert!(!at_eos || state.streams.iter().all(|s| s.queued_gops.is_empty()));

        // Total stream duration; None if either endpoint is unknown.
        let duration = state
            .end_pts
            .opt_checked_sub(state.earliest_pts)
            .ok()
            .flatten();

        let streams = state
            .streams
            .iter()
            .map(|s| super::HeaderStream {
                trak_timescale: s.sinkpad.imp().settings.lock().unwrap().trak_timescale,
                delta_frames: s.delta_frames,
                caps: s.caps.clone(),
            })
            .collect::<Vec<_>>();

        let mut buffer = boxes::create_fmp4_header(super::HeaderConfiguration {
            variant,
            update: at_eos,
            movie_timescale: settings.movie_timescale,
            streams,
            write_mehd: settings.write_mehd,
            // Only meaningful for the final header, where the duration is known.
            duration: if at_eos { duration } else { None },
            // ONVIF stores the start time in 100ns units since Jan 1 1601.
            start_utc_time: if variant == super::Variant::ONVIF {
                state
                    .earliest_pts
                    .map(|unix| unix.nseconds() / 100 + UNIX_1601_OFFSET * 10_000_000)
            } else {
                None
            },
        })
        .map_err(|err| {
            gst::error!(CAT, imp: self, "Failed to create FMP4 header: {}", err);
            gst::FlowError::Error
        })?;

        {
            let buffer = buffer.get_mut().unwrap();

            // No timestamps

            // Header is DISCONT|HEADER
            buffer.set_flags(gst::BufferFlags::DISCONT | gst::BufferFlags::HEADER);
        }

        // Remember stream header for later
        state.stream_header = Some(buffer.clone());

        let variant = match variant {
            super::Variant::ISO | super::Variant::DASH | super::Variant::ONVIF => "iso-fragmented",
            super::Variant::CMAF => "cmaf",
        };
        let caps = gst::Caps::builder("video/quicktime")
            .field("variant", variant)
            .field("streamheader", gst::Array::new([&buffer]))
            .build();

        let mut list = gst::BufferList::new_sized(1);
        {
            let list = list.get_mut().unwrap();
            list.add(buffer);
        }

        Ok(Some((list, caps)))
    }
|
2023-01-30 14:27:46 +00:00
|
|
|
|
|
|
|
    /// Finish the stream by rewriting / updating headers.
    ///
    /// Called after EOS output was pushed. Depending on `header-update-mode`:
    /// - `None`: nothing to do except resetting `sent_headers`.
    /// - `Rewrite`: seek downstream back to byte 0 and overwrite the initial
    ///   header (requires a seekable peer).
    /// - `Update`: push the updated header at the current position.
    fn finish(&self, settings: &Settings) {
        // Do remaining EOS handling after the end of the stream was pushed.
        gst::debug!(CAT, imp: self, "Doing EOS handling");

        if settings.header_update_mode == super::HeaderUpdateMode::None {
            // Need to output new headers if started again after EOS
            self.state.lock().unwrap().sent_headers = false;
            return;
        }

        let updated_header = self.update_header(&mut self.state.lock().unwrap(), settings, true);
        match updated_header {
            Ok(Some((buffer_list, caps))) => {
                match settings.header_update_mode {
                    // Filtered out above.
                    super::HeaderUpdateMode::None => unreachable!(),
                    super::HeaderUpdateMode::Rewrite => {
                        // Rewriting only works if downstream can seek in BYTES format.
                        let mut q = gst::query::Seeking::new(gst::Format::Bytes);
                        if self.obj().src_pad().peer_query(&mut q) && q.result().0 {
                            let aggregator = self.obj();

                            aggregator.set_src_caps(&caps);

                            // Seek to the beginning with a default bytes segment
                            aggregator.update_segment(
                                &gst::FormattedSegment::<gst::format::Bytes>::new(),
                            );

                            if let Err(err) = aggregator.finish_buffer_list(buffer_list) {
                                gst::error!(
                                    CAT,
                                    imp: self,
                                    "Failed pushing updated header buffer downstream: {:?}",
                                    err,
                                );
                            }
                        } else {
                            gst::error!(
                                CAT,
                                imp: self,
                                "Can't rewrite header because downstream is not seekable"
                            );
                        }
                    }
                    super::HeaderUpdateMode::Update => {
                        let aggregator = self.obj();

                        aggregator.set_src_caps(&caps);
                        if let Err(err) = aggregator.finish_buffer_list(buffer_list) {
                            gst::error!(
                                CAT,
                                imp: self,
                                "Failed pushing updated header buffer downstream: {:?}",
                                err,
                            );
                        }
                    }
                }
            }
            Ok(None) => {}
            Err(err) => {
                gst::error!(
                    CAT,
                    imp: self,
                    "Failed to generate updated header: {:?}",
                    err
                );
            }
        }

        // Need to output new headers if started again after EOS
        self.state.lock().unwrap().sent_headers = false;
    }
|
2021-10-18 06:42:42 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
// GObject type registration for the abstract base muxer. The custom `Class`
// struct carries the per-subclass `variant` field (see `Class` below).
#[glib::object_subclass]
impl ObjectSubclass for FMP4Mux {
    const NAME: &'static str = "GstFMP4Mux";
    type Type = super::FMP4Mux;
    type ParentType = gst_base::Aggregator;
    type Class = Class;
}
|
|
|
|
|
|
|
|
impl ObjectImpl for FMP4Mux {
    /// GObject property declarations for the muxer. All properties are
    /// `mutable_ready`, i.e. changeable only in NULL/READY state.
    fn properties() -> &'static [glib::ParamSpec] {
        static PROPERTIES: Lazy<Vec<glib::ParamSpec>> = Lazy::new(|| {
            vec![
                glib::ParamSpecUInt64::builder("fragment-duration")
                    .nick("Fragment Duration")
                    .blurb("Duration for each FMP4 fragment")
                    .default_value(DEFAULT_FRAGMENT_DURATION.nseconds())
                    .mutable_ready()
                    .build(),
                glib::ParamSpecUInt64::builder("chunk-duration")
                    .nick("Chunk Duration")
                    .blurb("Duration for each FMP4 chunk (default = no chunks)")
                    // u64::MAX maps to "no chunking" (ClockTime::NONE).
                    .default_value(u64::MAX)
                    .mutable_ready()
                    .build(),
                glib::ParamSpecEnum::builder::<super::HeaderUpdateMode>("header-update-mode", DEFAULT_HEADER_UPDATE_MODE)
                    .nick("Header update mode")
                    .blurb("Mode for updating the header at the end of the stream")
                    .mutable_ready()
                    .build(),
                glib::ParamSpecBoolean::builder("write-mfra")
                    .nick("Write mfra box")
                    .blurb("Write fragment random access box at the end of the stream")
                    .default_value(DEFAULT_WRITE_MFRA)
                    .mutable_ready()
                    .build(),
                glib::ParamSpecBoolean::builder("write-mehd")
                    .nick("Write mehd box")
                    .blurb("Write movie extends header box with the duration at the end of the stream (needs a header-update-mode enabled)")
                    // NOTE(review): this reuses DEFAULT_WRITE_MFRA as the default for
                    // write-mehd — looks like a copy-paste; confirm whether a
                    // DEFAULT_WRITE_MEHD constant exists and should be used here.
                    .default_value(DEFAULT_WRITE_MFRA)
                    .mutable_ready()
                    .build(),
                glib::ParamSpecUInt64::builder("interleave-bytes")
                    .nick("Interleave Bytes")
                    .blurb("Interleave between streams in bytes")
                    // 0 means "no byte-based interleaving" (None internally).
                    .default_value(DEFAULT_INTERLEAVE_BYTES.unwrap_or(0))
                    .mutable_ready()
                    .build(),
                glib::ParamSpecUInt64::builder("interleave-time")
                    .nick("Interleave Time")
                    .blurb("Interleave between streams in nanoseconds")
                    .default_value(DEFAULT_INTERLEAVE_TIME.map(gst::ClockTime::nseconds).unwrap_or(u64::MAX))
                    .mutable_ready()
                    .build(),
                glib::ParamSpecUInt::builder("movie-timescale")
                    .nick("Movie Timescale")
                    .blurb("Timescale to use for the movie (units per second, 0 is automatic)")
                    .mutable_ready()
                    .build(),
            ]
        });

        &PROPERTIES
    }

    /// Property setter. Duration-related properties also update the element
    /// latency: latency = chunk-duration if set, else fragment-duration.
    fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) {
        match pspec.name() {
            "fragment-duration" => {
                let mut settings = self.settings.lock().unwrap();
                let fragment_duration = value.get().expect("type checked upstream");
                if settings.fragment_duration != fragment_duration {
                    settings.fragment_duration = fragment_duration;
                    let latency = settings
                        .chunk_duration
                        .unwrap_or(settings.fragment_duration);
                    // Drop the lock before calling out to avoid re-entrancy deadlocks.
                    drop(settings);
                    self.obj().set_latency(latency, None);
                }
            }

            "chunk-duration" => {
                let mut settings = self.settings.lock().unwrap();
                let chunk_duration = value.get().expect("type checked upstream");
                if settings.chunk_duration != chunk_duration {
                    settings.chunk_duration = chunk_duration;
                    let latency = settings
                        .chunk_duration
                        .unwrap_or(settings.fragment_duration);
                    drop(settings);
                    self.obj().set_latency(latency, None);
                }
            }

            "header-update-mode" => {
                let mut settings = self.settings.lock().unwrap();
                settings.header_update_mode = value.get().expect("type checked upstream");
            }

            "write-mfra" => {
                let mut settings = self.settings.lock().unwrap();
                settings.write_mfra = value.get().expect("type checked upstream");
            }

            "write-mehd" => {
                let mut settings = self.settings.lock().unwrap();
                settings.write_mehd = value.get().expect("type checked upstream");
            }

            "interleave-bytes" => {
                let mut settings = self.settings.lock().unwrap();
                // 0 disables byte-based interleaving.
                settings.interleave_bytes = match value.get().expect("type checked upstream") {
                    0 => None,
                    v => Some(v),
                };
            }

            "interleave-time" => {
                let mut settings = self.settings.lock().unwrap();
                // Zero/absent disables time-based interleaving.
                settings.interleave_time = match value.get().expect("type checked upstream") {
                    Some(gst::ClockTime::ZERO) | None => None,
                    v => v,
                };
            }

            "movie-timescale" => {
                let mut settings = self.settings.lock().unwrap();
                settings.movie_timescale = value.get().expect("type checked upstream");
            }

            _ => unimplemented!(),
        }
    }

    /// Property getter; mirrors the `None` <-> sentinel mapping of the setter.
    fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value {
        match pspec.name() {
            "fragment-duration" => {
                let settings = self.settings.lock().unwrap();
                settings.fragment_duration.to_value()
            }

            "chunk-duration" => {
                let settings = self.settings.lock().unwrap();
                settings.chunk_duration.to_value()
            }

            "header-update-mode" => {
                let settings = self.settings.lock().unwrap();
                settings.header_update_mode.to_value()
            }

            "write-mfra" => {
                let settings = self.settings.lock().unwrap();
                settings.write_mfra.to_value()
            }

            "write-mehd" => {
                let settings = self.settings.lock().unwrap();
                settings.write_mehd.to_value()
            }

            "interleave-bytes" => {
                let settings = self.settings.lock().unwrap();
                settings.interleave_bytes.unwrap_or(0).to_value()
            }

            "interleave-time" => {
                let settings = self.settings.lock().unwrap();
                settings.interleave_time.to_value()
            }

            "movie-timescale" => {
                let settings = self.settings.lock().unwrap();
                settings.movie_timescale.to_value()
            }

            _ => unimplemented!(),
        }
    }

    /// Instance construction: add the static "sink" pad for single-stream
    /// variants (always-presence sink templates) and set the initial latency.
    fn constructed(&self) {
        self.parent_constructed();

        let obj = self.obj();
        let class = obj.class();
        for templ in class.pad_template_list().filter(|templ| {
            templ.presence() == gst::PadPresence::Always
                && templ.direction() == gst::PadDirection::Sink
        }) {
            let sinkpad =
                gst::PadBuilder::<gst_base::AggregatorPad>::from_template(&templ, Some("sink"))
                    // Accept caps that merely intersect the template caps.
                    .flags(gst::PadFlags::ACCEPT_INTERSECT)
                    .build();

            obj.add_pad(&sinkpad).unwrap();
        }

        obj.set_latency(Settings::default().fragment_duration, None);
    }
}
|
|
|
|
|
|
|
|
// No GstObject virtual methods need overriding; the defaults suffice.
impl GstObjectImpl for FMP4Mux {}
|
|
|
|
|
|
|
|
impl ElementImpl for FMP4Mux {
    /// Refuse new request pads once the initial header was generated:
    /// the `moov` box fixes the track layout, so late pads cannot be muxed.
    fn request_new_pad(
        &self,
        templ: &gst::PadTemplate,
        name: Option<&str>,
        caps: Option<&gst::Caps>,
    ) -> Option<gst::Pad> {
        let state = self.state.lock().unwrap();
        if state.stream_header.is_some() {
            gst::error!(
                CAT,
                imp: self,
                "Can't request new pads after header was generated"
            );
            return None;
        }

        self.parent_request_new_pad(templ, name, caps)
    }
}
|
|
|
|
|
|
|
|
impl AggregatorImpl for FMP4Mux {
    /// Next aggregation deadline in live mode: chunk start plus any extra
    /// delay accumulated when previous aggregations needed more data.
    fn next_time(&self) -> Option<gst::ClockTime> {
        let state = self.state.lock().unwrap();
        state.chunk_start_pts.opt_add(state.timeout_delay)
    }

    /// Answer CAPS queries from the fixed/template caps, with `framerate`
    /// removed so upstream framerate changes don't force renegotiation.
    fn sink_query(
        &self,
        aggregator_pad: &gst_base::AggregatorPad,
        query: &mut gst::QueryRef,
    ) -> bool {
        use gst::QueryViewMut;

        gst::trace!(CAT, obj: aggregator_pad, "Handling query {:?}", query);

        match query.view_mut() {
            QueryViewMut::Caps(q) => {
                let mut allowed_caps = aggregator_pad
                    .current_caps()
                    .unwrap_or_else(|| aggregator_pad.pad_template_caps());

                // Allow framerate change
                for s in allowed_caps.make_mut().iter_mut() {
                    s.remove_field("framerate");
                }

                if let Some(filter_caps) = q.filter() {
                    let res = filter_caps
                        .intersect_with_mode(&allowed_caps, gst::CapsIntersectMode::First);
                    q.set_result(&res);
                } else {
                    q.set_result(&allowed_caps);
                }

                true
            }
            _ => self.parent_sink_query(aggregator_pad, query),
        }
    }

    /// Replace any non-TIME segment with a default TIME segment before the
    /// event is queued, so downstream processing can rely on TIME format.
    fn sink_event_pre_queue(
        &self,
        aggregator_pad: &gst_base::AggregatorPad,
        mut event: gst::Event,
    ) -> Result<gst::FlowSuccess, gst::FlowError> {
        use gst::EventView;

        gst::trace!(CAT, obj: aggregator_pad, "Handling event {:?}", event);

        match event.view() {
            EventView::Segment(ev) => {
                if ev.segment().format() != gst::Format::Time {
                    gst::warning!(
                        CAT,
                        obj: aggregator_pad,
                        "Received non-TIME segment, replacing with default TIME segment"
                    );
                    let segment = gst::FormattedSegment::<gst::ClockTime>::new();
                    // Keep the original seqnum so the event stays correlated.
                    event = gst::event::Segment::builder(&segment)
                        .seqnum(event.seqnum())
                        .build();
                }
                self.parent_sink_event_pre_queue(aggregator_pad, event)
            }
            _ => self.parent_sink_event_pre_queue(aggregator_pad, event),
        }
    }

    /// Handle queued sink events; segment events configure the source segment
    /// for single-stream variants.
    fn sink_event(&self, aggregator_pad: &gst_base::AggregatorPad, event: gst::Event) -> bool {
        use gst::EventView;

        gst::trace!(CAT, obj: aggregator_pad, "Handling event {:?}", event);

        match event.view() {
            EventView::Segment(ev) => {
                // Already fixed-up above to always be a TIME segment
                let segment = ev
                    .segment()
                    .clone()
                    .downcast::<gst::ClockTime>()
                    .expect("non-TIME segment");
                gst::info!(CAT, obj: aggregator_pad, "Received segment {:?}", segment);

                // Only forward the segment event verbatim if this is a single stream variant.
                // Otherwise we have to produce a default segment and re-timestamp all buffers
                // with their running time.
                let aggregator = self.obj();
                let class = aggregator.class();
                if class.as_ref().variant.is_single_stream() {
                    aggregator.update_segment(&segment);
                }

                self.parent_sink_event(aggregator_pad, event)
            }
            EventView::Tag(_ev) => {
                // TODO: Maybe store for putting into the headers of the next fragment?

                self.parent_sink_event(aggregator_pad, event)
            }
            _ => self.parent_sink_event(aggregator_pad, event),
        }
    }

    /// Report the output as non-seekable; seeking a fragmented mux would
    /// break the fragment layout.
    fn src_query(&self, query: &mut gst::QueryRef) -> bool {
        use gst::QueryViewMut;

        gst::trace!(CAT, imp: self, "Handling query {:?}", query);

        match query.view_mut() {
            QueryViewMut::Seeking(q) => {
                // We can't really handle seeking, it would break everything
                q.set(false, gst::ClockTime::ZERO, gst::ClockTime::NONE);
                true
            }
            _ => self.parent_src_query(query),
        }
    }

    /// Reject seek events from downstream for the same reason as above.
    fn src_event(&self, event: gst::Event) -> bool {
        use gst::EventView;

        gst::trace!(CAT, imp: self, "Handling event {:?}", event);

        match event.view() {
            EventView::Seek(_ev) => false,
            _ => self.parent_src_event(event),
        }
    }

    /// Flush: drop all queued data and reset per-stream and global
    /// muxing state, keeping the stream configuration itself.
    fn flush(&self) -> Result<gst::FlowSuccess, gst::FlowError> {
        let mut state = self.state.lock().unwrap();

        for stream in &mut state.streams {
            stream.queued_gops.clear();
            stream.dts_offset = None;
            stream.current_position = gst::ClockTime::ZERO;
            stream.fragment_filled = false;
            stream.pre_queue.clear();
            stream.running_time_utc_time_mapping = None;
        }

        state.current_offset = 0;
        state.fragment_offsets.clear();

        // Release the lock before chaining up.
        drop(state);

        self.parent_flush()
    }

    /// Stop: chain up, then discard all muxing state.
    fn stop(&self) -> Result<(), gst::ErrorMessage> {
        gst::trace!(CAT, imp: self, "Stopping");

        let _ = self.parent_stop();

        *self.state.lock().unwrap() = State::default();

        Ok(())
    }

    /// Start: chain up, configure the default source segment and reset state.
    fn start(&self) -> Result<(), gst::ErrorMessage> {
        gst::trace!(CAT, imp: self, "Starting");

        self.parent_start()?;

        // For non-single-stream variants configure a default segment that allows for negative
        // DTS so that we can correctly re-timestamp buffers with their running times.
        let aggregator = self.obj();
        let class = aggregator.class();
        if !class.as_ref().variant.is_single_stream() {
            let mut segment = gst::FormattedSegment::<gst::ClockTime>::new();
            segment.set_start(SEGMENT_OFFSET);
            segment.set_position(SEGMENT_OFFSET);
            aggregator.update_segment(&segment);
        }

        *self.state.lock().unwrap() = State::default();

        Ok(())
    }

    /// Negotiation is handled manually via `set_src_caps()`; nothing to do here.
    fn negotiate(&self) -> bool {
        true
    }

    /// Main aggregation entry point. Queues available input, detects EOS,
    /// computes the earliest PTS, drains finished chunks/fragments, then —
    /// outside the state lock — pushes upstream events, caps and output
    /// buffer lists. Returns `Eos` once everything was drained at EOS.
    fn aggregate(&self, timeout: bool) -> Result<gst::FlowSuccess, gst::FlowError> {
        let settings = self.settings.lock().unwrap().clone();

        let all_eos;
        let mut caps = None;
        let mut buffers = vec![];
        let mut upstream_events = vec![];
        let res = {
            let mut state = self.state.lock().unwrap();

            // Create streams
            if state.streams.is_empty() {
                self.create_streams(&mut state)?;
            }

            self.queue_available_buffers(&mut state, &settings, timeout)?;

            all_eos = state.streams.iter().all(|stream| stream.sinkpad.is_eos());
            if all_eos {
                gst::debug!(CAT, imp: self, "All streams are EOS now");

                let fragment_start_pts = state.fragment_start_pts;
                let chunk_start_pts = state.chunk_start_pts;

                for stream in &mut state.streams {
                    // Check if this stream is filled enough now that everything is EOS.
                    self.check_stream_filled(
                        &settings,
                        stream,
                        fragment_start_pts,
                        chunk_start_pts,
                        true,
                    );
                }
            }

            // Calculate the earliest PTS, i.e. the start of the first fragment, if not known yet.
            self.calculate_earliest_pts(
                &settings,
                &mut state,
                &mut upstream_events,
                all_eos,
                timeout,
            );

            // Drain everything that can be drained at this point
            self.drain(
                &mut state,
                &settings,
                all_eos,
                timeout,
                &mut caps,
                &mut buffers,
                &mut upstream_events,
            )
        };

        // Push collected events/caps/buffers without holding the state lock.
        for (sinkpad, event) in upstream_events {
            sinkpad.push_event(event);
        }

        if let Some(caps) = caps {
            gst::debug!(CAT, imp: self, "Setting caps on source pad: {:?}", caps);
            self.obj().set_src_caps(&caps);
        }

        for buffer_list in buffers {
            gst::trace!(CAT, imp: self, "Pushing buffer list {:?}", buffer_list);
            self.obj().finish_buffer_list(buffer_list)?;
        }

        // If an error happened above while draining, return this now after pushing
        // any output that was produced before the error.
        res?;

        if !all_eos {
            return Ok(gst::FlowSuccess::Ok);
        }

        // Finish the stream.
        self.finish(&settings);

        Err(gst::FlowError::Eos)
    }
}
|
|
|
|
|
|
|
|
/// Class struct for the `FMP4Mux` base class.
///
/// `#[repr(C)]` is required so this can act as a GObject class struct:
/// the parent class struct must be the first field, at offset 0.
#[repr(C)]
pub(crate) struct Class {
    // Parent class struct; must come first for GObject class layout.
    parent: gst_base::ffi::GstAggregatorClass,
    // Which fMP4 variant (ISO / CMAF / DASH / ONVIF) the concrete subclass
    // produces; filled in from `FMP4MuxImpl::VARIANT` during class_init.
    variant: super::Variant,
}
|
|
|
|
|
|
|
|
// SAFETY: `Class` is `#[repr(C)]` with the parent class struct
// (`GstAggregatorClass`) as its first field, as required for the class
// struct of `FMP4Mux`.
unsafe impl ClassStruct for Class {
    type Type = FMP4Mux;
}
|
|
|
|
|
|
|
|
impl std::ops::Deref for Class {
    type Target = glib::Class<gst_base::Aggregator>;

    /// Allows using the parent `Aggregator` class API directly on `Class`.
    fn deref(&self) -> &Self::Target {
        // SAFETY: `parent` is the first field of this `#[repr(C)]` struct, so
        // a pointer to it can be reinterpreted as a pointer to the parent
        // class struct type.
        unsafe { &*(&self.parent as *const _ as *const _) }
    }
}
|
|
|
|
|
|
|
|
unsafe impl<T: FMP4MuxImpl> IsSubclassable<T> for super::FMP4Mux {
|
|
|
|
fn class_init(class: &mut glib::Class<Self>) {
|
|
|
|
Self::parent_class_init::<T>(class);
|
|
|
|
|
|
|
|
let class = class.as_mut();
|
|
|
|
class.variant = T::VARIANT;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2022-05-05 12:09:19 +00:00
|
|
|
/// Trait that every concrete `FMP4Mux` subclass has to implement.
pub(crate) trait FMP4MuxImpl: AggregatorImpl {
    /// The fMP4 variant this subclass produces.
    const VARIANT: super::Variant;
}
|
|
|
|
|
|
|
|
/// Plain ISO fragmented MP4 muxer subclass.
#[derive(Default)]
pub(crate) struct ISOFMP4Mux;

#[glib::object_subclass]
impl ObjectSubclass for ISOFMP4Mux {
    const NAME: &'static str = "GstISOFMP4Mux";
    type Type = super::ISOFMP4Mux;
    type ParentType = super::FMP4Mux;
}
|
|
|
|
|
2023-01-25 09:59:52 +00:00
|
|
|
impl ObjectImpl for ISOFMP4Mux {
|
|
|
|
fn properties() -> &'static [glib::ParamSpec] {
|
|
|
|
static PROPERTIES: Lazy<Vec<glib::ParamSpec>> = Lazy::new(|| {
|
|
|
|
vec![glib::ParamSpecBoolean::builder("offset-to-zero")
|
|
|
|
.nick("Offset to Zero")
|
|
|
|
.blurb("Offsets all streams so that the earliest stream starts at 0")
|
|
|
|
.mutable_ready()
|
|
|
|
.build()]
|
|
|
|
});
|
|
|
|
|
|
|
|
&PROPERTIES
|
|
|
|
}
|
|
|
|
|
|
|
|
fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value {
|
|
|
|
let obj = self.obj();
|
|
|
|
let fmp4mux = obj.upcast_ref::<super::FMP4Mux>().imp();
|
|
|
|
|
|
|
|
match pspec.name() {
|
|
|
|
"offset-to-zero" => {
|
|
|
|
let settings = fmp4mux.settings.lock().unwrap();
|
|
|
|
settings.offset_to_zero.to_value()
|
|
|
|
}
|
|
|
|
|
|
|
|
_ => unimplemented!(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) {
|
|
|
|
let obj = self.obj();
|
|
|
|
let fmp4mux = obj.upcast_ref::<super::FMP4Mux>().imp();
|
|
|
|
|
|
|
|
match pspec.name() {
|
|
|
|
"offset-to-zero" => {
|
|
|
|
let mut settings = fmp4mux.settings.lock().unwrap();
|
|
|
|
settings.offset_to_zero = value.get().expect("type checked upstream");
|
|
|
|
}
|
|
|
|
|
|
|
|
_ => unimplemented!(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2021-10-18 06:42:42 +00:00
|
|
|
|
|
|
|
// No GstObject virtual methods need overriding.
impl GstObjectImpl for ISOFMP4Mux {}
|
|
|
|
|
|
|
|
impl ElementImpl for ISOFMP4Mux {
    /// Element metadata as shown by e.g. `gst-inspect-1.0`.
    fn metadata() -> Option<&'static gst::subclass::ElementMetadata> {
        static ELEMENT_METADATA: Lazy<gst::subclass::ElementMetadata> = Lazy::new(|| {
            gst::subclass::ElementMetadata::new(
                "ISOFMP4Mux",
                "Codec/Muxer",
                "ISO fragmented MP4 muxer",
                "Sebastian Dröge <sebastian@centricular.com>",
            )
        });

        Some(&*ELEMENT_METADATA)
    }

    /// Pad templates: one always-present source pad producing
    /// `video/quicktime, variant=iso-fragmented`, and request sink pads
    /// (`sink_%u`) accepting the video/audio caps listed below.
    fn pad_templates() -> &'static [gst::PadTemplate] {
        static PAD_TEMPLATES: Lazy<Vec<gst::PadTemplate>> = Lazy::new(|| {
            let src_pad_template = gst::PadTemplate::new(
                "src",
                gst::PadDirection::Src,
                gst::PadPresence::Always,
                &gst::Caps::builder("video/quicktime")
                    .field("variant", "iso-fragmented")
                    .build(),
            )
            .unwrap();

            // Sink pads use the custom FMP4MuxPad type so per-pad properties
            // (e.g. "trak-timescale") are available on request pads.
            let sink_pad_template = gst::PadTemplate::with_gtype(
                "sink_%u",
                gst::PadDirection::Sink,
                gst::PadPresence::Request,
                &[
                    // H.264, access-unit aligned, avc or avc3 stream format.
                    gst::Structure::builder("video/x-h264")
                        .field("stream-format", gst::List::new(["avc", "avc3"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // H.265, access-unit aligned, hvc1 or hev1 stream format.
                    gst::Structure::builder("video/x-h265")
                        .field("stream-format", gst::List::new(["hvc1", "hev1"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // VP8.
                    gst::Structure::builder("video/x-vp8")
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // VP9; profile/chroma/bit-depth must be known up front.
                    gst::Structure::builder("video/x-vp9")
                        .field("profile", gst::List::new(["0", "1", "2", "3"]))
                        .field("chroma-format", gst::List::new(["4:2:0", "4:2:2", "4:4:4"]))
                        .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // AV1, temporal-unit aligned OBU stream.
                    gst::Structure::builder("video/x-av1")
                        .field("stream-format", "obu-stream")
                        .field("alignment", "tu")
                        .field("profile", gst::List::new(["main", "high", "professional"]))
                        .field(
                            "chroma-format",
                            gst::List::new(["4:0:0", "4:2:0", "4:2:2", "4:4:4"]),
                        )
                        .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // AAC (raw MPEG-4 audio).
                    gst::Structure::builder("audio/mpeg")
                        .field("mpegversion", 4i32)
                        .field("stream-format", "raw")
                        .field("channels", gst::IntRange::new(1, u16::MAX as i32))
                        .field("rate", gst::IntRange::new(1, i32::MAX))
                        .build(),
                    // Opus.
                    gst::Structure::builder("audio/x-opus")
                        .field("channel-mapping-family", gst::IntRange::new(0i32, 255))
                        .field("channels", gst::IntRange::new(1i32, 8))
                        .field("rate", gst::IntRange::new(1, i32::MAX))
                        .build(),
                ]
                .into_iter()
                .collect::<gst::Caps>(),
                super::FMP4MuxPad::static_type(),
            )
            .unwrap();

            vec![src_pad_template, sink_pad_template]
        });

        PAD_TEMPLATES.as_ref()
    }
}
|
|
|
|
|
2022-05-05 12:09:19 +00:00
|
|
|
// No Aggregator virtual methods need overriding; all muxing logic lives in
// the FMP4Mux base class.
impl AggregatorImpl for ISOFMP4Mux {}

impl FMP4MuxImpl for ISOFMP4Mux {
    const VARIANT: super::Variant = super::Variant::ISO;
}
|
|
|
|
|
|
|
|
/// CMAF (single-stream) fragmented MP4 muxer subclass.
#[derive(Default)]
pub(crate) struct CMAFMux;

#[glib::object_subclass]
impl ObjectSubclass for CMAFMux {
    const NAME: &'static str = "GstCMAFMux";
    type Type = super::CMAFMux;
    type ParentType = super::FMP4Mux;
}

// No properties and no GObject/GstObject virtual methods need overriding.
impl ObjectImpl for CMAFMux {}

impl GstObjectImpl for CMAFMux {}
|
|
|
|
|
|
|
|
impl ElementImpl for CMAFMux {
    /// Element metadata as shown by e.g. `gst-inspect-1.0`.
    fn metadata() -> Option<&'static gst::subclass::ElementMetadata> {
        static ELEMENT_METADATA: Lazy<gst::subclass::ElementMetadata> = Lazy::new(|| {
            gst::subclass::ElementMetadata::new(
                "CMAFMux",
                "Codec/Muxer",
                "CMAF fragmented MP4 muxer",
                "Sebastian Dröge <sebastian@centricular.com>",
            )
        });

        Some(&*ELEMENT_METADATA)
    }

    /// Pad templates: an always source pad producing
    /// `video/quicktime, variant=cmaf` and a single always sink pad
    /// ("sink", not a request pad — CMAF tracks carry one stream).
    fn pad_templates() -> &'static [gst::PadTemplate] {
        static PAD_TEMPLATES: Lazy<Vec<gst::PadTemplate>> = Lazy::new(|| {
            let src_pad_template = gst::PadTemplate::new(
                "src",
                gst::PadDirection::Src,
                gst::PadPresence::Always,
                &gst::Caps::builder("video/quicktime")
                    .field("variant", "cmaf")
                    .build(),
            )
            .unwrap();

            // Sink pad uses the custom FMP4MuxPad type so per-pad properties
            // (e.g. "trak-timescale") are available.
            let sink_pad_template = gst::PadTemplate::with_gtype(
                "sink",
                gst::PadDirection::Sink,
                gst::PadPresence::Always,
                &[
                    // H.264, access-unit aligned, avc or avc3 stream format.
                    gst::Structure::builder("video/x-h264")
                        .field("stream-format", gst::List::new(["avc", "avc3"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // H.265, access-unit aligned, hvc1 or hev1 stream format.
                    gst::Structure::builder("video/x-h265")
                        .field("stream-format", gst::List::new(["hvc1", "hev1"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // AAC (raw MPEG-4 audio).
                    gst::Structure::builder("audio/mpeg")
                        .field("mpegversion", 4i32)
                        .field("stream-format", "raw")
                        .field("channels", gst::IntRange::new(1, u16::MAX as i32))
                        .field("rate", gst::IntRange::new(1, i32::MAX))
                        .build(),
                ]
                .into_iter()
                .collect::<gst::Caps>(),
                super::FMP4MuxPad::static_type(),
            )
            .unwrap();

            vec![src_pad_template, sink_pad_template]
        });

        PAD_TEMPLATES.as_ref()
    }
}
|
|
|
|
|
2022-05-05 12:09:19 +00:00
|
|
|
// No Aggregator virtual methods need overriding; all muxing logic lives in
// the FMP4Mux base class.
impl AggregatorImpl for CMAFMux {}

impl FMP4MuxImpl for CMAFMux {
    const VARIANT: super::Variant = super::Variant::CMAF;
}
|
|
|
|
|
|
|
|
/// DASH (single-stream) fragmented MP4 muxer subclass.
#[derive(Default)]
pub(crate) struct DASHMP4Mux;

#[glib::object_subclass]
impl ObjectSubclass for DASHMP4Mux {
    const NAME: &'static str = "GstDASHMP4Mux";
    type Type = super::DASHMP4Mux;
    type ParentType = super::FMP4Mux;
}

// No properties and no GObject/GstObject virtual methods need overriding.
impl ObjectImpl for DASHMP4Mux {}

impl GstObjectImpl for DASHMP4Mux {}
|
|
|
|
|
|
|
|
impl ElementImpl for DASHMP4Mux {
    /// Element metadata as shown by e.g. `gst-inspect-1.0`.
    fn metadata() -> Option<&'static gst::subclass::ElementMetadata> {
        static ELEMENT_METADATA: Lazy<gst::subclass::ElementMetadata> = Lazy::new(|| {
            gst::subclass::ElementMetadata::new(
                "DASHMP4Mux",
                "Codec/Muxer",
                "DASH fragmented MP4 muxer",
                "Sebastian Dröge <sebastian@centricular.com>",
            )
        });

        Some(&*ELEMENT_METADATA)
    }

    /// Pad templates: an always source pad producing
    /// `video/quicktime, variant=iso-fragmented` and a single always sink
    /// pad ("sink" — DASH segments carry one stream).
    ///
    /// NOTE(review): some IntRange fields use the `::<i32>` turbofish while
    /// the sibling ISO/CMAF templates rely on inference; both spell the same
    /// caps, but could be unified for consistency.
    fn pad_templates() -> &'static [gst::PadTemplate] {
        static PAD_TEMPLATES: Lazy<Vec<gst::PadTemplate>> = Lazy::new(|| {
            let src_pad_template = gst::PadTemplate::new(
                "src",
                gst::PadDirection::Src,
                gst::PadPresence::Always,
                &gst::Caps::builder("video/quicktime")
                    .field("variant", "iso-fragmented")
                    .build(),
            )
            .unwrap();

            // Sink pad uses the custom FMP4MuxPad type so per-pad properties
            // (e.g. "trak-timescale") are available.
            let sink_pad_template = gst::PadTemplate::with_gtype(
                "sink",
                gst::PadDirection::Sink,
                gst::PadPresence::Always,
                &[
                    // H.264, access-unit aligned, avc or avc3 stream format.
                    gst::Structure::builder("video/x-h264")
                        .field("stream-format", gst::List::new(["avc", "avc3"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .build(),
                    // H.265, access-unit aligned, hvc1 or hev1 stream format.
                    gst::Structure::builder("video/x-h265")
                        .field("stream-format", gst::List::new(["hvc1", "hev1"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .build(),
                    // VP8.
                    gst::Structure::builder("video/x-vp8")
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // VP9; profile/chroma/bit-depth must be known up front.
                    gst::Structure::builder("video/x-vp9")
                        .field("profile", gst::List::new(["0", "1", "2", "3"]))
                        .field("chroma-format", gst::List::new(["4:2:0", "4:2:2", "4:4:4"]))
                        .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // AV1, temporal-unit aligned OBU stream.
                    gst::Structure::builder("video/x-av1")
                        .field("stream-format", "obu-stream")
                        .field("alignment", "tu")
                        .field("profile", gst::List::new(["main", "high", "professional"]))
                        .field(
                            "chroma-format",
                            gst::List::new(["4:0:0", "4:2:0", "4:2:2", "4:4:4"]),
                        )
                        .field("bit-depth-luma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("bit-depth-chroma", gst::List::new([8u32, 10u32, 12u32]))
                        .field("width", gst::IntRange::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::new(1, u16::MAX as i32))
                        .build(),
                    // AAC (raw MPEG-4 audio).
                    gst::Structure::builder("audio/mpeg")
                        .field("mpegversion", 4i32)
                        .field("stream-format", "raw")
                        .field("channels", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .field("rate", gst::IntRange::<i32>::new(1, i32::MAX))
                        .build(),
                    // Opus.
                    gst::Structure::builder("audio/x-opus")
                        .field("channel-mapping-family", gst::IntRange::new(0i32, 255))
                        .field("channels", gst::IntRange::new(1i32, 8))
                        .field("rate", gst::IntRange::new(1, i32::MAX))
                        .build(),
                ]
                .into_iter()
                .collect::<gst::Caps>(),
                super::FMP4MuxPad::static_type(),
            )
            .unwrap();

            vec![src_pad_template, sink_pad_template]
        });

        PAD_TEMPLATES.as_ref()
    }
}
|
|
|
|
|
2022-05-05 12:09:19 +00:00
|
|
|
// No Aggregator virtual methods need overriding; all muxing logic lives in
// the FMP4Mux base class.
impl AggregatorImpl for DASHMP4Mux {}

impl FMP4MuxImpl for DASHMP4Mux {
    const VARIANT: super::Variant = super::Variant::DASH;
}
|
2022-05-13 08:45:01 +00:00
|
|
|
|
|
|
|
/// ONVIF fragmented MP4 muxer subclass.
#[derive(Default)]
pub(crate) struct ONVIFFMP4Mux;

#[glib::object_subclass]
impl ObjectSubclass for ONVIFFMP4Mux {
    const NAME: &'static str = "GstONVIFFMP4Mux";
    type Type = super::ONVIFFMP4Mux;
    type ParentType = super::FMP4Mux;
}

// No properties and no GObject/GstObject virtual methods need overriding.
impl ObjectImpl for ONVIFFMP4Mux {}

impl GstObjectImpl for ONVIFFMP4Mux {}
|
|
|
|
|
|
|
|
impl ElementImpl for ONVIFFMP4Mux {
    /// Element metadata as shown by e.g. `gst-inspect-1.0`.
    fn metadata() -> Option<&'static gst::subclass::ElementMetadata> {
        static ELEMENT_METADATA: Lazy<gst::subclass::ElementMetadata> = Lazy::new(|| {
            gst::subclass::ElementMetadata::new(
                "ONVIFFMP4Mux",
                "Codec/Muxer",
                "ONVIF fragmented MP4 muxer",
                "Sebastian Dröge <sebastian@centricular.com>",
            )
        });

        Some(&*ELEMENT_METADATA)
    }

    /// Pad templates: an always source pad producing
    /// `video/quicktime, variant=iso-fragmented` and request sink pads
    /// (`sink_%u`) accepting the ONVIF-relevant caps below, including JPEG,
    /// G.711 (A-law/µ-law), G.726 and parsed ONVIF metadata streams.
    fn pad_templates() -> &'static [gst::PadTemplate] {
        static PAD_TEMPLATES: Lazy<Vec<gst::PadTemplate>> = Lazy::new(|| {
            let src_pad_template = gst::PadTemplate::new(
                "src",
                gst::PadDirection::Src,
                gst::PadPresence::Always,
                &gst::Caps::builder("video/quicktime")
                    .field("variant", "iso-fragmented")
                    .build(),
            )
            .unwrap();

            // Sink pads use the custom FMP4MuxPad type so per-pad properties
            // (e.g. "trak-timescale") are available on request pads.
            let sink_pad_template = gst::PadTemplate::with_gtype(
                "sink_%u",
                gst::PadDirection::Sink,
                gst::PadPresence::Request,
                &[
                    // H.264, access-unit aligned, avc or avc3 stream format.
                    gst::Structure::builder("video/x-h264")
                        .field("stream-format", gst::List::new(["avc", "avc3"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .build(),
                    // H.265, access-unit aligned, hvc1 or hev1 stream format.
                    gst::Structure::builder("video/x-h265")
                        .field("stream-format", gst::List::new(["hvc1", "hev1"]))
                        .field("alignment", "au")
                        .field("width", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .build(),
                    // JPEG.
                    gst::Structure::builder("image/jpeg")
                        .field("width", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .field("height", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .build(),
                    // AAC (raw MPEG-4 audio).
                    gst::Structure::builder("audio/mpeg")
                        .field("mpegversion", 4i32)
                        .field("stream-format", "raw")
                        .field("channels", gst::IntRange::<i32>::new(1, u16::MAX as i32))
                        .field("rate", gst::IntRange::<i32>::new(1, i32::MAX))
                        .build(),
                    // G.711 A-law.
                    gst::Structure::builder("audio/x-alaw")
                        .field("channels", gst::IntRange::<i32>::new(1, 2))
                        .field("rate", gst::IntRange::<i32>::new(1, i32::MAX))
                        .build(),
                    // G.711 µ-law.
                    gst::Structure::builder("audio/x-mulaw")
                        .field("channels", gst::IntRange::<i32>::new(1, 2))
                        .field("rate", gst::IntRange::<i32>::new(1, i32::MAX))
                        .build(),
                    // G.726 ADPCM, mono at 8 kHz only.
                    gst::Structure::builder("audio/x-adpcm")
                        .field("layout", "g726")
                        .field("channels", 1i32)
                        .field("rate", 8000i32)
                        .field("bitrate", gst::List::new([16000i32, 24000, 32000, 40000]))
                        .build(),
                    // ONVIF timed metadata; must already be parsed.
                    gst::Structure::builder("application/x-onvif-metadata")
                        .field("parsed", true)
                        .build(),
                ]
                .into_iter()
                .collect::<gst::Caps>(),
                super::FMP4MuxPad::static_type(),
            )
            .unwrap();

            vec![src_pad_template, sink_pad_template]
        });

        PAD_TEMPLATES.as_ref()
    }
}
|
|
|
|
|
|
|
|
// No Aggregator virtual methods need overriding; all muxing logic lives in
// the FMP4Mux base class.
impl AggregatorImpl for ONVIFFMP4Mux {}

impl FMP4MuxImpl for ONVIFFMP4Mux {
    const VARIANT: super::Variant = super::Variant::ONVIF;
}
|
2022-11-07 17:47:31 +00:00
|
|
|
|
|
|
|
/// Settings configurable per sink pad.
#[derive(Default, Clone)]
struct PadSettings {
    // Timescale for this track in units per second; 0 selects an automatic
    // value (see the "trak-timescale" property blurb below).
    trak_timescale: u32,
}

/// Custom sink pad type carrying per-pad settings.
#[derive(Default)]
pub(crate) struct FMP4MuxPad {
    // Guarded by a mutex because properties can be read/written from any
    // thread via the GObject property API.
    settings: Mutex<PadSettings>,
}

#[glib::object_subclass]
impl ObjectSubclass for FMP4MuxPad {
    const NAME: &'static str = "GstFMP4MuxPad";
    type Type = super::FMP4MuxPad;
    type ParentType = gst_base::AggregatorPad;
}
|
|
|
|
|
|
|
|
impl ObjectImpl for FMP4MuxPad {
|
|
|
|
fn properties() -> &'static [glib::ParamSpec] {
|
|
|
|
static PROPERTIES: Lazy<Vec<glib::ParamSpec>> = Lazy::new(|| {
|
|
|
|
vec![glib::ParamSpecUInt::builder("trak-timescale")
|
|
|
|
.nick("Track Timescale")
|
|
|
|
.blurb("Timescale to use for the track (units per second, 0 is automatic)")
|
|
|
|
.mutable_ready()
|
|
|
|
.build()]
|
|
|
|
});
|
|
|
|
|
|
|
|
&PROPERTIES
|
|
|
|
}
|
|
|
|
|
|
|
|
fn set_property(&self, _id: usize, value: &glib::Value, pspec: &glib::ParamSpec) {
|
|
|
|
match pspec.name() {
|
|
|
|
"trak-timescale" => {
|
|
|
|
let mut settings = self.settings.lock().unwrap();
|
|
|
|
settings.trak_timescale = value.get().expect("type checked upstream");
|
|
|
|
}
|
|
|
|
|
|
|
|
_ => unimplemented!(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn property(&self, _id: usize, pspec: &glib::ParamSpec) -> glib::Value {
|
|
|
|
match pspec.name() {
|
|
|
|
"trak-timescale" => {
|
|
|
|
let settings = self.settings.lock().unwrap();
|
|
|
|
settings.trak_timescale.to_value()
|
|
|
|
}
|
|
|
|
|
|
|
|
_ => unimplemented!(),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// No GstObject or Pad virtual methods need overriding.
impl GstObjectImpl for FMP4MuxPad {}

impl PadImpl for FMP4MuxPad {}
|
|
|
|
|
|
|
|
impl AggregatorPadImpl for FMP4MuxPad {
|
|
|
|
fn flush(&self, aggregator: &gst_base::Aggregator) -> Result<gst::FlowSuccess, gst::FlowError> {
|
|
|
|
let mux = aggregator.downcast_ref::<super::FMP4Mux>().unwrap();
|
|
|
|
let mut mux_state = mux.imp().state.lock().unwrap();
|
|
|
|
|
|
|
|
for stream in &mut mux_state.streams {
|
|
|
|
if stream.sinkpad == *self.obj() {
|
|
|
|
stream.queued_gops.clear();
|
|
|
|
stream.dts_offset = None;
|
|
|
|
stream.current_position = gst::ClockTime::ZERO;
|
|
|
|
stream.fragment_filled = false;
|
2022-11-17 17:53:48 +00:00
|
|
|
stream.pre_queue.clear();
|
|
|
|
stream.running_time_utc_time_mapping = None;
|
2022-11-07 17:47:31 +00:00
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
drop(mux_state);
|
|
|
|
|
|
|
|
self.parent_flush(aggregator)
|
|
|
|
}
|
|
|
|
}
|