aws: s3hlssink: Write to S3 on OutputStream flush
It is quite brittle to depend on the signal order from splitmuxsink or hlssink3 (or worse, on when S3Upload gets drop()ed). Instead, we implement flush() on the stream, with a simple check that there is new data, to avoid duplicate uploads.

If we get the ordering wrong, we upload new segments _after_ the playlist update that references them, which, besides being wrong, could break players.

I had done this in the initial implementation, but the flush() alone proved insufficient for reasons I no longer recall. Doing a flush in both flush() and drop() should hopefully cover all the cases that I found not to work. In addition, since this is an append-only stream, a simple length check lets us avoid redundant uploads.

Fixes: https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/issues/701

Part-of: <https://gitlab.freedesktop.org/gstreamer/gst-plugins-rs/-/merge_requests/2330>
parent b271edc3f8
commit efaab53ab3

1 changed file with 41 additions and 17 deletions
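The diff below routes every upload through a single inherent flush(&mut self, consume: bool) on S3Upload: Write::flush() calls it without consuming the buffer, Drop calls it consuming, and the last_flush_len comparison skips the upload when nothing was appended. To make the pattern easier to see outside the plugin, here is a minimal, self-contained sketch of the same idea; the type name AppendOnlyUpload, the key string, and the println! stand-in for the actual S3 PUT are illustrative inventions, not the plugin's code.

use std::io::Write;

// Buffers bytes and re-uploads the whole object whenever new data has
// appeared since the last flush (hypothetical stand-in for S3Upload).
struct AppendOnlyUpload {
    key: String,
    data: Vec<u8>,
    last_flush_len: usize,
}

impl AppendOnlyUpload {
    fn new(key: &str) -> Self {
        Self {
            key: key.to_string(),
            data: Vec::new(),
            last_flush_len: 0,
        }
    }

    // Inherent flush. `consume` empties the buffer (used on drop); otherwise
    // the buffer is kept so later appends re-upload the grown object.
    fn flush(&mut self, consume: bool) {
        let len = self.data.len();
        if len == self.last_flush_len {
            return; // nothing new was written, skip the duplicate upload
        }
        let payload: Vec<u8> = if consume {
            self.data.drain(..).collect()
        } else {
            self.data.clone()
        };
        // Stand-in for handing `payload` to the S3 upload thread.
        println!("uploading {}: {} bytes", self.key, payload.len());
        self.last_flush_len = len;
    }
}

impl Write for AppendOnlyUpload {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        self.data.extend_from_slice(buf);
        Ok(buf.len())
    }

    // Inherent methods take precedence over trait methods in Rust method
    // resolution, so this calls flush(&mut self, bool) above, not itself.
    fn flush(&mut self) -> std::io::Result<()> {
        self.flush(false);
        Ok(())
    }
}

impl Drop for AppendOnlyUpload {
    fn drop(&mut self) {
        // Final consuming flush covers anything never explicitly flushed.
        self.flush(true);
    }
}

fn main() -> std::io::Result<()> {
    let mut playlist = AppendOnlyUpload::new("playlist.m3u8");
    playlist.write_all(b"#EXTM3U\n")?;
    Write::flush(&mut playlist)?; // uploads 8 bytes
    Write::flush(&mut playlist)?; // length unchanged: skipped
    playlist.write_all(b"#EXT-X-VERSION:6\n")?;
    Ok(()) // drop() runs a final consuming flush for the new data
}

Note that the actual change relies on the same resolution rule: S3Upload's Write::flush can call the inherent two-argument flush of the same name without recursing.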
@@ -108,6 +108,7 @@ struct S3Upload {
     s3_acl: ObjectCannedAcl,
     s3_tx: SyncSender<S3Request>,
     s3_data: Vec<u8>,
+    last_flush_len: usize,
 }
 
 struct S3UploadReq {
@@ -170,26 +171,28 @@ impl S3Upload {
             s3_acl,
             s3_data: Vec::new(),
+            last_flush_len: 0,
             s3_tx,
         }
     }
-}
 
-impl Write for S3Upload {
-    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
-        gst::log!(CAT, "Write {}, {}", self.s3_key, buf.len());
-        self.s3_data.extend_from_slice(buf);
-        Ok(buf.len())
-    }
+    fn flush(&mut self, consume: bool) {
+        let s3_data_len = self.s3_data.len();
+        if self.last_flush_len == s3_data_len {
+            // Nothing new was written, nothing to flush
+            gst::debug!(
+                CAT,
+                "Buffer length ({}) unchanged, skipping flush for {}",
+                s3_data_len,
+                self.s3_key
+            );
+            return;
+        }
 
-    fn flush(&mut self) -> std::io::Result<()> {
-        Ok(())
-    }
-}
-
-impl Drop for S3Upload {
-    fn drop(&mut self) {
-        let s3_data: Vec<u8> = self.s3_data.drain(0..).collect();
-        let s3_data_len = s3_data.len();
+        let s3_data: Vec<u8> = if consume {
+            self.s3_data.drain(0..).collect()
+        } else {
+            self.s3_data.clone()
+        };
         let s3_tx = &mut self.s3_tx;
         let s3_channel = S3UploadReq {
             s3_client: self.s3_client.clone(),
@@ -201,12 +204,14 @@ impl Drop for S3Upload {
 
         gst::debug!(
             CAT,
-            "Submitting upload request for key: {}",
+            "Submitting {}consuming upload request for key: {}",
+            if consume { "" } else { "non-" },
            s3_channel.s3_key,
         );
 
         match s3_tx.send(S3Request::Upload(s3_channel)) {
             Ok(()) => {
+                self.last_flush_len = s3_data_len;
                 gst::debug!(
                     CAT,
                     "Send S3 key {} of data length {} succeeded",
@@ -231,6 +236,25 @@ impl Drop for S3Upload {
     }
 }
+
+impl Write for S3Upload {
+    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+        gst::log!(CAT, "Write {}, {}", self.s3_key, buf.len());
+        self.s3_data.extend_from_slice(buf);
+        Ok(buf.len())
+    }
+
+    fn flush(&mut self) -> std::io::Result<()> {
+        self.flush(false);
+        Ok(())
+    }
+}
+
+impl Drop for S3Upload {
+    fn drop(&mut self) {
+        self.flush(true);
+    }
+}
 
 impl S3HlsSink {
     fn s3_request(&self, rxc: Receiver<S3RequestControl>, rx: Receiver<S3Request>) {
         loop {