Mirror of https://git.asonix.dog/asonix/pict-rs.git (synced 2024-11-28 04:21:12 +00:00)

Merge pull request 'Consolidate a bunch of parameters into a single type' (#50) from asonix/fewer-arguments into main

Reviewed-on: https://git.asonix.dog/asonix/pict-rs/pulls/50

Commit 0bc14c810d. 23 changed files with 783 additions and 1582 deletions.
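The refactor below is mechanical but wide: the loose tmp_dir, policy_dir, repo, store, client, and config arguments that were threaded through nearly every function are folded into a single State<S> parameter object. The real definition lives in the new src/state.rs, which this page does not show; the following is a compilable toy sketch of the pattern, with stub types in place of pict-rs's own.

// Toy sketch of the parameter-object refactor in this PR. `Config`, `Repo`,
// and `Store` are stubs; the field names mirror the accesses in the diff
// (state.repo, state.store, state.config, ...), not pict-rs's real types.
#[derive(Clone)]
struct Config {
    process_timeout: u64,
}

#[derive(Clone)]
struct Repo;

trait Store {
    fn name(&self) -> &'static str;
}

struct State<S> {
    config: Config,
    repo: Repo,
    store: S,
}

struct FileStore;

impl Store for FileStore {
    fn name(&self) -> &'static str {
        "file store"
    }
}

// Before: every dependency is a separate argument, and several call sites
// need #[allow(clippy::too_many_arguments)].
fn process_before<S: Store>(_repo: &Repo, store: &S, config: &Config) {
    println!("{} (timeout {}s)", store.name(), config.process_timeout);
}

// After: one borrow hands a helper everything it needs.
fn process_after<S: Store>(state: &State<S>) {
    println!(
        "{} (timeout {}s)",
        state.store.name(),
        state.config.process_timeout
    );
}

fn main() {
    let state = State {
        config: Config { process_timeout: 30 },
        repo: Repo,
        store: FileStore,
    };
    process_before(&state.repo, &state.store, &state.config);
    process_after(&state);
}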
@@ -3,6 +3,7 @@ use std::sync::Arc;
 use crate::{
     error::Error,
     repo::{ArcRepo, UploadId},
+    state::State,
     store::Store,
 };
 use actix_web::web::Bytes;

@@ -30,23 +31,23 @@ impl Backgrounded {
         self.identifier.as_ref()
     }

-    pub(crate) async fn proxy<S, P>(repo: ArcRepo, store: S, stream: P) -> Result<Self, Error>
+    pub(crate) async fn proxy<S, P>(state: &State<S>, stream: P) -> Result<Self, Error>
     where
         S: Store,
         P: Stream<Item = Result<Bytes, Error>> + 'static,
     {
         let mut this = Self {
-            repo,
+            repo: state.repo.clone(),
             identifier: None,
             upload_id: None,
         };

-        this.do_proxy(store, stream).await?;
+        this.do_proxy(&state.store, stream).await?;

         Ok(this)
     }

-    async fn do_proxy<S, P>(&mut self, store: S, stream: P) -> Result<(), Error>
+    async fn do_proxy<S, P>(&mut self, store: &S, stream: P) -> Result<(), Error>
     where
         S: Store,
         P: Stream<Item = Result<Bytes, Error>> + 'static,
@@ -12,8 +12,8 @@ use defaults::Defaults;

 pub(crate) use commandline::Operation;
 pub(crate) use file::{
-    Animation, ConfigFile as Configuration, Image, Media, ObjectStorage, OpenTelemetry, Postgres,
-    Repo, Sled, Store, Tracing, Video,
+    Animation, ConfigFile as Configuration, Media, ObjectStorage, OpenTelemetry, Postgres, Repo,
+    Sled, Store, Tracing, Video,
 };
 pub(crate) use primitives::{Filesystem, LogFormat};
@@ -2,9 +2,8 @@ use crate::{
     discover::Discovery,
     error::Error,
     formats::{InternalFormat, InternalVideoFormat},
-    magick::PolicyDir,
     serde_str::Serde,
-    tmp_file::TmpDir,
+    state::State,
 };
 use actix_web::web;
 use time::{format_description::well_known::Rfc3339, OffsetDateTime};

@@ -81,18 +80,13 @@ impl Details {
     }

     #[tracing::instrument(level = "debug", skip_all)]
-    pub(crate) async fn from_bytes(
-        tmp_dir: &TmpDir,
-        policy_dir: &PolicyDir,
-        timeout: u64,
-        input: web::Bytes,
-    ) -> Result<Self, Error> {
+    pub(crate) async fn from_bytes<S>(state: &State<S>, input: web::Bytes) -> Result<Self, Error> {
         let Discovery {
             input,
             width,
             height,
             frames,
-        } = crate::discover::discover_bytes(tmp_dir, policy_dir, timeout, input).await?;
+        } = crate::discover::discover_bytes(state, input).await?;

         Ok(Details::from_parts(
             input.internal_format(),
@@ -4,7 +4,7 @@ mod magick;

 use actix_web::web::Bytes;

-use crate::{formats::InputFile, magick::PolicyDir, tmp_file::TmpDir};
+use crate::{formats::InputFile, state::State};

 #[derive(Debug, PartialEq, Eq)]
 pub(crate) struct Discovery {

@@ -27,18 +27,16 @@ pub(crate) enum DiscoverError {
 }

 #[tracing::instrument(level = "trace", skip_all)]
-pub(crate) async fn discover_bytes(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    timeout: u64,
+pub(crate) async fn discover_bytes<S>(
+    state: &State<S>,
     bytes: Bytes,
 ) -> Result<Discovery, crate::error::Error> {
-    let discovery = ffmpeg::discover_bytes(tmp_dir, timeout, bytes.clone()).await?;
+    let discovery = ffmpeg::discover_bytes(state, bytes.clone()).await?;

-    let discovery =
-        magick::confirm_bytes(tmp_dir, policy_dir, discovery, timeout, bytes.clone()).await?;
+    let discovery = magick::confirm_bytes(state, discovery, bytes.clone()).await?;

-    let discovery = exiftool::check_reorient(discovery, timeout, bytes).await?;
+    let discovery =
+        exiftool::check_reorient(discovery, bytes, state.config.media.process_timeout).await?;

     Ok(discovery)
 }
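For orientation: discover_bytes above chains three probes over the same bytes. ffmpeg goes first and may fail to identify the input, ImageMagick then confirms or fills in the result, and exiftool fixes orientation last. A minimal self-contained sketch of that shape, with stub stages standing in for the real subprocess calls:

// Stub three-stage probe chain mirroring discover_bytes. Only the control
// flow is taken from the diff; the stage bodies are placeholders.
#[derive(Debug, Clone)]
struct Discovery {
    width: u16,
    height: u16,
}

async fn ffmpeg_probe(_bytes: &[u8]) -> Option<Discovery> {
    None // ffmpeg cannot identify every format, so its result is optional
}

async fn magick_confirm(previous: Option<Discovery>, _bytes: &[u8]) -> Discovery {
    previous.unwrap_or(Discovery { width: 640, height: 480 })
}

async fn exiftool_reorient(mut discovery: Discovery, _bytes: &[u8]) -> Discovery {
    // pretend EXIF said the image is rotated a quarter turn
    std::mem::swap(&mut discovery.width, &mut discovery.height);
    discovery
}

async fn discover(bytes: &[u8]) -> Discovery {
    let discovery = ffmpeg_probe(bytes).await;
    let discovery = magick_confirm(discovery, bytes).await;
    exiftool_reorient(discovery, bytes).await
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    println!("{:?}", discover(b"not really an image").await);
}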
@@ -16,8 +16,8 @@ pub(super) async fn check_reorient(
         height,
         frames,
     }: Discovery,
-    timeout: u64,
     bytes: Bytes,
+    timeout: u64,
 ) -> Result<Discovery, ExifError> {
     let input = match input {
         InputFile::Image(ImageInput { format, .. }) => {
@@ -10,7 +10,7 @@ use crate::{
         Mp4AudioCodec, Mp4Codec, WebmAlphaCodec, WebmAudioCodec, WebmCodec,
     },
     process::Process,
-    tmp_file::TmpDir,
+    state::State,
 };
 use actix_web::web::Bytes;

@@ -158,12 +158,11 @@ struct Flags {
 }

 #[tracing::instrument(skip_all)]
-pub(super) async fn discover_bytes(
-    tmp_dir: &TmpDir,
-    timeout: u64,
+pub(super) async fn discover_bytes<S>(
+    state: &State<S>,
     bytes: Bytes,
 ) -> Result<Option<Discovery>, FfMpegError> {
-    discover_file(tmp_dir, timeout, move |mut file| {
+    discover_file(state, move |mut file| {
         let bytes = bytes.clone();

         async move {

@@ -191,16 +190,12 @@ async fn allows_alpha(pixel_format: &str, timeout: u64) -> Result<bool, FfMpegEr
 }

 #[tracing::instrument(level = "debug", skip_all)]
-async fn discover_file<F, Fut>(
-    tmp_dir: &TmpDir,
-    timeout: u64,
-    f: F,
-) -> Result<Option<Discovery>, FfMpegError>
+async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Option<Discovery>, FfMpegError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, FfMpegError>>,
 {
-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(FfMpegError::CreateDir)?;

@@ -226,7 +221,7 @@ where
             input_file.as_os_str(),
         ],
         &[],
-        timeout,
+        state.config.media.process_timeout,
     )?
     .read()
     .into_vec()

@@ -250,7 +245,7 @@ where
             ..
         }) = &mut discovery.input
         {
-            *alpha = allows_alpha(&pixel_format, timeout).await?;
+            *alpha = allows_alpha(&pixel_format, state.config.media.process_timeout).await?;
         }
     }
@@ -6,9 +6,9 @@ use actix_web::web::Bytes;
 use crate::{
     discover::DiscoverError,
     formats::{AnimationFormat, ImageFormat, ImageInput, InputFile},
-    magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
+    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
     process::Process,
-    tmp_file::TmpDir,
+    state::State,
 };

 use super::Discovery;

@@ -31,11 +31,9 @@ struct Geometry {
 }

 #[tracing::instrument(skip_all)]
-pub(super) async fn confirm_bytes(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn confirm_bytes<S>(
+    state: &State<S>,
     discovery: Option<Discovery>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<Discovery, MagickError> {
     match discovery {

@@ -51,7 +49,7 @@ pub(super) async fn confirm_bytes(
         }
     }

-    discover_file(tmp_dir, policy_dir, timeout, move |mut file| async move {
+    discover_file(state, move |mut file| async move {
         file.write_from_bytes(bytes)
             .await
             .map_err(MagickError::Write)?;

@@ -62,22 +60,18 @@ pub(super) async fn confirm_bytes(
 }

 #[tracing::instrument(level = "debug", skip_all)]
-async fn discover_file<F, Fut>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    timeout: u64,
-    f: F,
-) -> Result<Discovery, MagickError>
+async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Discovery, MagickError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
 {
-    let temporary_path = tmp_dir
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;

-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(MagickError::CreateDir)?;

@@ -90,7 +84,7 @@ where

     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];

     let res = Process::run(

@@ -102,7 +96,7 @@ where
             "JSON:".as_ref(),
         ],
         &envs,
-        timeout,
+        state.config.media.process_timeout,
     )?
     .read()
     .into_string()
@@ -12,13 +12,6 @@ pub(crate) use video::{
     OutputVideo, VideoCodec, WebmAlphaCodec, WebmAudioCodec, WebmCodec,
 };

-#[derive(Clone, Debug)]
-pub(crate) struct Validations<'a> {
-    pub(crate) image: &'a crate::config::Image,
-    pub(crate) animation: &'a crate::config::Animation,
-    pub(crate) video: &'a crate::config::Video,
-}
-
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum InputFile {
     Image(ImageInput),
src/generate.rs (130 changed lines)
@@ -7,10 +7,9 @@ use crate::{
     error::{Error, UploadError},
     formats::{ImageFormat, InputProcessableFormat, InternalVideoFormat, ProcessableFormat},
     future::{WithMetrics, WithTimeout},
-    magick::PolicyDir,
-    repo::{ArcRepo, Hash, VariantAlreadyExists},
+    repo::{Hash, VariantAlreadyExists},
+    state::State,
     store::Store,
-    tmp_file::TmpDir,
 };
 use actix_web::web::Bytes;
 use std::{

@@ -49,47 +48,43 @@ impl Drop for MetricsGuard {
     }
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, hash, process_map, config))]
+#[tracing::instrument(skip(state, process_map, hash))]
 pub(crate) async fn generate<S: Store + 'static>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     process_map: &ProcessMap,
     format: InputProcessableFormat,
     thumbnail_path: PathBuf,
     thumbnail_args: Vec<String>,
     original_details: &Details,
-    config: &crate::config::Configuration,
     hash: Hash,
 ) -> Result<(Details, Bytes), Error> {
-    if config.server.danger_dummy_mode {
-        let identifier = repo
+    if state.config.server.danger_dummy_mode {
+        let identifier = state
+            .repo
             .identifier(hash)
             .await?
             .ok_or(UploadError::MissingIdentifier)?;

-        let bytes = store.to_bytes(&identifier, None, None).await?.into_bytes();
+        let bytes = state
+            .store
+            .to_bytes(&identifier, None, None)
+            .await?
+            .into_bytes();

         Ok((original_details.clone(), bytes))
     } else {
         let process_fut = process(
-            tmp_dir,
-            policy_dir,
-            repo,
-            store,
+            state,
             format,
             thumbnail_path.clone(),
             thumbnail_args,
             original_details,
-            config,
             hash.clone(),
         );

         let (details, bytes) = process_map
             .process(hash, thumbnail_path, process_fut)
-            .with_timeout(Duration::from_secs(config.media.process_timeout * 4))
+            .with_timeout(Duration::from_secs(state.config.media.process_timeout * 4))
             .with_metrics("pict-rs.generate.process")
             .await
             .map_err(|_| UploadError::ProcessTimeout)??;

@@ -98,38 +93,21 @@ pub(crate) async fn generate<S: Store + 'static>(
     }
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, hash, config))]
+#[tracing::instrument(skip(state, hash))]
 async fn process<S: Store + 'static>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     output_format: InputProcessableFormat,
     thumbnail_path: PathBuf,
     thumbnail_args: Vec<String>,
     original_details: &Details,
-    config: &crate::config::Configuration,
     hash: Hash,
 ) -> Result<(Details, Bytes), Error> {
     let guard = MetricsGuard::guard();
     let permit = crate::process_semaphore().acquire().await?;

-    let identifier = input_identifier(
-        tmp_dir,
-        policy_dir,
-        repo,
-        store,
-        output_format,
-        hash.clone(),
-        original_details,
-        &config.media,
-    )
-    .await?;
+    let identifier = input_identifier(state, output_format, hash.clone(), original_details).await?;

-    let input_details =
-        crate::ensure_details_identifier(tmp_dir, policy_dir, repo, store, config, &identifier)
-            .await?;
+    let input_details = crate::ensure_details_identifier(state, &identifier).await?;

     let input_format = input_details
         .internal_format()

@@ -139,21 +117,19 @@ async fn process<S: Store + 'static>(
     let format = input_format.process_to(output_format);

     let quality = match format {
-        ProcessableFormat::Image(format) => config.media.image.quality_for(format),
-        ProcessableFormat::Animation(format) => config.media.animation.quality_for(format),
+        ProcessableFormat::Image(format) => state.config.media.image.quality_for(format),
+        ProcessableFormat::Animation(format) => state.config.media.animation.quality_for(format),
     };

-    let stream = store.to_stream(&identifier, None, None).await?;
+    let stream = state.store.to_stream(&identifier, None, None).await?;

     let vec = crate::magick::process_image_stream_read(
-        tmp_dir,
-        policy_dir,
+        state,
         stream,
         thumbnail_args,
         input_format,
         format,
         quality,
-        config.media.process_timeout,
     )
     .await?
     .into_vec()

@@ -164,19 +140,15 @@ async fn process<S: Store + 'static>(

     drop(permit);

-    let details = Details::from_bytes(
-        tmp_dir,
-        policy_dir,
-        config.media.process_timeout,
-        bytes.clone(),
-    )
-    .await?;
+    let details = Details::from_bytes(state, bytes.clone()).await?;

-    let identifier = store
+    let identifier = state
+        .store
         .save_bytes(bytes.clone(), details.media_type())
         .await?;

-    if let Err(VariantAlreadyExists) = repo
+    if let Err(VariantAlreadyExists) = state
+        .repo
         .relate_variant_identifier(
             hash,
             thumbnail_path.to_string_lossy().to_string(),

@@ -184,27 +156,22 @@ async fn process<S: Store + 'static>(
         )
         .await?
     {
-        store.remove(&identifier).await?;
+        state.store.remove(&identifier).await?;
     }

-    repo.relate_details(&identifier, &details).await?;
+    state.repo.relate_details(&identifier, &details).await?;

     guard.disarm();

     Ok((details, bytes)) as Result<(Details, Bytes), Error>
 }

-#[allow(clippy::too_many_arguments)]
 #[tracing::instrument(skip_all)]
 async fn input_identifier<S>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     output_format: InputProcessableFormat,
     hash: Hash,
     original_details: &Details,
-    media: &crate::config::Media,
 ) -> Result<Arc<str>, Error>
 where
     S: Store + 'static,

@@ -220,11 +187,12 @@ where
     };

     if should_thumbnail {
-        if let Some(identifier) = repo.motion_identifier(hash.clone()).await? {
+        if let Some(identifier) = state.repo.motion_identifier(hash.clone()).await? {
             return Ok(identifier);
         };

-        let identifier = repo
+        let identifier = state
+            .repo
             .identifier(hash.clone())
             .await?
             .ok_or(UploadError::MissingIdentifier)?;

@@ -232,24 +200,16 @@ where
         let (reader, media_type) = if let Some(processable_format) =
             original_details.internal_format().processable_format()
         {
-            let thumbnail_format = media.image.format.unwrap_or(ImageFormat::Webp);
+            let thumbnail_format = state.config.media.image.format.unwrap_or(ImageFormat::Webp);

-            let stream = store.to_stream(&identifier, None, None).await?;
+            let stream = state.store.to_stream(&identifier, None, None).await?;

-            let reader = magick::thumbnail(
-                tmp_dir,
-                policy_dir,
-                stream,
-                processable_format,
-                ProcessableFormat::Image(thumbnail_format),
-                media.image.quality_for(thumbnail_format),
-                media.process_timeout,
-            )
-            .await?;
+            let reader =
+                magick::thumbnail(state, stream, processable_format, thumbnail_format).await?;

             (reader, thumbnail_format.media_type())
         } else {
-            let thumbnail_format = match media.image.format {
+            let thumbnail_format = match state.config.media.image.format {
                 Some(ImageFormat::Webp | ImageFormat::Avif | ImageFormat::Jxl) => {
                     ffmpeg::ThumbnailFormat::Webp
                 }

@@ -258,14 +218,12 @@ where
             };

             let reader = ffmpeg::thumbnail(
-                tmp_dir,
-                store.clone(),
+                state,
                 identifier,
                 original_details
                     .video_format()
                     .unwrap_or(InternalVideoFormat::Mp4),
                 thumbnail_format,
-                media.process_timeout,
             )
             .await?;

@@ -273,16 +231,20 @@ where
         };

         let motion_identifier = reader
-            .with_stdout(|stdout| async { store.save_async_read(stdout, media_type).await })
+            .with_stdout(|stdout| async { state.store.save_async_read(stdout, media_type).await })
             .await??;

-        repo.relate_motion_identifier(hash, &motion_identifier)
+        state
+            .repo
+            .relate_motion_identifier(hash, &motion_identifier)
             .await?;

         return Ok(motion_identifier);
     }

-    repo.identifier(hash)
+    state
+        .repo
+        .identifier(hash)
         .await?
         .ok_or(UploadError::MissingIdentifier)
         .map_err(From::from)
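One detail worth noting in generate above: the whole variant-generation future is bounded by four times the configured process timeout, and hitting that bound maps to UploadError::ProcessTimeout. WithTimeout and WithMetrics are pict-rs extension traits not shown on this page; the sketch below reproduces the same deadline behavior with plain tokio::time::timeout.

use std::time::Duration;

// Equivalent of `.with_timeout(Duration::from_secs(process_timeout * 4))`
// using plain tokio. The nested Results mirror the double `??` in the diff.
async fn generate_with_deadline(process_timeout: u64) -> Result<String, String> {
    let process_fut = async {
        // stand-in for process_map.process(hash, thumbnail_path, process_fut)
        tokio::time::sleep(Duration::from_millis(10)).await;
        Ok::<String, String>("details + bytes".into())
    };

    tokio::time::timeout(Duration::from_secs(process_timeout * 4), process_fut)
        .await
        .map_err(|_elapsed| "process timeout".to_string())? // UploadError::ProcessTimeout analogue
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    println!("{:?}", generate_with_deadline(30).await);
}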
@@ -6,8 +6,8 @@ use crate::{
     ffmpeg::FfMpegError,
     formats::InternalVideoFormat,
     process::{Process, ProcessRead},
+    state::State,
     store::Store,
-    tmp_file::TmpDir,
 };

 #[derive(Clone, Copy, Debug)]

@@ -50,21 +50,19 @@ impl ThumbnailFormat {
     }
 }

-#[tracing::instrument(skip(tmp_dir, store, timeout))]
+#[tracing::instrument(skip(state))]
 pub(super) async fn thumbnail<S: Store>(
-    tmp_dir: &TmpDir,
-    store: S,
+    state: &State<S>,
     from: Arc<str>,
     input_format: InternalVideoFormat,
     format: ThumbnailFormat,
-    timeout: u64,
 ) -> Result<ProcessRead, FfMpegError> {
-    let input_file = tmp_dir.tmp_file(Some(input_format.file_extension()));
+    let input_file = state.tmp_dir.tmp_file(Some(input_format.file_extension()));
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(FfMpegError::CreateDir)?;

-    let output_file = tmp_dir.tmp_file(Some(format.to_file_extension()));
+    let output_file = state.tmp_dir.tmp_file(Some(format.to_file_extension()));
     crate::store::file_store::safe_create_parent(&output_file)
         .await
         .map_err(FfMpegError::CreateDir)?;

@@ -72,7 +70,8 @@ pub(super) async fn thumbnail<S: Store>(
     let mut tmp_one = crate::file::File::create(&input_file)
         .await
         .map_err(FfMpegError::CreateFile)?;
-    let stream = store
+    let stream = state
+        .store
         .to_stream(&from, None, None)
         .await
         .map_err(FfMpegError::Store)?;

@@ -99,7 +98,7 @@ pub(super) async fn thumbnail<S: Store>(
             output_file.as_os_str(),
         ],
         &[],
-        timeout,
+        state.config.media.process_timeout,
     )?;

     let res = process.wait().await;
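The instrument attributes shrink with the signatures: `skip(tmp_dir, store, timeout)` becomes `skip(state)`. A small hedged example of why the skip list matters: `#[tracing::instrument]` records every non-skipped argument on the span via its Debug impl, so a parameter whose type has no Debug impl (like the stub State below) must be skipped for the code to compile.

use tracing::instrument;

// Stub without a Debug impl, standing in for pict-rs's State<S>.
struct State {
    timeout: u64,
}

// Non-skipped parameters are recorded on the span using their Debug impls,
// so `state` must be listed in skip(); `from` (a &str) is recorded as usual.
#[instrument(skip(state))]
fn thumbnail(state: &State, from: &str) {
    tracing::debug!(timeout = state.timeout, "generating thumbnail for {from}");
}

fn main() {
    tracing_subscriber::fmt()
        .with_max_level(tracing::Level::DEBUG)
        .init();
    thumbnail(&State { timeout: 15 }, "some-identifier");
}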
@@ -3,32 +3,33 @@ use std::ffi::OsStr;
 use actix_web::web::Bytes;

 use crate::{
-    formats::ProcessableFormat,
-    magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
+    formats::{ImageFormat, ProcessableFormat},
+    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
     process::{Process, ProcessRead},
+    state::State,
     stream::LocalBoxStream,
-    tmp_file::TmpDir,
 };

-async fn thumbnail_animation<F, Fut>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+async fn thumbnail_animation<S, F, Fut>(
+    state: &State<S>,
     input_format: ProcessableFormat,
-    format: ProcessableFormat,
-    quality: Option<u8>,
-    timeout: u64,
+    thumbnail_format: ImageFormat,
     write_file: F,
 ) -> Result<ProcessRead, MagickError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
 {
-    let temporary_path = tmp_dir
+    let format = ProcessableFormat::Image(thumbnail_format);
+    let quality = state.config.media.image.quality_for(thumbnail_format);
+
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;

-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(MagickError::CreateDir)?;

@@ -62,10 +63,10 @@ where

     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];

-    let reader = Process::run("magick", &args, &envs, timeout)?
+    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?
         .read()
         .add_extras(input_file)
         .add_extras(temporary_path);

@@ -73,22 +74,16 @@ where
     Ok(reader)
 }

-pub(super) async fn thumbnail(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn thumbnail<S>(
+    state: &State<S>,
     stream: LocalBoxStream<'static, std::io::Result<Bytes>>,
     input_format: ProcessableFormat,
-    format: ProcessableFormat,
-    quality: Option<u8>,
-    timeout: u64,
+    thumbnail_format: ImageFormat,
 ) -> Result<ProcessRead, MagickError> {
     thumbnail_animation(
-        tmp_dir,
-        policy_dir,
+        state,
         input_format,
-        format,
-        quality,
-        timeout,
+        thumbnail_format,
         |mut tmp_file| async move {
             tmp_file
                 .write_from_stream(stream)
src/ingest.rs (136 changed lines)
@@ -4,22 +4,21 @@ use crate::{
     bytes_stream::BytesStream,
     details::Details,
     error::{Error, UploadError},
-    formats::{InternalFormat, Validations},
+    formats::InternalFormat,
     future::WithMetrics,
-    magick::PolicyDir,
     repo::{Alias, ArcRepo, DeleteToken, Hash},
+    state::State,
     store::Store,
-    tmp_file::TmpDir,
 };
 use actix_web::web::Bytes;
 use futures_core::Stream;
 use reqwest::Body;
 use reqwest_middleware::ClientWithMiddleware;

 use streem::IntoStreamer;
 use tracing::{Instrument, Span};

 mod hasher;
-use hasher::{Hasher, State};
+use hasher::Hasher;

 #[derive(Debug)]
 pub(crate) struct Session {

@@ -50,12 +49,17 @@ where
 }

 async fn process_ingest<S>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    store: &S,
+    state: &State<S>,
     stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
-    media: &crate::config::Media,
-) -> Result<(InternalFormat, Arc<str>, Details, Rc<RefCell<State>>), Error>
+) -> Result<
+    (
+        InternalFormat,
+        Arc<str>,
+        Details,
+        Rc<RefCell<hasher::State>>,
+    ),
+    Error,
+>
 where
     S: Store,
 {

@@ -65,43 +69,30 @@ where

     let permit = crate::process_semaphore().acquire().await?;

-    let prescribed = Validations {
-        image: &media.image,
-        animation: &media.animation,
-        video: &media.video,
-    };
-
     tracing::trace!("Validating bytes");
-    let (input_type, process_read) = crate::validate::validate_bytes(
-        tmp_dir,
-        policy_dir,
-        bytes,
-        prescribed,
-        media.process_timeout,
-    )
-    .await?;
+    let (input_type, process_read) = crate::validate::validate_bytes(state, bytes).await?;

-    let process_read = if let Some(operations) = media.preprocess_steps() {
+    let process_read = if let Some(operations) = state.config.media.preprocess_steps() {
         if let Some(format) = input_type.processable_format() {
             let (_, magick_args) =
                 crate::processor::build_chain(operations, format.file_extension())?;

             let quality = match format {
-                crate::formats::ProcessableFormat::Image(format) => media.image.quality_for(format),
+                crate::formats::ProcessableFormat::Image(format) => {
+                    state.config.media.image.quality_for(format)
+                }
                 crate::formats::ProcessableFormat::Animation(format) => {
-                    media.animation.quality_for(format)
+                    state.config.media.animation.quality_for(format)
                 }
             };

             crate::magick::process_image_process_read(
-                tmp_dir,
-                policy_dir,
+                state,
                 process_read,
                 magick_args,
                 format,
                 format,
                 quality,
-                media.process_timeout,
             )
             .await?
         } else {

@@ -111,36 +102,39 @@ where
         process_read
     };

-    let (state, identifier) = process_read
+    let (hash_state, identifier) = process_read
         .with_stdout(|stdout| async move {
             let hasher_reader = Hasher::new(stdout);
-            let state = hasher_reader.state();
+            let hash_state = hasher_reader.state();

-            store
+            state
+                .store
                 .save_async_read(hasher_reader, input_type.media_type())
                 .await
-                .map(move |identifier| (state, identifier))
+                .map(move |identifier| (hash_state, identifier))
         })
         .await??;

-    let bytes_stream = store.to_bytes(&identifier, None, None).await?;
-    let details = Details::from_bytes(
-        tmp_dir,
-        policy_dir,
-        media.process_timeout,
-        bytes_stream.into_bytes(),
-    )
-    .await?;
+    let bytes_stream = state.store.to_bytes(&identifier, None, None).await?;
+    let details = Details::from_bytes(state, bytes_stream.into_bytes()).await?;

     drop(permit);

-    Ok((input_type, identifier, details, state))
+    Ok((input_type, identifier, details, hash_state))
 }

 async fn dummy_ingest<S>(
-    store: &S,
+    state: &State<S>,
     stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
-) -> Result<(InternalFormat, Arc<str>, Details, Rc<RefCell<State>>), Error>
+) -> Result<
+    (
+        InternalFormat,
+        Arc<str>,
+        Details,
+        Rc<RefCell<hasher::State>>,
+    ),
+    Error,
+>
 where
     S: Store,
 {

@@ -152,55 +146,51 @@ where
     let reader = Box::pin(tokio_util::io::StreamReader::new(stream));

     let hasher_reader = Hasher::new(reader);
-    let state = hasher_reader.state();
+    let hash_state = hasher_reader.state();

     let input_type = InternalFormat::Image(crate::formats::ImageFormat::Png);

-    let identifier = store
+    let identifier = state
+        .store
         .save_async_read(hasher_reader, input_type.media_type())
         .await?;

     let details = Details::danger_dummy(input_type);

-    Ok((input_type, identifier, details, state))
+    Ok((input_type, identifier, details, hash_state))
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, client, stream, config))]
+#[tracing::instrument(skip(state, stream))]
 pub(crate) async fn ingest<S>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
+    state: &State<S>,
     stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
     declared_alias: Option<Alias>,
-    config: &crate::config::Configuration,
 ) -> Result<Session, Error>
 where
     S: Store,
 {
-    let (input_type, identifier, details, state) = if config.server.danger_dummy_mode {
-        dummy_ingest(store, stream).await?
+    let (input_type, identifier, details, hash_state) = if state.config.server.danger_dummy_mode {
+        dummy_ingest(state, stream).await?
     } else {
-        process_ingest(tmp_dir, policy_dir, store, stream, &config.media).await?
+        process_ingest(state, stream).await?
     };

     let mut session = Session {
-        repo: repo.clone(),
+        repo: state.repo.clone(),
         delete_token: DeleteToken::generate(),
         hash: None,
         alias: None,
         identifier: Some(identifier.clone()),
     };

-    if let Some(endpoint) = &config.media.external_validation {
-        let stream = store.to_stream(&identifier, None, None).await?;
+    if let Some(endpoint) = &state.config.media.external_validation {
+        let stream = state.store.to_stream(&identifier, None, None).await?;

-        let response = client
+        let response = state
+            .client
             .post(endpoint.as_str())
             .timeout(Duration::from_secs(
-                config.media.external_validation_timeout,
+                state.config.media.external_validation_timeout,
             ))
             .header("Content-Type", input_type.media_type().as_ref())
             .body(Body::wrap_stream(crate::stream::make_send(stream)))

@@ -214,13 +204,13 @@ where
         }
     }

-    let (hash, size) = state.borrow_mut().finalize_reset();
+    let (hash, size) = hash_state.borrow_mut().finalize_reset();

     let hash = Hash::new(hash, size, input_type);

-    save_upload(&mut session, repo, store, hash.clone(), &identifier).await?;
+    save_upload(&mut session, state, hash.clone(), &identifier).await?;

-    repo.relate_details(&identifier, &details).await?;
+    state.repo.relate_details(&identifier, &details).await?;

     if let Some(alias) = declared_alias {
         session.add_existing_alias(hash, alias).await?

@@ -234,17 +224,21 @@ where
 #[tracing::instrument(level = "trace", skip_all)]
 async fn save_upload<S>(
     session: &mut Session,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     hash: Hash,
     identifier: &Arc<str>,
 ) -> Result<(), Error>
 where
     S: Store,
 {
-    if repo.create_hash(hash.clone(), identifier).await?.is_err() {
+    if state
+        .repo
+        .create_hash(hash.clone(), identifier)
+        .await?
+        .is_err()
+    {
         // duplicate upload
-        store.remove(identifier).await?;
+        state.store.remove(identifier).await?;
         session.identifier.take();
         return Ok(());
     }
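A side effect of the consolidation shows up throughout ingest: the local binding for the hasher's shared state used to be named state, which now collides with the new State<S> parameter, hence the rename to hash_state. The Rc<RefCell<...>> handoff itself is unchanged; below is a self-contained sketch of that pattern using sha2 (an assumption for illustration; the actual Hasher internals are not shown in this diff).

use sha2::{Digest, Sha256};
use std::{cell::RefCell, rc::Rc};

// Shared hash state: the reading side updates it as bytes flow through,
// and the caller finalizes it afterwards, as ingest does above.
struct HashState {
    hasher: Sha256,
    size: u64,
}

impl HashState {
    fn finalize_reset(&mut self) -> (Vec<u8>, u64) {
        let hash = self.hasher.finalize_reset().to_vec();
        (hash, std::mem::take(&mut self.size))
    }
}

fn main() {
    let hash_state = Rc::new(RefCell::new(HashState {
        hasher: Sha256::new(),
        size: 0,
    }));

    // Stand-in for the store save that pulls bytes through the Hasher reader.
    for chunk in [b"hello ".as_slice(), b"world".as_slice()] {
        let mut guard = hash_state.borrow_mut();
        guard.hasher.update(chunk);
        guard.size += chunk.len() as u64;
    }

    // Mirrors `hash_state.borrow_mut().finalize_reset()` in the diff.
    let (hash, size) = hash_state.borrow_mut().finalize_reset();
    println!("{size} bytes, sha256 starts {:02x}{:02x}", hash[0], hash[1]);
}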
src/lib.rs (1127 changed lines): diff suppressed because it is too large.
@@ -7,6 +7,7 @@ use crate::{
     error_code::ErrorCode,
     formats::ProcessableFormat,
     process::{Process, ProcessError, ProcessRead},
+    state::State,
     stream::LocalBoxStream,
     tmp_file::{TmpDir, TmpFolder},
 };

@@ -85,27 +86,25 @@ impl MagickError {
     }
 }

-#[allow(clippy::too_many_arguments)]
-async fn process_image<F, Fut>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+async fn process_image<S, F, Fut>(
+    state: &State<S>,
     process_args: Vec<String>,
     input_format: ProcessableFormat,
     format: ProcessableFormat,
     quality: Option<u8>,
-    timeout: u64,
     write_file: F,
 ) -> Result<ProcessRead, MagickError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
 {
-    let temporary_path = tmp_dir
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;

-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(MagickError::CreateDir)?;

@@ -143,10 +142,10 @@ where

     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];

-    let reader = Process::run("magick", &args, &envs, timeout)?
+    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?
         .read()
         .add_extras(input_file)
         .add_extras(temporary_path);

@@ -154,25 +153,20 @@ where
     Ok(reader)
 }

-#[allow(clippy::too_many_arguments)]
-pub(crate) async fn process_image_stream_read(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(crate) async fn process_image_stream_read<S>(
+    state: &State<S>,
     stream: LocalBoxStream<'static, std::io::Result<Bytes>>,
     args: Vec<String>,
     input_format: ProcessableFormat,
     format: ProcessableFormat,
     quality: Option<u8>,
-    timeout: u64,
 ) -> Result<ProcessRead, MagickError> {
     process_image(
-        tmp_dir,
-        policy_dir,
+        state,
         args,
         input_format,
         format,
         quality,
-        timeout,
         |mut tmp_file| async move {
             tmp_file
                 .write_from_stream(stream)

@@ -184,25 +178,20 @@ pub(crate) async fn process_image_stream_read(
         .await
 }

-#[allow(clippy::too_many_arguments)]
-pub(crate) async fn process_image_process_read(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(crate) async fn process_image_process_read<S>(
+    state: &State<S>,
     process_read: ProcessRead,
     args: Vec<String>,
     input_format: ProcessableFormat,
     format: ProcessableFormat,
     quality: Option<u8>,
-    timeout: u64,
 ) -> Result<ProcessRead, MagickError> {
     process_image(
-        tmp_dir,
-        policy_dir,
+        state,
         args,
         input_format,
         format,
         quality,
-        timeout,
         |mut tmp_file| async move {
             process_read
                 .with_stdout(|stdout| async {
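The two environment variables in these hunks are real ImageMagick knobs: MAGICK_TEMPORARY_PATH redirects scratch files, and MAGICK_CONFIGURE_PATH points at the directory holding policy.xml, which is how pict-rs applies its security policy to every magick invocation. A hedged standalone sketch using tokio's Command directly (pict-rs wraps this in its own Process type, and the paths and argument list here are placeholders):

use tokio::process::Command;

// Run `magick` with the same two environment variables the diff reads from
// State. The "JSON:" output target matches the identify args in the diff.
async fn identify_json(
    temporary_path: &str,
    policy_dir: &str,
    input: &str,
) -> std::io::Result<std::process::Output> {
    Command::new("magick")
        .args(["convert", input, "JSON:"])
        .env("MAGICK_TEMPORARY_PATH", temporary_path)
        .env("MAGICK_CONFIGURE_PATH", policy_dir)
        .output()
        .await
}

#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
    let output = identify_json("/tmp/scratch", "/etc/pict-rs-policy", "input.png").await?;
    println!("magick exited with {}", output.status);
    Ok(())
}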
@@ -12,21 +12,15 @@ use streem::IntoStreamer;
 use crate::{
     details::Details,
     error::{Error, UploadError},
-    magick::{ArcPolicyDir, PolicyDir},
     repo::{ArcRepo, Hash},
+    state::State,
     store::Store,
-    tmp_file::{ArcTmpDir, TmpDir},
 };

-#[allow(clippy::too_many_arguments)]
 pub(super) async fn migrate_store<S1, S2>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
     from: S1,
-    to: S2,
+    to: State<S2>,
     skip_missing_files: bool,
-    timeout: u64,
     concurrency: usize,
 ) -> Result<(), Error>
 where

@@ -39,7 +33,7 @@ where
         tracing::warn!("Old store is not configured correctly");
         return Err(e.into());
     }
-    if let Err(e) = to.health_check().await {
+    if let Err(e) = to.repo.health_check().await {
         tracing::warn!("New store is not configured correctly");
         return Err(e.into());
     }

@@ -48,17 +42,8 @@ where

     let mut failure_count = 0;

-    while let Err(e) = do_migrate_store(
-        tmp_dir.clone(),
-        policy_dir.clone(),
-        repo.clone(),
-        from.clone(),
-        to.clone(),
-        skip_missing_files,
-        timeout,
-        concurrency,
-    )
-    .await
+    while let Err(e) =
+        do_migrate_store(from.clone(), to.clone(), skip_missing_files, concurrency).await
     {
         tracing::error!("Migration failed with {}", format!("{e:?}"));

@@ -78,11 +63,8 @@ where
 }

 struct MigrateState<S1, S2> {
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
     from: S1,
-    to: S2,
+    to: State<S2>,
     continuing_migration: bool,
     skip_missing_files: bool,
     initial_repo_size: u64,

@@ -90,26 +72,20 @@ struct MigrateState<S1, S2> {
     pct: AtomicU64,
     index: AtomicU64,
     started_at: Instant,
-    timeout: u64,
 }

-#[allow(clippy::too_many_arguments)]
 async fn do_migrate_store<S1, S2>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
     from: S1,
-    to: S2,
+    to: State<S2>,
     skip_missing_files: bool,
-    timeout: u64,
     concurrency: usize,
 ) -> Result<(), Error>
 where
     S1: Store + 'static,
     S2: Store + 'static,
 {
-    let continuing_migration = repo.is_continuing_migration().await?;
-    let initial_repo_size = repo.size().await?;
+    let continuing_migration = to.repo.is_continuing_migration().await?;
+    let initial_repo_size = to.repo.size().await?;

     if continuing_migration {
         tracing::warn!("Continuing previous migration of {initial_repo_size} total hashes");

@@ -122,15 +98,12 @@ where
     }

     // Hashes are read in a consistent order
-    let stream = std::pin::pin!(repo.hashes());
+    let stream = std::pin::pin!(to.repo.hashes());
     let mut stream = stream.into_streamer();

     let state = Rc::new(MigrateState {
-        tmp_dir: tmp_dir.clone(),
-        policy_dir: policy_dir.clone(),
-        repo: repo.clone(),
         from,
-        to,
+        to: to.clone(),
         continuing_migration,
         skip_missing_files,
         initial_repo_size,

@@ -138,7 +111,6 @@ where
         pct: AtomicU64::new(initial_repo_size / 100),
         index: AtomicU64::new(0),
         started_at: Instant::now(),
-        timeout,
     });

     let mut joinset = tokio::task::JoinSet::new();

@@ -165,7 +137,7 @@ where
     }

     // clean up the migration table to avoid interfering with future migrations
-    repo.clear().await?;
+    to.repo.clear().await?;

     tracing::warn!("Migration completed successfully");

@@ -179,9 +151,6 @@ where
     S2: Store,
 {
     let MigrateState {
-        tmp_dir,
-        policy_dir,
-        repo,
         from,
         to,
         continuing_migration,

@@ -191,24 +160,23 @@ where
         pct,
         index,
         started_at,
-        timeout,
     } = state;

     let current_index = index.fetch_add(1, Ordering::Relaxed);

-    let original_identifier = match repo.identifier(hash.clone()).await {
+    let original_identifier = match to.repo.identifier(hash.clone()).await {
         Ok(Some(identifier)) => identifier,
         Ok(None) => {
             tracing::warn!(
                 "Original File identifier for hash {hash:?} is missing, queue cleanup task",
             );
-            crate::queue::cleanup_hash(repo, hash.clone()).await?;
+            crate::queue::cleanup_hash(&to.repo, hash.clone()).await?;
             return Ok(());
         }
         Err(e) => return Err(e.into()),
     };

-    if repo.is_migrated(&original_identifier).await? {
+    if to.repo.is_migrated(&original_identifier).await? {
         // migrated original for hash - this means we can skip
         return Ok(());
     }

@@ -241,26 +209,16 @@ where
         }
     }

-    if let Some(identifier) = repo.motion_identifier(hash.clone()).await? {
-        if !repo.is_migrated(&identifier).await? {
-            match migrate_file(
-                tmp_dir,
-                policy_dir,
-                repo,
-                from,
-                to,
-                &identifier,
-                *skip_missing_files,
-                *timeout,
-            )
-            .await
-            {
+    if let Some(identifier) = to.repo.motion_identifier(hash.clone()).await? {
+        if !to.repo.is_migrated(&identifier).await? {
+            match migrate_file(from, to, &identifier, *skip_missing_files).await {
                 Ok(new_identifier) => {
-                    migrate_details(repo, &identifier, &new_identifier).await?;
-                    repo.relate_motion_identifier(hash.clone(), &new_identifier)
+                    migrate_details(&to.repo, &identifier, &new_identifier).await?;
+                    to.repo
+                        .relate_motion_identifier(hash.clone(), &new_identifier)
                         .await?;

-                    repo.mark_migrated(&identifier, &new_identifier).await?;
+                    to.repo.mark_migrated(&identifier, &new_identifier).await?;
                 }
                 Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
                     tracing::warn!("Skipping motion file for hash {hash:?}");

@@ -281,28 +239,20 @@ where
         }
     }

-    for (variant, identifier) in repo.variants(hash.clone()).await? {
-        if !repo.is_migrated(&identifier).await? {
-            match migrate_file(
-                tmp_dir,
-                policy_dir,
-                repo,
-                from,
-                to,
-                &identifier,
-                *skip_missing_files,
-                *timeout,
-            )
-            .await
-            {
+    for (variant, identifier) in to.repo.variants(hash.clone()).await? {
+        if !to.repo.is_migrated(&identifier).await? {
+            match migrate_file(from, to, &identifier, *skip_missing_files).await {
                 Ok(new_identifier) => {
-                    migrate_details(repo, &identifier, &new_identifier).await?;
-                    repo.remove_variant(hash.clone(), variant.clone()).await?;
-                    let _ = repo
+                    migrate_details(&to.repo, &identifier, &new_identifier).await?;
+                    to.repo
+                        .remove_variant(hash.clone(), variant.clone())
+                        .await?;
+                    let _ = to
+                        .repo
                         .relate_variant_identifier(hash.clone(), variant, &new_identifier)
                         .await?;

-                    repo.mark_migrated(&identifier, &new_identifier).await?;
+                    to.repo.mark_migrated(&identifier, &new_identifier).await?;
                 }
                 Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
                     tracing::warn!("Skipping variant {variant} for hash {hash:?}",);

@@ -323,23 +273,14 @@ where
         }
     }

-    match migrate_file(
-        tmp_dir,
-        policy_dir,
-        repo,
-        from,
-        to,
-        &original_identifier,
-        *skip_missing_files,
-        *timeout,
-    )
-    .await
-    {
+    match migrate_file(from, to, &original_identifier, *skip_missing_files).await {
         Ok(new_identifier) => {
-            migrate_details(repo, &original_identifier, &new_identifier).await?;
-            repo.update_identifier(hash.clone(), &new_identifier)
+            migrate_details(&to.repo, &original_identifier, &new_identifier).await?;
+            to.repo
+                .update_identifier(hash.clone(), &new_identifier)
                 .await?;
-            repo.mark_migrated(&original_identifier, &new_identifier)
+            to.repo
+                .mark_migrated(&original_identifier, &new_identifier)
                 .await?;
         }
         Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {

@@ -383,16 +324,11 @@ where
     Ok(())
 }

-#[allow(clippy::too_many_arguments)]
 async fn migrate_file<S1, S2>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
     from: &S1,
-    to: &S2,
+    to: &State<S2>,
     identifier: &Arc<str>,
     skip_missing_files: bool,
-    timeout: u64,
 ) -> Result<Arc<str>, MigrateError>
 where
     S1: Store,

@@ -403,7 +339,7 @@ where
     loop {
         tracing::trace!("migrate_file: looping");

-        match do_migrate_file(tmp_dir, policy_dir, repo, from, to, identifier, timeout).await {
+        match do_migrate_file(from, to, identifier).await {
             Ok(identifier) => return Ok(identifier),
             Err(MigrateError::From(e)) if e.is_not_found() && skip_missing_files => {
                 return Err(MigrateError::From(e));

@@ -432,13 +368,9 @@ enum MigrateError {
 }

 async fn do_migrate_file<S1, S2>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
     from: &S1,
-    to: &S2,
+    to: &State<S2>,
     identifier: &Arc<str>,
-    timeout: u64,
 ) -> Result<Arc<str>, MigrateError>
 where
     S1: Store,

@@ -449,7 +381,8 @@ where
         .await
         .map_err(MigrateError::From)?;

-    let details_opt = repo
+    let details_opt = to
+        .repo
         .details(identifier)
         .await
         .map_err(Error::from)

@@ -463,11 +396,11 @@ where
             .await
             .map_err(From::from)
             .map_err(MigrateError::Details)?;
-        let new_details =
-            Details::from_bytes(tmp_dir, policy_dir, timeout, bytes_stream.into_bytes())
-                .await
-                .map_err(MigrateError::Details)?;
-        repo.relate_details(identifier, &new_details)
+        let new_details = Details::from_bytes(to, bytes_stream.into_bytes())
+            .await
+            .map_err(MigrateError::Details)?;
+        to.repo
+            .relate_details(identifier, &new_details)
             .await
             .map_err(Error::from)
             .map_err(MigrateError::Details)?;

@@ -475,6 +408,7 @@ where
     };

     let new_identifier = to
+        .store
         .save_stream(stream, details.media_type())
         .await
         .map_err(MigrateError::To)?;
@@ -126,8 +126,8 @@ pub(crate) enum ProcessError {
     #[error("Failed to cleanup for command {0}")]
     Cleanup(Arc<str>, #[source] std::io::Error),

-    #[error("Unknown process error")]
-    Other(#[source] std::io::Error),
+    #[error("Unknown process error for command {0}")]
+    Other(Arc<str>, #[source] std::io::Error),
 }

 impl ProcessError {

@@ -135,7 +135,7 @@ impl ProcessError {
         match self {
             Self::NotFound(_) => ErrorCode::COMMAND_NOT_FOUND,
             Self::PermissionDenied(_) => ErrorCode::COMMAND_PERMISSION_DENIED,
-            Self::LimitReached | Self::Read(_, _) | Self::Cleanup(_, _) | Self::Other(_) => {
+            Self::LimitReached | Self::Read(_, _) | Self::Cleanup(_, _) | Self::Other(_, _) => {
                 ErrorCode::COMMAND_ERROR
             }
             Self::Timeout(_) => ErrorCode::COMMAND_TIMEOUT,

@@ -180,7 +180,7 @@ impl Process {
                 Err(ProcessError::PermissionDenied(command))
             }
             std::io::ErrorKind::WouldBlock => Err(ProcessError::LimitReached),
-            _ => Err(ProcessError::Other(e)),
+            _ => Err(ProcessError::Other(command, e)),
         },
     }
 }

@@ -223,7 +223,7 @@ impl Process {
                 Ok(())
             }
             Ok(Ok(status)) => Err(ProcessError::Status(command, status)),
-            Ok(Err(e)) => Err(ProcessError::Other(e)),
+            Ok(Err(e)) => Err(ProcessError::Other(command, e)),
             Err(_) => {
                 let _ = child.kill().await;
                 Err(ProcessError::Timeout(command))

@@ -234,7 +234,16 @@ impl Process {
     pub(crate) fn bytes_read(self, input: Bytes) -> ProcessRead {
         self.spawn_fn(move |mut stdin| {
             let mut input = input;
-            async move { stdin.write_all_buf(&mut input).await }
+            async move {
+                match stdin.write_all_buf(&mut input).await {
+                    Ok(()) => Ok(()),
+                    // BrokenPipe means we finished reading from Stdout, so we don't need to write
+                    // to stdin. We'll still error out if the command failed so treat this as a
+                    // success
+                    Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => Ok(()),
+                    Err(e) => Err(e),
+                }
+            }
         })
     }

@@ -275,9 +284,12 @@ impl Process {
                 Ok(())
             }
             Ok(Ok(status)) => Err(ProcessError::Status(command2, status)),
-            Ok(Err(e)) => Err(ProcessError::Other(e)),
+            Ok(Err(e)) => Err(ProcessError::Other(command2, e)),
             Err(_) => {
-                child.kill().await.map_err(ProcessError::Other)?;
+                child
+                    .kill()
+                    .await
+                    .map_err(|e| ProcessError::Other(command2.clone(), e))?;
                 Err(ProcessError::Timeout(command2))
             }
         }
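The bytes_read change above is the one behavioral fix in this file: a child process may legitimately stop reading stdin before the writer is done (for example when it only needs a prefix of the input), and the writer then sees BrokenPipe even though the command succeeded. A standalone demonstration, assuming a Unix head binary is available:

use tokio::io::AsyncWriteExt;
use tokio::process::Command;

// `head -c 5` closes stdin after five bytes, so writing a large buffer hits
// BrokenPipe even though the command exits successfully. Treating that error
// as success, and relying on the exit status instead, is what the diff does.
#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut child = Command::new("head")
        .args(["-c", "5"])
        .stdin(std::process::Stdio::piped())
        .stdout(std::process::Stdio::null())
        .spawn()?;

    let mut stdin = child.stdin.take().expect("stdin piped");
    let big = vec![b'a'; 1 << 20];

    match stdin.write_all(&big).await {
        Ok(()) => {}
        // The reader exited early; the exit status below still tells us
        // whether the command itself failed.
        Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => {}
        Err(e) => return Err(e),
    }
    drop(stdin);

    let status = child.wait().await?;
    println!("head exited with {status}");
    Ok(())
}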
src/queue.rs (147 changed lines)
@@ -1,16 +1,14 @@
 use crate::{
     concurrent_processor::ProcessMap,
-    config::Configuration,
     error::{Error, UploadError},
     formats::InputProcessableFormat,
     future::LocalBoxFuture,
-    magick::ArcPolicyDir,
     repo::{Alias, ArcRepo, DeleteToken, Hash, JobId, UploadId},
     serde_str::Serde,
+    state::State,
     store::Store,
-    tmp_file::ArcTmpDir,
 };
-use reqwest_middleware::ClientWithMiddleware;

 use std::{
     path::PathBuf,
     sync::Arc,

@@ -188,35 +186,12 @@ pub(crate) async fn queue_generate(
     Ok(())
 }

-pub(crate) async fn process_cleanup<S: Store + 'static>(
-    repo: ArcRepo,
-    store: S,
-    config: Configuration,
-) {
-    process_jobs(&repo, &store, &config, CLEANUP_QUEUE, cleanup::perform).await
+pub(crate) async fn process_cleanup<S: Store + 'static>(state: State<S>) {
+    process_jobs(state, CLEANUP_QUEUE, cleanup::perform).await
 }

-pub(crate) async fn process_images<S: Store + 'static>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
-    store: S,
-    client: ClientWithMiddleware,
-    process_map: ProcessMap,
-    config: Configuration,
-) {
-    process_image_jobs(
-        &tmp_dir,
-        &policy_dir,
-        &repo,
-        &store,
-        &client,
-        &process_map,
-        &config,
-        PROCESS_QUEUE,
-        process::perform,
-    )
-    .await
+pub(crate) async fn process_images<S: Store + 'static>(state: State<S>, process_map: ProcessMap) {
+    process_image_jobs(state, process_map, PROCESS_QUEUE, process::perform).await
 }

 struct MetricsGuard {

@@ -250,21 +225,10 @@ impl Drop for MetricsGuard {
     }
 }

-async fn process_jobs<S, F>(
-    repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
-    queue: &'static str,
-    callback: F,
-) where
+async fn process_jobs<S, F>(state: State<S>, queue: &'static str, callback: F)
+where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcRepo,
-            &'a S,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
-        + Copy,
+    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>> + Copy,
 {
     let worker_id = uuid::Uuid::new_v4();

@@ -273,7 +237,7 @@ async fn process_jobs<S, F>(

         tokio::task::yield_now().await;

-        let res = job_loop(repo, store, config, worker_id, queue, callback).await;
+        let res = job_loop(&state, worker_id, queue, callback).await;

         if let Err(e) = res {
             tracing::warn!("Error processing jobs: {}", format!("{e}"));

@@ -291,22 +255,14 @@ async fn process_jobs<S, F>(
 }

 async fn job_loop<S, F>(
-    repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     worker_id: uuid::Uuid,
     queue: &'static str,
     callback: F,
 ) -> Result<(), Error>
 where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcRepo,
-            &'a S,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
-        + Copy,
+    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>> + Copy,
 {
     loop {
         tracing::trace!("job_loop: looping");

@@ -314,20 +270,20 @@ where
         tokio::task::yield_now().await;

         async {
-            let (job_id, job) = repo.pop(queue, worker_id).await?;
+            let (job_id, job) = state.repo.pop(queue, worker_id).await?;

             let guard = MetricsGuard::guard(worker_id, queue);

             let res = heartbeat(
-                repo,
+                &state.repo,
                 queue,
                 worker_id,
                 job_id,
-                (callback)(repo, store, config, job),
+                (callback)(state, job),
             )
             .await;

-            repo.complete_job(queue, worker_id, job_id).await?;
+            state.repo.complete_job(queue, worker_id, job_id).await?;

             res?;

@@ -340,29 +296,14 @@ where
     }
 }

-#[allow(clippy::too_many_arguments)]
 async fn process_image_jobs<S, F>(
-    tmp_dir: &ArcTmpDir,
-    policy_dir: &ArcPolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
-    process_map: &ProcessMap,
-    config: &Configuration,
+    state: State<S>,
+    process_map: ProcessMap,
     queue: &'static str,
     callback: F,
 ) where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcTmpDir,
-            &'a ArcPolicyDir,
-            &'a ArcRepo,
-            &'a S,
-            &'a ClientWithMiddleware,
-            &'a ProcessMap,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
+    for<'a> F: Fn(&'a State<S>, &'a ProcessMap, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>>
         + Copy,
 {
     let worker_id = uuid::Uuid::new_v4();

@@ -372,19 +313,7 @@ async fn process_image_jobs<S, F>(

         tokio::task::yield_now().await;

-        let res = image_job_loop(
-            tmp_dir,
-            policy_dir,
-            repo,
-            store,
-            client,
-            process_map,
-            config,
-            worker_id,
-            queue,
-            callback,
-        )
-        .await;
+        let res = image_job_loop(&state, &process_map, worker_id, queue, callback).await;

         if let Err(e) = res {
             tracing::warn!("Error processing jobs: {}", format!("{e}"));

@@ -401,31 +330,16 @@ async fn process_image_jobs<S, F>(
     }
 }

-#[allow(clippy::too_many_arguments)]
 async fn image_job_loop<S, F>(
-    tmp_dir: &ArcTmpDir,
-    policy_dir: &ArcPolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
+    state: &State<S>,
     process_map: &ProcessMap,
-    config: &Configuration,
     worker_id: uuid::Uuid,
     queue: &'static str,
     callback: F,
 ) -> Result<(), Error>
 where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcTmpDir,
-            &'a ArcPolicyDir,
-            &'a ArcRepo,
-            &'a S,
-            &'a ClientWithMiddleware,
-            &'a ProcessMap,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
+    for<'a> F: Fn(&'a State<S>, &'a ProcessMap, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>>
         + Copy,
 {
     loop {

@@ -434,29 +348,20 @@ where
         tokio::task::yield_now().await;

         async {
-            let (job_id, job) = repo.pop(queue, worker_id).await?;
+            let (job_id, job) = state.repo.pop(queue, worker_id).await?;

             let guard = MetricsGuard::guard(worker_id, queue);

             let res = heartbeat(
-                repo,
+                &state.repo,
                 queue,
                 worker_id,
                 job_id,
-                (callback)(
-                    tmp_dir,
-                    policy_dir,
-                    repo,
-                    store,
-                    client,
-                    process_map,
-                    config,
-                    job,
-                ),
+                (callback)(state, process_map, job),
             )
             .await;

-            repo.complete_job(queue, worker_id, job_id).await?;
+            state.repo.complete_job(queue, worker_id, job_id).await?;

             res?;
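The callback bounds in this file collapse from eight higher-ranked parameters to a State borrow plus the job payload. The for<'a> quantifier is what lets each returned future borrow state for exactly one invocation. A minimal compilable sketch with stub types in place of pict-rs's State and Error:

use std::future::Future;
use std::pin::Pin;

// Same alias shape as pict-rs's future::LocalBoxFuture.
type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;

struct State {
    name: String,
}

async fn run_jobs<F>(state: &State, callback: F)
where
    for<'a> F: Fn(&'a State, u32) -> LocalBoxFuture<'a, Result<(), String>> + Copy,
{
    for job in 0..3 {
        // Each invocation borrows `state` only for the duration of one future.
        callback(state, job).await.expect("job failed");
    }
}

fn perform(state: &State, job: u32) -> LocalBoxFuture<'_, Result<(), String>> {
    Box::pin(async move {
        println!("{} handling job {job}", state.name);
        Ok(())
    })
}

#[tokio::main(flavor = "current_thread")]
async fn main() {
    let state = State { name: "worker".into() };
    // fn items are Copy and higher-ranked over lifetimes, so `perform` fits.
    run_jobs(&state, perform).await;
}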
@@ -10,41 +10,42 @@ use crate::{
     queue::Cleanup,
     repo::{Alias, ArcRepo, DeleteToken, Hash},
     serde_str::Serde,
+    state::State,
     store::Store,
 };

-pub(super) fn perform<'a, S>(
-    repo: &'a ArcRepo,
-    store: &'a S,
-    configuration: &'a Configuration,
+pub(super) fn perform<S>(
+    state: &State<S>,
     job: serde_json::Value,
-) -> LocalBoxFuture<'a, Result<(), Error>>
+) -> LocalBoxFuture<'_, Result<(), Error>>
 where
     S: Store + 'static,
 {
     Box::pin(async move {
         match serde_json::from_value(job) {
             Ok(job) => match job {
-                Cleanup::Hash { hash: in_hash } => hash(repo, in_hash).await?,
+                Cleanup::Hash { hash: in_hash } => hash(&state.repo, in_hash).await?,
                 Cleanup::Identifier {
                     identifier: in_identifier,
-                } => identifier(repo, store, Arc::from(in_identifier)).await?,
+                } => identifier(&state.repo, &state.store, Arc::from(in_identifier)).await?,
                 Cleanup::Alias {
                     alias: stored_alias,
                     token,
                 } => {
                     alias(
-                        repo,
+                        &state.repo,
                         Serde::into_inner(stored_alias),
                         Serde::into_inner(token),
                     )
                     .await?
                 }
-                Cleanup::Variant { hash, variant } => hash_variant(repo, hash, variant).await?,
-                Cleanup::AllVariants => all_variants(repo).await?,
-                Cleanup::OutdatedVariants => outdated_variants(repo, configuration).await?,
-                Cleanup::OutdatedProxies => outdated_proxies(repo, configuration).await?,
-                Cleanup::Prune => prune(repo, store).await?,
+                Cleanup::Variant { hash, variant } => {
+                    hash_variant(&state.repo, hash, variant).await?
+                }
+                Cleanup::AllVariants => all_variants(&state.repo).await?,
+                Cleanup::OutdatedVariants => outdated_variants(&state.repo, &state.config).await?,
+                Cleanup::OutdatedProxies => outdated_proxies(&state.repo, &state.config).await?,
+                Cleanup::Prune => prune(&state.repo, &state.store).await?,
             },
             Err(e) => {
                 tracing::warn!("Invalid job: {}", format!("{e}"));
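After the rewrite, every arm of the match pulls what it needs off the single `state` borrow instead of threading `repo`, `store`, and `configuration` separately. A self-contained sketch of that dispatch shape, with `Cleanup` and the helpers reduced to placeholders:

// Simplified stand-ins; the real Cleanup enum and helpers live in pict-rs.
enum Cleanup {
    Hash { hash: u64 },
    AllVariants,
    Prune,
}

struct State<S> {
    repo: Vec<u64>,
    store: S,
}

async fn perform<S>(state: &State<S>, job: Cleanup) -> Result<(), String> {
    match job {
        // Each arm borrows only the fields it needs from `state`.
        Cleanup::Hash { hash } => cleanup_hash(&state.repo, hash).await,
        Cleanup::AllVariants => cleanup_all_variants(&state.repo).await,
        Cleanup::Prune => prune(&state.repo, &state.store).await,
    }
}

async fn cleanup_hash(repo: &[u64], hash: u64) -> Result<(), String> {
    if repo.contains(&hash) {
        Ok(())
    } else {
        Err(format!("unknown hash {hash}"))
    }
}

async fn cleanup_all_variants(_repo: &[u64]) -> Result<(), String> {
    Ok(())
}

async fn prune<S>(_repo: &[u64], _store: &S) -> Result<(), String> {
    Ok(())
}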
@@ -1,32 +1,23 @@
-use reqwest_middleware::ClientWithMiddleware;
 use time::Instant;
 use tracing::{Instrument, Span};

 use crate::{
     concurrent_processor::ProcessMap,
-    config::Configuration,
     error::{Error, UploadError},
     formats::InputProcessableFormat,
     future::LocalBoxFuture,
     ingest::Session,
-    magick::{ArcPolicyDir, PolicyDir},
     queue::Process,
-    repo::{Alias, ArcRepo, UploadId, UploadResult},
+    repo::{Alias, UploadId, UploadResult},
     serde_str::Serde,
+    state::State,
     store::Store,
-    tmp_file::{ArcTmpDir, TmpDir},
 };
 use std::{path::PathBuf, sync::Arc};

-#[allow(clippy::too_many_arguments)]
 pub(super) fn perform<'a, S>(
-    tmp_dir: &'a ArcTmpDir,
-    policy_dir: &'a ArcPolicyDir,
-    repo: &'a ArcRepo,
-    store: &'a S,
-    client: &'a ClientWithMiddleware,
+    state: &'a State<S>,
     process_map: &'a ProcessMap,
-    config: &'a Configuration,
     job: serde_json::Value,
 ) -> LocalBoxFuture<'a, Result<(), Error>>
 where

@@ -41,15 +32,10 @@ where
             declared_alias,
         } => {
             process_ingest(
-                tmp_dir,
-                policy_dir,
-                repo,
-                store,
-                client,
+                state,
                 Arc::from(identifier),
                 Serde::into_inner(upload_id),
                 declared_alias.map(Serde::into_inner),
-                config,
             )
             .await?
         }

@@ -60,16 +46,12 @@ where
             process_args,
         } => {
             generate(
-                tmp_dir,
-                policy_dir,
-                repo,
-                store,
+                state,
                 process_map,
                 target_format,
                 Serde::into_inner(source),
                 process_path,
                 process_args,
-                config,
             )
             .await?
         }

@@ -117,18 +99,12 @@ impl Drop for UploadGuard {
     }
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, client, config))]
+#[tracing::instrument(skip(state))]
 async fn process_ingest<S>(
-    tmp_dir: &ArcTmpDir,
-    policy_dir: &ArcPolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
+    state: &State<S>,
     unprocessed_identifier: Arc<str>,
     upload_id: UploadId,
     declared_alias: Option<Alias>,
-    config: &Configuration,
 ) -> Result<(), Error>
 where
     S: Store + 'static,

@@ -136,33 +112,19 @@ where
     let guard = UploadGuard::guard(upload_id);

     let fut = async {
-        let tmp_dir = tmp_dir.clone();
-        let policy_dir = policy_dir.clone();
         let ident = unprocessed_identifier.clone();
-        let store2 = store.clone();
-        let repo = repo.clone();
-        let client = client.clone();
+        let state2 = state.clone();

         let current_span = Span::current();
         let span = tracing::info_span!(parent: current_span, "error_boundary");

-        let config = config.clone();
         let error_boundary = crate::sync::abort_on_drop(crate::sync::spawn(
             "ingest-media",
             async move {
-                let stream = crate::stream::from_err(store2.to_stream(&ident, None, None).await?);
+                let stream =
+                    crate::stream::from_err(state2.store.to_stream(&ident, None, None).await?);

-                let session = crate::ingest::ingest(
-                    &tmp_dir,
-                    &policy_dir,
-                    &repo,
-                    &store2,
-                    &client,
-                    stream,
-                    declared_alias,
-                    &config,
-                )
-                .await?;
+                let session = crate::ingest::ingest(&state2, stream, declared_alias).await?;

                 Ok(session) as Result<Session, Error>
             }

@@ -170,7 +132,7 @@ where
         ))
         .await;

-        store.remove(&unprocessed_identifier).await?;
+        state.store.remove(&unprocessed_identifier).await?;

         error_boundary.map_err(|_| UploadError::Canceled)?
     };

@@ -191,62 +153,46 @@ where
         }
     };

-    repo.complete_upload(upload_id, result).await?;
+    state.repo.complete_upload(upload_id, result).await?;

    guard.disarm();

    Ok(())
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(
-    tmp_dir,
-    policy_dir,
-    repo,
-    store,
-    process_map,
-    process_path,
-    process_args,
-    config
-))]
+#[tracing::instrument(skip(state, process_map, process_path, process_args))]
 async fn generate<S: Store + 'static>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     process_map: &ProcessMap,
     target_format: InputProcessableFormat,
     source: Alias,
     process_path: PathBuf,
     process_args: Vec<String>,
-    config: &Configuration,
 ) -> Result<(), Error> {
-    let Some(hash) = repo.hash(&source).await? else {
+    let Some(hash) = state.repo.hash(&source).await? else {
         // Nothing to do
         return Ok(());
     };

     let path_string = process_path.to_string_lossy().to_string();
-    let identifier_opt = repo.variant_identifier(hash.clone(), path_string).await?;
+    let identifier_opt = state
+        .repo
+        .variant_identifier(hash.clone(), path_string)
+        .await?;

     if identifier_opt.is_some() {
         return Ok(());
     }

-    let original_details =
-        crate::ensure_details(tmp_dir, policy_dir, repo, store, config, &source).await?;
+    let original_details = crate::ensure_details(state, &source).await?;

     crate::generate::generate(
-        tmp_dir,
-        policy_dir,
-        repo,
-        store,
+        state,
         process_map,
         target_format,
         process_path,
         process_args,
         &original_details,
-        config,
         hash,
     )
     .await?;
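The ingest still runs on a separate task behind an error boundary so a crashed ingest cannot take the worker down; after the refactor only the single `state2` clone crosses into that task. A rough sketch of the boundary idea using plain `tokio::spawn` (the crate's own `sync::spawn`/`abort_on_drop` helpers differ in detail):

// Requires a tokio runtime; the JoinError covers both panics and aborts.
async fn with_error_boundary<T: Send + 'static>(
    fut: impl std::future::Future<Output = T> + Send + 'static,
) -> Result<T, &'static str> {
    tokio::spawn(fut)
        .await
        .map_err(|_| "ingest task canceled or panicked")
}

A caller would write `let session = with_error_boundary(async move { /* ingest */ }).await?;` and map the error into its own type, much as the diff maps a dropped boundary to `UploadError::Canceled`.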
@@ -7,17 +7,15 @@ use streem::IntoStreamer;
 use tokio::{sync::Semaphore, task::JoinSet};

 use crate::{
-    config::Configuration,
     details::Details,
     error::{Error, UploadError},
-    magick::{ArcPolicyDir, PolicyDir},
     repo::{ArcRepo, DeleteToken, Hash},
     repo_04::{
         AliasRepo as _, HashRepo as _, IdentifierRepo as _, SettingsRepo as _,
         SledRepo as OldSledRepo,
     },
+    state::State,
     store::Store,
-    tmp_file::{ArcTmpDir, TmpDir},
 };

 const GENERATOR_KEY: &str = "last-path";

@@ -80,23 +78,19 @@ pub(crate) async fn migrate_repo(old_repo: ArcRepo, new_repo: ArcRepo) -> Result

 #[tracing::instrument(skip_all)]
 pub(crate) async fn migrate_04<S: Store + 'static>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
     old_repo: OldSledRepo,
-    new_repo: ArcRepo,
-    store: S,
-    config: Configuration,
+    state: State<S>,
 ) -> Result<(), Error> {
     tracing::info!("Running checks");
     if let Err(e) = old_repo.health_check().await {
         tracing::warn!("Old repo is not configured correctly");
         return Err(e.into());
     }
-    if let Err(e) = new_repo.health_check().await {
+    if let Err(e) = state.repo.health_check().await {
         tracing::warn!("New repo is not configured correctly");
         return Err(e.into());
     }
-    if let Err(e) = store.health_check().await {
+    if let Err(e) = state.store.health_check().await {
         tracing::warn!("Store is not configured correctly");
         return Err(e.into());
     }

@@ -116,19 +110,15 @@ pub(crate) async fn migrate_04<S: Store + 'static>(

         if let Ok(hash) = res {
             set.spawn_local(migrate_hash_04(
-                tmp_dir.clone(),
-                policy_dir.clone(),
                 old_repo.clone(),
-                new_repo.clone(),
-                store.clone(),
-                config.clone(),
+                state.clone(),
                 hash.clone(),
             ));
         } else {
             tracing::warn!("Failed to read hash, skipping");
         }

-        while set.len() >= config.upgrade.concurrency {
+        while set.len() >= state.config.upgrade.concurrency {
             tracing::trace!("migrate_04: join looping");

             if set.join_next().await.is_some() {

@@ -156,13 +146,15 @@ pub(crate) async fn migrate_04<S: Store + 'static>(
     }

     if let Some(generator_state) = old_repo.get(GENERATOR_KEY).await? {
-        new_repo
+        state
+            .repo
             .set(GENERATOR_KEY, generator_state.to_vec().into())
             .await?;
     }

     if let Some(generator_state) = old_repo.get(crate::NOT_FOUND_KEY).await? {
-        new_repo
+        state
+            .repo
             .set(crate::NOT_FOUND_KEY, generator_state.to_vec().into())
             .await?;
     }

@@ -193,28 +185,10 @@ async fn migrate_hash(old_repo: ArcRepo, new_repo: ArcRepo, hash: Hash) {
     }
 }

-async fn migrate_hash_04<S: Store>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    old_repo: OldSledRepo,
-    new_repo: ArcRepo,
-    store: S,
-    config: Configuration,
-    old_hash: sled::IVec,
-) {
+async fn migrate_hash_04<S: Store>(old_repo: OldSledRepo, state: State<S>, old_hash: sled::IVec) {
     let mut hash_failures = 0;

-    while let Err(e) = timed_migrate_hash_04(
-        &tmp_dir,
-        &policy_dir,
-        &old_repo,
-        &new_repo,
-        &store,
-        &config,
-        old_hash.clone(),
-    )
-    .await
-    {
+    while let Err(e) = timed_migrate_hash_04(&old_repo, &state, old_hash.clone()).await {
         hash_failures += 1;

         if hash_failures > 10 {

@@ -300,19 +274,13 @@ async fn do_migrate_hash(old_repo: &ArcRepo, new_repo: &ArcRepo, hash: Hash) ->
 }

 async fn timed_migrate_hash_04<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    new_repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     old_hash: sled::IVec,
 ) -> Result<(), Error> {
     tokio::time::timeout(
-        Duration::from_secs(config.media.external_validation_timeout * 6),
-        do_migrate_hash_04(
-            tmp_dir, policy_dir, old_repo, new_repo, store, config, old_hash,
-        ),
+        Duration::from_secs(state.config.media.process_timeout * 6),
+        do_migrate_hash_04(old_repo, state, old_hash),
     )
     .await
     .map_err(|_| UploadError::ProcessTimeout)?

@@ -320,12 +288,8 @@ async fn timed_migrate_hash_04<S: Store>(

 #[tracing::instrument(skip_all)]
 async fn do_migrate_hash_04<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    new_repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     old_hash: sled::IVec,
 ) -> Result<(), Error> {
     let Some(identifier) = old_repo.identifier(old_hash.clone()).await? else {

@@ -333,18 +297,9 @@ async fn do_migrate_hash_04<S: Store>(
         return Ok(());
     };

-    let size = store.len(&identifier).await?;
+    let size = state.store.len(&identifier).await?;

-    let hash_details = set_details(
-        tmp_dir,
-        policy_dir,
-        old_repo,
-        new_repo,
-        store,
-        config,
-        &identifier,
-    )
-    .await?;
+    let hash_details = set_details(old_repo, state, &identifier).await?;

     let aliases = old_repo.aliases_for_hash(old_hash.clone()).await?;
     let variants = old_repo.variants(old_hash.clone()).await?;

@@ -354,7 +309,8 @@ async fn do_migrate_hash_04<S: Store>(

     let hash = Hash::new(hash, size, hash_details.internal_format());

-    let _ = new_repo
+    let _ = state
+        .repo
         .create_hash_with_timestamp(hash.clone(), &identifier, hash_details.created_at())
         .await?;

@@ -364,66 +320,45 @@ async fn do_migrate_hash_04<S: Store>(
             .await?
             .unwrap_or_else(DeleteToken::generate);

-        let _ = new_repo
+        let _ = state
+            .repo
             .create_alias(&alias, &delete_token, hash.clone())
             .await?;
     }

     if let Some(identifier) = motion_identifier {
-        new_repo
+        state
+            .repo
             .relate_motion_identifier(hash.clone(), &identifier)
             .await?;

-        set_details(
-            tmp_dir,
-            policy_dir,
-            old_repo,
-            new_repo,
-            store,
-            config,
-            &identifier,
-        )
-        .await?;
+        set_details(old_repo, state, &identifier).await?;
     }

     for (variant, identifier) in variants {
-        let _ = new_repo
+        let _ = state
+            .repo
             .relate_variant_identifier(hash.clone(), variant.clone(), &identifier)
             .await?;

-        set_details(
-            tmp_dir,
-            policy_dir,
-            old_repo,
-            new_repo,
-            store,
-            config,
-            &identifier,
-        )
-        .await?;
+        set_details(old_repo, state, &identifier).await?;

-        new_repo.accessed_variant(hash.clone(), variant).await?;
+        state.repo.accessed_variant(hash.clone(), variant).await?;
     }

     Ok(())
 }

 async fn set_details<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    new_repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     identifier: &Arc<str>,
 ) -> Result<Details, Error> {
-    if let Some(details) = new_repo.details(identifier).await? {
+    if let Some(details) = state.repo.details(identifier).await? {
         Ok(details)
     } else {
-        let details =
-            fetch_or_generate_details(tmp_dir, policy_dir, old_repo, store, config, identifier)
-                .await?;
-        new_repo.relate_details(identifier, &details).await?;
+        let details = fetch_or_generate_details(old_repo, state, identifier).await?;
+        state.repo.relate_details(identifier, &details).await?;
         Ok(details)
     }
 }

@@ -442,11 +377,8 @@ fn details_semaphore() -> &'static Semaphore {

 #[tracing::instrument(skip_all)]
 async fn fetch_or_generate_details<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     identifier: &Arc<str>,
 ) -> Result<Details, Error> {
     let details_opt = old_repo.details(identifier.clone()).await?;

@@ -454,12 +386,11 @@ async fn fetch_or_generate_details<S: Store>(
     if let Some(details) = details_opt {
         Ok(details)
     } else {
-        let bytes_stream = store.to_bytes(identifier, None, None).await?;
+        let bytes_stream = state.store.to_bytes(identifier, None, None).await?;
         let bytes = bytes_stream.into_bytes();

         let guard = details_semaphore().acquire().await?;
-        let details =
-            Details::from_bytes(tmp_dir, policy_dir, config.media.process_timeout, bytes).await?;
+        let details = Details::from_bytes(state, bytes).await?;
         drop(guard);

         Ok(details)
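One behavioral detail in the hunks above: the per-hash deadline is now derived from `process_timeout` rather than the old `external_validation_timeout`. The wrapper itself is plain `tokio::time::timeout`; a minimal sketch, assuming a tokio runtime and a placeholder migration future:

use std::time::Duration;

// The 6x multiplier mirrors the diff; `migrate_one` stands in for the real
// per-hash migration future.
async fn timed_migrate(process_timeout_secs: u64) -> Result<(), &'static str> {
    tokio::time::timeout(
        Duration::from_secs(process_timeout_secs * 6),
        migrate_one(),
    )
    .await
    .map_err(|_| "migration timed out")?
}

async fn migrate_one() -> Result<(), &'static str> {
    Ok(())
}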
src/state.rs (new file, 13 lines)

@@ -0,0 +1,13 @@
+use reqwest_middleware::ClientWithMiddleware;
+
+use crate::{config::Configuration, magick::ArcPolicyDir, repo::ArcRepo, tmp_file::ArcTmpDir};
+
+#[derive(Clone)]
+pub(crate) struct State<S> {
+    pub(super) config: Configuration,
+    pub(super) tmp_dir: ArcTmpDir,
+    pub(super) policy_dir: ArcPolicyDir,
+    pub(super) repo: ArcRepo,
+    pub(super) store: S,
+    pub(super) client: ClientWithMiddleware,
+}
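Every field of the new struct is either small or a shared handle, which is what makes the blanket `#[derive(Clone)]` cheap enough to hand one clone to each worker or spawned task. A simplified illustration of that layout; the field types are stand-ins, not the real pict-rs ones:

use std::sync::Arc;

#[derive(Clone)]
struct Config {
    process_timeout: u64,
}

// Stand-in State: Arc fields clone by bumping a refcount, Config is small,
// and stores are themselves cheaply cloneable handles.
#[derive(Clone)]
struct State<S> {
    config: Config,
    tmp_dir: Arc<std::path::PathBuf>,
    repo: Arc<str>, // placeholder for ArcRepo
    store: S,
}

fn spawn_workers<S: Clone + Send + Sync + 'static>(state: State<S>) {
    for n in 0..4 {
        let state = state.clone(); // one cheap clone per worker
        std::thread::spawn(move || {
            println!("worker {n}: process timeout {}s", state.config.process_timeout);
        });
    }
}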
src/validate.rs (116 lines changed)

@@ -8,11 +8,10 @@ use crate::{
     error_code::ErrorCode,
     formats::{
         AnimationFormat, AnimationOutput, ImageInput, ImageOutput, InputFile, InputVideoFormat,
-        InternalFormat, Validations,
+        InternalFormat,
     },
-    magick::PolicyDir,
     process::ProcessRead,
-    tmp_file::TmpDir,
+    state::State,
 };
 use actix_web::web::Bytes;

@@ -57,12 +56,9 @@ impl ValidationError {
 const MEGABYTES: usize = 1024 * 1024;

 #[tracing::instrument(skip_all)]
-pub(crate) async fn validate_bytes(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(crate) async fn validate_bytes<S>(
+    state: &State<S>,
     bytes: Bytes,
-    validations: Validations<'_>,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
     if bytes.is_empty() {
         return Err(ValidationError::Empty.into());

@@ -73,70 +69,39 @@ pub(crate) async fn validate_bytes(
         width,
         height,
         frames,
-    } = crate::discover::discover_bytes(tmp_dir, policy_dir, timeout, bytes.clone()).await?;
+    } = crate::discover::discover_bytes(state, bytes.clone()).await?;

     match &input {
         InputFile::Image(input) => {
-            let (format, process_read) = process_image(
-                tmp_dir,
-                policy_dir,
-                bytes,
-                *input,
-                width,
-                height,
-                validations.image,
-                timeout,
-            )
-            .await?;
+            let (format, process_read) = process_image(state, bytes, *input, width, height).await?;

             Ok((format, process_read))
         }
         InputFile::Animation(input) => {
-            let (format, process_read) = process_animation(
-                tmp_dir,
-                policy_dir,
-                bytes,
-                *input,
-                width,
-                height,
-                frames.unwrap_or(1),
-                validations.animation,
-                timeout,
-            )
-            .await?;
+            let (format, process_read) =
+                process_animation(state, bytes, *input, width, height, frames.unwrap_or(1)).await?;

             Ok((format, process_read))
         }
         InputFile::Video(input) => {
-            let (format, process_read) = process_video(
-                tmp_dir,
-                bytes,
-                *input,
-                width,
-                height,
-                frames.unwrap_or(1),
-                validations.video,
-                timeout,
-            )
-            .await?;
+            let (format, process_read) =
+                process_video(state, bytes, *input, width, height, frames.unwrap_or(1)).await?;

             Ok((format, process_read))
         }
     }
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, bytes, validations))]
-async fn process_image(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
+async fn process_image<S>(
+    state: &State<S>,
     bytes: Bytes,
     input: ImageInput,
     width: u16,
     height: u16,
-    validations: &crate::config::Image,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
+    let validations = &state.config.media.image;
+
     if width > validations.max_width {
         return Err(ValidationError::Width.into());
     }

@@ -158,18 +123,9 @@ async fn process_image(
     let process_read = if needs_transcode {
         let quality = validations.quality_for(format);

-        magick::convert_image(
-            tmp_dir,
-            policy_dir,
-            input.format,
-            format,
-            quality,
-            timeout,
-            bytes,
-        )
-        .await?
+        magick::convert_image(state, input.format, format, quality, bytes).await?
     } else {
-        exiftool::clear_metadata_bytes_read(bytes, timeout)?
+        exiftool::clear_metadata_bytes_read(bytes, state.config.media.process_timeout)?
     };

     Ok((InternalFormat::Image(format), process_read))

@@ -201,19 +157,17 @@ fn validate_animation(
     Ok(())
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, bytes, validations))]
-async fn process_animation(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
+async fn process_animation<S>(
+    state: &State<S>,
     bytes: Bytes,
     input: AnimationFormat,
     width: u16,
     height: u16,
     frames: u32,
-    validations: &crate::config::Animation,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
+    let validations = &state.config.media.animation;
+
     validate_animation(bytes.len(), width, height, frames, validations)?;

     let AnimationOutput {

@@ -224,10 +178,9 @@ async fn process_animation(
     let process_read = if needs_transcode {
         let quality = validations.quality_for(format);

-        magick::convert_animation(tmp_dir, policy_dir, input, format, quality, timeout, bytes)
-            .await?
+        magick::convert_animation(state, input, format, quality, bytes).await?
     } else {
-        exiftool::clear_metadata_bytes_read(bytes, timeout)?
+        exiftool::clear_metadata_bytes_read(bytes, state.config.media.process_timeout)?
     };

     Ok((InternalFormat::Animation(format), process_read))

@@ -262,18 +215,17 @@ fn validate_video(
     Ok(())
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, bytes, validations))]
-async fn process_video(
-    tmp_dir: &TmpDir,
+#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
+async fn process_video<S>(
+    state: &State<S>,
     bytes: Bytes,
     input: InputVideoFormat,
     width: u16,
     height: u16,
     frames: u32,
-    validations: &crate::config::Video,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
+    let validations = &state.config.media.video;
+
     validate_video(bytes.len(), width, height, frames, validations)?;

     let output = input.build_output(

@@ -284,7 +236,15 @@ async fn process_video(

     let crf = validations.crf_for(width, height);

-    let process_read = ffmpeg::transcode_bytes(tmp_dir, input, output, crf, timeout, bytes).await?;
+    let process_read = ffmpeg::transcode_bytes(
+        &state.tmp_dir,
+        input,
+        output,
+        crf,
+        state.config.media.process_timeout,
+        bytes,
+    )
+    .await?;

     Ok((
         InternalFormat::Video(output.format.internal_format()),
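The validators above stop taking `validations` and `timeout` parameters and instead read both from `state.config.media`. A self-contained sketch of that lookup pattern with simplified config types:

// Simplified stand-ins for the config tree the diff reads from.
struct ImageLimits {
    max_width: u16,
    max_height: u16,
}

struct MediaConfig {
    image: ImageLimits,
    process_timeout: u64,
}

struct Config {
    media: MediaConfig,
}

struct State<S> {
    config: Config,
    store: S,
}

#[derive(Debug)]
enum ValidationError {
    Width,
    Height,
}

fn validate_image<S>(state: &State<S>, width: u16, height: u16) -> Result<(), ValidationError> {
    // Was an explicit `validations` argument; now a field lookup.
    let limits = &state.config.media.image;

    if width > limits.max_width {
        return Err(ValidationError::Width);
    }
    if height > limits.max_height {
        return Err(ValidationError::Height);
    }
    Ok(())
}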
@@ -4,72 +4,62 @@ use actix_web::web::Bytes;

 use crate::{
     formats::{AnimationFormat, ImageFormat},
-    magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
+    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
     process::{Process, ProcessRead},
-    tmp_file::TmpDir,
+    state::State,
 };

-pub(super) async fn convert_image(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn convert_image<S>(
+    state: &State<S>,
     input: ImageFormat,
     output: ImageFormat,
     quality: Option<u8>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<ProcessRead, MagickError> {
     convert(
-        tmp_dir,
-        policy_dir,
+        state,
         input.magick_format(),
         output.magick_format(),
         false,
         quality,
-        timeout,
         bytes,
     )
     .await
 }

-pub(super) async fn convert_animation(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn convert_animation<S>(
+    state: &State<S>,
     input: AnimationFormat,
     output: AnimationFormat,
     quality: Option<u8>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<ProcessRead, MagickError> {
     convert(
-        tmp_dir,
-        policy_dir,
+        state,
         input.magick_format(),
         output.magick_format(),
         true,
         quality,
-        timeout,
         bytes,
     )
     .await
 }

-#[allow(clippy::too_many_arguments)]
-async fn convert(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+async fn convert<S>(
+    state: &State<S>,
     input: &'static str,
     output: &'static str,
     coalesce: bool,
     quality: Option<u8>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<ProcessRead, MagickError> {
-    let temporary_path = tmp_dir
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;

-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);

     crate::store::file_store::safe_create_parent(&input_file)
         .await

@@ -104,10 +94,10 @@ async fn convert(

     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];

-    let reader = Process::run("magick", &args, &envs, timeout)?.read();
+    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?.read();

     let clean_reader = reader.add_extras(input_file).add_extras(temporary_path);
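As the final hunk shows, the ImageMagick policy and scratch directories reach the `magick` process through environment variables rather than function arguments. A standalone sketch of that mechanism using `std::process::Command`; the file names are illustrative (`MAGICK_CONFIGURE_PATH` is where ImageMagick looks for `policy.xml`):

use std::path::Path;
use std::process::Command;

fn run_magick(policy_dir: &Path, tmp_dir: &Path) -> std::io::Result<()> {
    let status = Command::new("magick")
        .args(["input.png", "output.webp"]) // illustrative arguments
        .env("MAGICK_CONFIGURE_PATH", policy_dir) // directory holding policy.xml
        .env("MAGICK_TEMPORARY_PATH", tmp_dir) // scratch space for intermediate files
        .status()?;

    if !status.success() {
        return Err(std::io::Error::new(std::io::ErrorKind::Other, "magick failed"));
    }
    Ok(())
}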