Mirror of https://git.asonix.dog/asonix/pict-rs (synced 2024-12-22 19:31:35 +00:00)

Merge pull request 'Consolidate a bunch of parameters into a single type' (#50) from asonix/fewer-arguments into main
Reviewed-on: https://git.asonix.dog/asonix/pict-rs/pulls/50
Commit 0bc14c810d
23 changed files with 783 additions and 1582 deletions
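
The change threads a single State<S> value through code that previously took up to eight separate arguments (tmp_dir, policy_dir, repo, store, client, config, and assorted timeouts). The struct itself lives in src/state.rs, which is not part of this excerpt; the sketch below is inferred from the field accesses visible in the diff (state.repo, state.store, state.client, state.config, state.tmp_dir, state.policy_dir) and from the types of the parameters those fields replace, so read it as an approximation rather than the actual definition.

// Approximate reconstruction of src/state.rs (not shown in this diff).
// Field names match the accesses seen below; field types are assumed from
// the parameters they replace. State<S2> is passed by value and cloned in
// src/migrate_store.rs, so the real type must implement Clone.
#[derive(Clone)]
pub(crate) struct State<S> {
    pub(crate) config: crate::config::Configuration,
    pub(crate) tmp_dir: crate::tmp_file::ArcTmpDir,
    pub(crate) policy_dir: crate::magick::ArcPolicyDir,
    pub(crate) repo: crate::repo::ArcRepo,
    pub(crate) store: S,
    pub(crate) client: reqwest_middleware::ClientWithMiddleware,
}

With that one value in scope, helpers such as Details::from_bytes(state, input) pull their timeout from state.config.media.process_timeout instead of receiving a bare timeout: u64, which is what lets most of the #[allow(clippy::too_many_arguments)] attributes below disappear.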

src/backgrounded.rs

@@ -3,6 +3,7 @@ use std::sync::Arc;
 use crate::{
     error::Error,
     repo::{ArcRepo, UploadId},
+    state::State,
     store::Store,
 };
 use actix_web::web::Bytes;

@@ -30,23 +31,23 @@ impl Backgrounded {
         self.identifier.as_ref()
     }
 
-    pub(crate) async fn proxy<S, P>(repo: ArcRepo, store: S, stream: P) -> Result<Self, Error>
+    pub(crate) async fn proxy<S, P>(state: &State<S>, stream: P) -> Result<Self, Error>
     where
         S: Store,
         P: Stream<Item = Result<Bytes, Error>> + 'static,
     {
         let mut this = Self {
-            repo,
+            repo: state.repo.clone(),
             identifier: None,
             upload_id: None,
         };
 
-        this.do_proxy(store, stream).await?;
+        this.do_proxy(&state.store, stream).await?;
 
         Ok(this)
     }
 
-    async fn do_proxy<S, P>(&mut self, store: S, stream: P) -> Result<(), Error>
+    async fn do_proxy<S, P>(&mut self, store: &S, stream: P) -> Result<(), Error>
     where
         S: Store,
         P: Stream<Item = Result<Bytes, Error>> + 'static,

src/config.rs

@@ -12,8 +12,8 @@ use defaults::Defaults;
 
 pub(crate) use commandline::Operation;
 pub(crate) use file::{
-    Animation, ConfigFile as Configuration, Image, Media, ObjectStorage, OpenTelemetry, Postgres,
-    Repo, Sled, Store, Tracing, Video,
+    Animation, ConfigFile as Configuration, Media, ObjectStorage, OpenTelemetry, Postgres, Repo,
+    Sled, Store, Tracing, Video,
 };
 pub(crate) use primitives::{Filesystem, LogFormat};
 

src/details.rs

@@ -2,9 +2,8 @@ use crate::{
     discover::Discovery,
     error::Error,
     formats::{InternalFormat, InternalVideoFormat},
-    magick::PolicyDir,
     serde_str::Serde,
-    tmp_file::TmpDir,
+    state::State,
 };
 use actix_web::web;
 use time::{format_description::well_known::Rfc3339, OffsetDateTime};

@@ -81,18 +80,13 @@ impl Details {
     }
 
     #[tracing::instrument(level = "debug", skip_all)]
-    pub(crate) async fn from_bytes(
-        tmp_dir: &TmpDir,
-        policy_dir: &PolicyDir,
-        timeout: u64,
-        input: web::Bytes,
-    ) -> Result<Self, Error> {
+    pub(crate) async fn from_bytes<S>(state: &State<S>, input: web::Bytes) -> Result<Self, Error> {
         let Discovery {
             input,
             width,
             height,
             frames,
-        } = crate::discover::discover_bytes(tmp_dir, policy_dir, timeout, input).await?;
+        } = crate::discover::discover_bytes(state, input).await?;
 
         Ok(Details::from_parts(
             input.internal_format(),

src/discover.rs

@@ -4,7 +4,7 @@ mod magick;
 
 use actix_web::web::Bytes;
 
-use crate::{formats::InputFile, magick::PolicyDir, tmp_file::TmpDir};
+use crate::{formats::InputFile, state::State};
 
 #[derive(Debug, PartialEq, Eq)]
 pub(crate) struct Discovery {

@@ -27,18 +27,16 @@ pub(crate) enum DiscoverError {
 }
 
 #[tracing::instrument(level = "trace", skip_all)]
-pub(crate) async fn discover_bytes(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    timeout: u64,
+pub(crate) async fn discover_bytes<S>(
+    state: &State<S>,
     bytes: Bytes,
 ) -> Result<Discovery, crate::error::Error> {
-    let discovery = ffmpeg::discover_bytes(tmp_dir, timeout, bytes.clone()).await?;
+    let discovery = ffmpeg::discover_bytes(state, bytes.clone()).await?;
 
-    let discovery =
-        magick::confirm_bytes(tmp_dir, policy_dir, discovery, timeout, bytes.clone()).await?;
+    let discovery = magick::confirm_bytes(state, discovery, bytes.clone()).await?;
 
-    let discovery = exiftool::check_reorient(discovery, timeout, bytes).await?;
+    let discovery =
+        exiftool::check_reorient(discovery, bytes, state.config.media.process_timeout).await?;
 
     Ok(discovery)
 }

src/discover/exiftool.rs

@@ -16,8 +16,8 @@ pub(super) async fn check_reorient(
         height,
         frames,
     }: Discovery,
-    timeout: u64,
     bytes: Bytes,
+    timeout: u64,
 ) -> Result<Discovery, ExifError> {
     let input = match input {
         InputFile::Image(ImageInput { format, .. }) => {

src/discover/ffmpeg.rs

@@ -10,7 +10,7 @@ use crate::{
         Mp4AudioCodec, Mp4Codec, WebmAlphaCodec, WebmAudioCodec, WebmCodec,
     },
     process::Process,
-    tmp_file::TmpDir,
+    state::State,
 };
 use actix_web::web::Bytes;
 

@@ -158,12 +158,11 @@ struct Flags {
 }
 
 #[tracing::instrument(skip_all)]
-pub(super) async fn discover_bytes(
-    tmp_dir: &TmpDir,
-    timeout: u64,
+pub(super) async fn discover_bytes<S>(
+    state: &State<S>,
     bytes: Bytes,
 ) -> Result<Option<Discovery>, FfMpegError> {
-    discover_file(tmp_dir, timeout, move |mut file| {
+    discover_file(state, move |mut file| {
         let bytes = bytes.clone();
 
         async move {

@@ -191,16 +190,12 @@ async fn allows_alpha(pixel_format: &str, timeout: u64) -> Result<bool, FfMpegEr
 }
 
 #[tracing::instrument(level = "debug", skip_all)]
-async fn discover_file<F, Fut>(
-    tmp_dir: &TmpDir,
-    timeout: u64,
-    f: F,
-) -> Result<Option<Discovery>, FfMpegError>
+async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Option<Discovery>, FfMpegError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, FfMpegError>>,
 {
-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(FfMpegError::CreateDir)?;

@@ -226,7 +221,7 @@ where
             input_file.as_os_str(),
         ],
         &[],
-        timeout,
+        state.config.media.process_timeout,
     )?
     .read()
     .into_vec()

@@ -250,7 +245,7 @@ where
             ..
         }) = &mut discovery.input
         {
-            *alpha = allows_alpha(&pixel_format, timeout).await?;
+            *alpha = allows_alpha(&pixel_format, state.config.media.process_timeout).await?;
         }
     }
 

src/discover/magick.rs

@@ -6,9 +6,9 @@ use actix_web::web::Bytes;
 use crate::{
     discover::DiscoverError,
     formats::{AnimationFormat, ImageFormat, ImageInput, InputFile},
-    magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
+    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
     process::Process,
-    tmp_file::TmpDir,
+    state::State,
 };
 
 use super::Discovery;

@@ -31,11 +31,9 @@ struct Geometry {
 }
 
 #[tracing::instrument(skip_all)]
-pub(super) async fn confirm_bytes(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn confirm_bytes<S>(
+    state: &State<S>,
     discovery: Option<Discovery>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<Discovery, MagickError> {
     match discovery {

@@ -51,7 +49,7 @@ pub(super) async fn confirm_bytes(
         }
     }
 
-    discover_file(tmp_dir, policy_dir, timeout, move |mut file| async move {
+    discover_file(state, move |mut file| async move {
         file.write_from_bytes(bytes)
             .await
             .map_err(MagickError::Write)?;

@@ -62,22 +60,18 @@ pub(super) async fn confirm_bytes(
 }
 
 #[tracing::instrument(level = "debug", skip_all)]
-async fn discover_file<F, Fut>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    timeout: u64,
-    f: F,
-) -> Result<Discovery, MagickError>
+async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Discovery, MagickError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
 {
-    let temporary_path = tmp_dir
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;
 
-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(MagickError::CreateDir)?;

@@ -90,7 +84,7 @@ where
 
     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];
 
     let res = Process::run(

@@ -102,7 +96,7 @@ where
             "JSON:".as_ref(),
         ],
         &envs,
-        timeout,
+        state.config.media.process_timeout,
     )?
     .read()
     .into_string()

src/formats.rs

@@ -12,13 +12,6 @@ pub(crate) use video::{
     OutputVideo, VideoCodec, WebmAlphaCodec, WebmAudioCodec, WebmCodec,
 };
 
-#[derive(Clone, Debug)]
-pub(crate) struct Validations<'a> {
-    pub(crate) image: &'a crate::config::Image,
-    pub(crate) animation: &'a crate::config::Animation,
-    pub(crate) video: &'a crate::config::Video,
-}
-
 #[derive(Clone, Debug, PartialEq, Eq)]
 pub(crate) enum InputFile {
     Image(ImageInput),
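
The Validations struct deleted above existed only to carry three borrowed config sections into crate::validate::validate_bytes. Once State is available the same data is reachable directly, so the bundle and its lifetime parameter go away. The call-site change, assembled from the src/ingest.rs hunks further down (the updated validate.rs itself is not in this excerpt, so the note about where it reads the limits is an assumption):

// Before: each caller assembled a borrow-bundle by hand.
let prescribed = Validations {
    image: &media.image,
    animation: &media.animation,
    video: &media.video,
};
let (input_type, process_read) = crate::validate::validate_bytes(
    tmp_dir,
    policy_dir,
    bytes,
    prescribed,
    media.process_timeout,
)
.await?;

// After: validate_bytes takes the state and can presumably read the same
// sections as state.config.media.image / .animation / .video.
let (input_type, process_read) = crate::validate::validate_bytes(state, bytes).await?;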

src/generate.rs (130 changed lines)

@@ -7,10 +7,9 @@ use crate::{
     error::{Error, UploadError},
     formats::{ImageFormat, InputProcessableFormat, InternalVideoFormat, ProcessableFormat},
     future::{WithMetrics, WithTimeout},
-    magick::PolicyDir,
-    repo::{ArcRepo, Hash, VariantAlreadyExists},
+    repo::{Hash, VariantAlreadyExists},
+    state::State,
     store::Store,
-    tmp_file::TmpDir,
 };
 use actix_web::web::Bytes;
 use std::{

@@ -49,47 +48,43 @@ impl Drop for MetricsGuard {
     }
 }
 
-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, hash, process_map, config))]
+#[tracing::instrument(skip(state, process_map, hash))]
 pub(crate) async fn generate<S: Store + 'static>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     process_map: &ProcessMap,
     format: InputProcessableFormat,
     thumbnail_path: PathBuf,
     thumbnail_args: Vec<String>,
     original_details: &Details,
-    config: &crate::config::Configuration,
     hash: Hash,
 ) -> Result<(Details, Bytes), Error> {
-    if config.server.danger_dummy_mode {
-        let identifier = repo
+    if state.config.server.danger_dummy_mode {
+        let identifier = state
+            .repo
             .identifier(hash)
             .await?
             .ok_or(UploadError::MissingIdentifier)?;
 
-        let bytes = store.to_bytes(&identifier, None, None).await?.into_bytes();
+        let bytes = state
+            .store
+            .to_bytes(&identifier, None, None)
+            .await?
+            .into_bytes();
 
         Ok((original_details.clone(), bytes))
     } else {
         let process_fut = process(
-            tmp_dir,
-            policy_dir,
-            repo,
-            store,
+            state,
             format,
             thumbnail_path.clone(),
             thumbnail_args,
             original_details,
-            config,
             hash.clone(),
         );
 
         let (details, bytes) = process_map
             .process(hash, thumbnail_path, process_fut)
-            .with_timeout(Duration::from_secs(config.media.process_timeout * 4))
+            .with_timeout(Duration::from_secs(state.config.media.process_timeout * 4))
             .with_metrics("pict-rs.generate.process")
             .await
             .map_err(|_| UploadError::ProcessTimeout)??;

@@ -98,38 +93,21 @@ pub(crate) async fn generate<S: Store + 'static>(
     }
 }
 
-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, hash, config))]
+#[tracing::instrument(skip(state, hash))]
 async fn process<S: Store + 'static>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     output_format: InputProcessableFormat,
     thumbnail_path: PathBuf,
     thumbnail_args: Vec<String>,
     original_details: &Details,
-    config: &crate::config::Configuration,
     hash: Hash,
 ) -> Result<(Details, Bytes), Error> {
     let guard = MetricsGuard::guard();
     let permit = crate::process_semaphore().acquire().await?;
 
-    let identifier = input_identifier(
-        tmp_dir,
-        policy_dir,
-        repo,
-        store,
-        output_format,
-        hash.clone(),
-        original_details,
-        &config.media,
-    )
-    .await?;
+    let identifier = input_identifier(state, output_format, hash.clone(), original_details).await?;
 
-    let input_details =
-        crate::ensure_details_identifier(tmp_dir, policy_dir, repo, store, config, &identifier)
-            .await?;
+    let input_details = crate::ensure_details_identifier(state, &identifier).await?;
 
     let input_format = input_details
         .internal_format()

@@ -139,21 +117,19 @@ async fn process<S: Store + 'static>(
     let format = input_format.process_to(output_format);
 
     let quality = match format {
-        ProcessableFormat::Image(format) => config.media.image.quality_for(format),
-        ProcessableFormat::Animation(format) => config.media.animation.quality_for(format),
+        ProcessableFormat::Image(format) => state.config.media.image.quality_for(format),
+        ProcessableFormat::Animation(format) => state.config.media.animation.quality_for(format),
     };
 
-    let stream = store.to_stream(&identifier, None, None).await?;
+    let stream = state.store.to_stream(&identifier, None, None).await?;
 
     let vec = crate::magick::process_image_stream_read(
-        tmp_dir,
-        policy_dir,
+        state,
         stream,
         thumbnail_args,
         input_format,
         format,
         quality,
-        config.media.process_timeout,
     )
     .await?
     .into_vec()

@@ -164,19 +140,15 @@ async fn process<S: Store + 'static>(
 
     drop(permit);
 
-    let details = Details::from_bytes(
-        tmp_dir,
-        policy_dir,
-        config.media.process_timeout,
-        bytes.clone(),
-    )
-    .await?;
+    let details = Details::from_bytes(state, bytes.clone()).await?;
 
-    let identifier = store
+    let identifier = state
+        .store
         .save_bytes(bytes.clone(), details.media_type())
         .await?;
 
-    if let Err(VariantAlreadyExists) = repo
+    if let Err(VariantAlreadyExists) = state
+        .repo
         .relate_variant_identifier(
             hash,
             thumbnail_path.to_string_lossy().to_string(),

@@ -184,27 +156,22 @@ async fn process<S: Store + 'static>(
         )
         .await?
     {
-        store.remove(&identifier).await?;
+        state.store.remove(&identifier).await?;
     }
 
-    repo.relate_details(&identifier, &details).await?;
+    state.repo.relate_details(&identifier, &details).await?;
 
     guard.disarm();
 
     Ok((details, bytes)) as Result<(Details, Bytes), Error>
 }
 
-#[allow(clippy::too_many_arguments)]
 #[tracing::instrument(skip_all)]
 async fn input_identifier<S>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     output_format: InputProcessableFormat,
     hash: Hash,
     original_details: &Details,
-    media: &crate::config::Media,
 ) -> Result<Arc<str>, Error>
 where
     S: Store + 'static,

@@ -220,11 +187,12 @@ where
     };
 
     if should_thumbnail {
-        if let Some(identifier) = repo.motion_identifier(hash.clone()).await? {
+        if let Some(identifier) = state.repo.motion_identifier(hash.clone()).await? {
             return Ok(identifier);
         };
 
-        let identifier = repo
+        let identifier = state
+            .repo
             .identifier(hash.clone())
             .await?
             .ok_or(UploadError::MissingIdentifier)?;

@@ -232,24 +200,16 @@ where
         let (reader, media_type) = if let Some(processable_format) =
             original_details.internal_format().processable_format()
         {
-            let thumbnail_format = media.image.format.unwrap_or(ImageFormat::Webp);
+            let thumbnail_format = state.config.media.image.format.unwrap_or(ImageFormat::Webp);
 
-            let stream = store.to_stream(&identifier, None, None).await?;
+            let stream = state.store.to_stream(&identifier, None, None).await?;
 
-            let reader = magick::thumbnail(
-                tmp_dir,
-                policy_dir,
-                stream,
-                processable_format,
-                ProcessableFormat::Image(thumbnail_format),
-                media.image.quality_for(thumbnail_format),
-                media.process_timeout,
-            )
-            .await?;
+            let reader =
+                magick::thumbnail(state, stream, processable_format, thumbnail_format).await?;
 
             (reader, thumbnail_format.media_type())
         } else {
-            let thumbnail_format = match media.image.format {
+            let thumbnail_format = match state.config.media.image.format {
                 Some(ImageFormat::Webp | ImageFormat::Avif | ImageFormat::Jxl) => {
                     ffmpeg::ThumbnailFormat::Webp
                 }

@@ -258,14 +218,12 @@ where
             };
 
             let reader = ffmpeg::thumbnail(
-                tmp_dir,
-                store.clone(),
+                state,
                 identifier,
                 original_details
                     .video_format()
                     .unwrap_or(InternalVideoFormat::Mp4),
                 thumbnail_format,
-                media.process_timeout,
             )
             .await?;
 

@@ -273,16 +231,20 @@ where
         };
 
         let motion_identifier = reader
-            .with_stdout(|stdout| async { store.save_async_read(stdout, media_type).await })
+            .with_stdout(|stdout| async { state.store.save_async_read(stdout, media_type).await })
            .await??;
 
-        repo.relate_motion_identifier(hash, &motion_identifier)
+        state
+            .repo
+            .relate_motion_identifier(hash, &motion_identifier)
             .await?;
 
         return Ok(motion_identifier);
     }
 
-    repo.identifier(hash)
+    state
+        .repo
+        .identifier(hash)
         .await?
         .ok_or(UploadError::MissingIdentifier)
         .map_err(From::from)

src/generate/ffmpeg.rs

@@ -6,8 +6,8 @@ use crate::{
     ffmpeg::FfMpegError,
     formats::InternalVideoFormat,
     process::{Process, ProcessRead},
+    state::State,
     store::Store,
-    tmp_file::TmpDir,
 };
 
 #[derive(Clone, Copy, Debug)]

@@ -50,21 +50,19 @@ impl ThumbnailFormat {
     }
 }
 
-#[tracing::instrument(skip(tmp_dir, store, timeout))]
+#[tracing::instrument(skip(state))]
 pub(super) async fn thumbnail<S: Store>(
-    tmp_dir: &TmpDir,
-    store: S,
+    state: &State<S>,
     from: Arc<str>,
     input_format: InternalVideoFormat,
     format: ThumbnailFormat,
-    timeout: u64,
 ) -> Result<ProcessRead, FfMpegError> {
-    let input_file = tmp_dir.tmp_file(Some(input_format.file_extension()));
+    let input_file = state.tmp_dir.tmp_file(Some(input_format.file_extension()));
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(FfMpegError::CreateDir)?;
 
-    let output_file = tmp_dir.tmp_file(Some(format.to_file_extension()));
+    let output_file = state.tmp_dir.tmp_file(Some(format.to_file_extension()));
     crate::store::file_store::safe_create_parent(&output_file)
         .await
         .map_err(FfMpegError::CreateDir)?;

@@ -72,7 +70,8 @@ pub(super) async fn thumbnail<S: Store>(
     let mut tmp_one = crate::file::File::create(&input_file)
         .await
         .map_err(FfMpegError::CreateFile)?;
-    let stream = store
+    let stream = state
+        .store
         .to_stream(&from, None, None)
         .await
         .map_err(FfMpegError::Store)?;

@@ -99,7 +98,7 @@ pub(super) async fn thumbnail<S: Store>(
             output_file.as_os_str(),
         ],
         &[],
-        timeout,
+        state.config.media.process_timeout,
     )?;
 
     let res = process.wait().await;

src/generate/magick.rs

@@ -3,32 +3,33 @@ use std::ffi::OsStr;
 use actix_web::web::Bytes;
 
 use crate::{
-    formats::ProcessableFormat,
-    magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
+    formats::{ImageFormat, ProcessableFormat},
+    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
     process::{Process, ProcessRead},
+    state::State,
     stream::LocalBoxStream,
-    tmp_file::TmpDir,
 };
 
-async fn thumbnail_animation<F, Fut>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+async fn thumbnail_animation<S, F, Fut>(
+    state: &State<S>,
     input_format: ProcessableFormat,
-    format: ProcessableFormat,
-    quality: Option<u8>,
-    timeout: u64,
+    thumbnail_format: ImageFormat,
     write_file: F,
 ) -> Result<ProcessRead, MagickError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
 {
-    let temporary_path = tmp_dir
+    let format = ProcessableFormat::Image(thumbnail_format);
+    let quality = state.config.media.image.quality_for(thumbnail_format);
+
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;
 
-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(MagickError::CreateDir)?;

@@ -62,10 +63,10 @@ where
 
     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];
 
-    let reader = Process::run("magick", &args, &envs, timeout)?
+    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?
         .read()
         .add_extras(input_file)
         .add_extras(temporary_path);

@@ -73,22 +74,16 @@ where
     Ok(reader)
 }
 
-pub(super) async fn thumbnail(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn thumbnail<S>(
+    state: &State<S>,
     stream: LocalBoxStream<'static, std::io::Result<Bytes>>,
     input_format: ProcessableFormat,
-    format: ProcessableFormat,
-    quality: Option<u8>,
-    timeout: u64,
+    thumbnail_format: ImageFormat,
 ) -> Result<ProcessRead, MagickError> {
     thumbnail_animation(
-        tmp_dir,
-        policy_dir,
+        state,
         input_format,
-        format,
-        quality,
-        timeout,
+        thumbnail_format,
         |mut tmp_file| async move {
             tmp_file
                 .write_from_stream(stream)
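
One place where the consolidation changes more than plumbing: thumbnail_animation now receives the requested ImageFormat and derives format and quality itself from state.config, instead of having every caller precompute them. The caller in src/generate.rs (shown earlier) shrinks accordingly:

// Before: the caller computed the processable format, quality, and timeout.
let reader = magick::thumbnail(
    tmp_dir,
    policy_dir,
    stream,
    processable_format,
    ProcessableFormat::Image(thumbnail_format),
    media.image.quality_for(thumbnail_format),
    media.process_timeout,
)
.await?;

// After: the callee derives those values from thumbnail_format and state.
let reader =
    magick::thumbnail(state, stream, processable_format, thumbnail_format).await?;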

src/ingest.rs (136 changed lines)

@@ -4,22 +4,21 @@ use crate::{
     bytes_stream::BytesStream,
     details::Details,
     error::{Error, UploadError},
-    formats::{InternalFormat, Validations},
+    formats::InternalFormat,
     future::WithMetrics,
-    magick::PolicyDir,
     repo::{Alias, ArcRepo, DeleteToken, Hash},
+    state::State,
     store::Store,
-    tmp_file::TmpDir,
 };
 use actix_web::web::Bytes;
 use futures_core::Stream;
 use reqwest::Body;
-use reqwest_middleware::ClientWithMiddleware;
 use streem::IntoStreamer;
 use tracing::{Instrument, Span};
 
 mod hasher;
-use hasher::{Hasher, State};
+use hasher::Hasher;
 
 #[derive(Debug)]
 pub(crate) struct Session {

@@ -50,12 +49,17 @@ where
 }
 
 async fn process_ingest<S>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    store: &S,
+    state: &State<S>,
     stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
-    media: &crate::config::Media,
-) -> Result<(InternalFormat, Arc<str>, Details, Rc<RefCell<State>>), Error>
+) -> Result<
+    (
+        InternalFormat,
+        Arc<str>,
+        Details,
+        Rc<RefCell<hasher::State>>,
+    ),
+    Error,
+>
 where
     S: Store,
 {

@@ -65,43 +69,30 @@ where
 
     let permit = crate::process_semaphore().acquire().await?;
 
-    let prescribed = Validations {
-        image: &media.image,
-        animation: &media.animation,
-        video: &media.video,
-    };
-
     tracing::trace!("Validating bytes");
-    let (input_type, process_read) = crate::validate::validate_bytes(
-        tmp_dir,
-        policy_dir,
-        bytes,
-        prescribed,
-        media.process_timeout,
-    )
-    .await?;
+    let (input_type, process_read) = crate::validate::validate_bytes(state, bytes).await?;
 
-    let process_read = if let Some(operations) = media.preprocess_steps() {
+    let process_read = if let Some(operations) = state.config.media.preprocess_steps() {
         if let Some(format) = input_type.processable_format() {
             let (_, magick_args) =
                 crate::processor::build_chain(operations, format.file_extension())?;
 
             let quality = match format {
-                crate::formats::ProcessableFormat::Image(format) => media.image.quality_for(format),
+                crate::formats::ProcessableFormat::Image(format) => {
+                    state.config.media.image.quality_for(format)
+                }
                 crate::formats::ProcessableFormat::Animation(format) => {
-                    media.animation.quality_for(format)
+                    state.config.media.animation.quality_for(format)
                 }
             };
 
             crate::magick::process_image_process_read(
-                tmp_dir,
-                policy_dir,
+                state,
                 process_read,
                 magick_args,
                 format,
                 format,
                 quality,
-                media.process_timeout,
             )
             .await?
         } else {

@@ -111,36 +102,39 @@ where
         process_read
     };
 
-    let (state, identifier) = process_read
+    let (hash_state, identifier) = process_read
         .with_stdout(|stdout| async move {
             let hasher_reader = Hasher::new(stdout);
-            let state = hasher_reader.state();
+            let hash_state = hasher_reader.state();
 
-            store
+            state
+                .store
                 .save_async_read(hasher_reader, input_type.media_type())
                 .await
-                .map(move |identifier| (state, identifier))
+                .map(move |identifier| (hash_state, identifier))
         })
         .await??;
 
-    let bytes_stream = store.to_bytes(&identifier, None, None).await?;
-    let details = Details::from_bytes(
-        tmp_dir,
-        policy_dir,
-        media.process_timeout,
-        bytes_stream.into_bytes(),
-    )
-    .await?;
+    let bytes_stream = state.store.to_bytes(&identifier, None, None).await?;
+    let details = Details::from_bytes(state, bytes_stream.into_bytes()).await?;
 
     drop(permit);
 
-    Ok((input_type, identifier, details, state))
+    Ok((input_type, identifier, details, hash_state))
 }
 
 async fn dummy_ingest<S>(
-    store: &S,
+    state: &State<S>,
     stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
-) -> Result<(InternalFormat, Arc<str>, Details, Rc<RefCell<State>>), Error>
+) -> Result<
+    (
+        InternalFormat,
+        Arc<str>,
+        Details,
+        Rc<RefCell<hasher::State>>,
+    ),
+    Error,
+>
 where
     S: Store,
 {

@@ -152,55 +146,51 @@ where
     let reader = Box::pin(tokio_util::io::StreamReader::new(stream));
 
     let hasher_reader = Hasher::new(reader);
-    let state = hasher_reader.state();
+    let hash_state = hasher_reader.state();
 
     let input_type = InternalFormat::Image(crate::formats::ImageFormat::Png);
 
-    let identifier = store
+    let identifier = state
+        .store
         .save_async_read(hasher_reader, input_type.media_type())
         .await?;
 
     let details = Details::danger_dummy(input_type);
 
-    Ok((input_type, identifier, details, state))
+    Ok((input_type, identifier, details, hash_state))
 }
 
-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, client, stream, config))]
+#[tracing::instrument(skip(state, stream))]
 pub(crate) async fn ingest<S>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
+    state: &State<S>,
     stream: impl Stream<Item = Result<Bytes, Error>> + 'static,
     declared_alias: Option<Alias>,
-    config: &crate::config::Configuration,
 ) -> Result<Session, Error>
 where
     S: Store,
 {
-    let (input_type, identifier, details, state) = if config.server.danger_dummy_mode {
-        dummy_ingest(store, stream).await?
+    let (input_type, identifier, details, hash_state) = if state.config.server.danger_dummy_mode {
+        dummy_ingest(state, stream).await?
     } else {
-        process_ingest(tmp_dir, policy_dir, store, stream, &config.media).await?
+        process_ingest(state, stream).await?
     };
 
     let mut session = Session {
-        repo: repo.clone(),
+        repo: state.repo.clone(),
         delete_token: DeleteToken::generate(),
         hash: None,
         alias: None,
         identifier: Some(identifier.clone()),
     };
 
-    if let Some(endpoint) = &config.media.external_validation {
-        let stream = store.to_stream(&identifier, None, None).await?;
+    if let Some(endpoint) = &state.config.media.external_validation {
+        let stream = state.store.to_stream(&identifier, None, None).await?;
 
-        let response = client
+        let response = state
+            .client
             .post(endpoint.as_str())
             .timeout(Duration::from_secs(
-                config.media.external_validation_timeout,
+                state.config.media.external_validation_timeout,
             ))
             .header("Content-Type", input_type.media_type().as_ref())
             .body(Body::wrap_stream(crate::stream::make_send(stream)))

@@ -214,13 +204,13 @@ where
         }
     }
 
-    let (hash, size) = state.borrow_mut().finalize_reset();
+    let (hash, size) = hash_state.borrow_mut().finalize_reset();
 
     let hash = Hash::new(hash, size, input_type);
 
-    save_upload(&mut session, repo, store, hash.clone(), &identifier).await?;
+    save_upload(&mut session, state, hash.clone(), &identifier).await?;
 
-    repo.relate_details(&identifier, &details).await?;
+    state.repo.relate_details(&identifier, &details).await?;
 
     if let Some(alias) = declared_alias {
         session.add_existing_alias(hash, alias).await?

@@ -234,17 +224,21 @@ where
 #[tracing::instrument(level = "trace", skip_all)]
 async fn save_upload<S>(
     session: &mut Session,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     hash: Hash,
     identifier: &Arc<str>,
 ) -> Result<(), Error>
 where
     S: Store,
 {
-    if repo.create_hash(hash.clone(), identifier).await?.is_err() {
+    if state
+        .repo
+        .create_hash(hash.clone(), identifier)
+        .await?
+        .is_err()
+    {
         // duplicate upload
-        store.remove(identifier).await?;
+        state.store.remove(identifier).await?;
         session.identifier.take();
         return Ok(());
     }

src/lib.rs (1155 changed lines)

File diff suppressed because it is too large.
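
src/lib.rs carries more than half of the diff but is collapsed above. The visible signature changes imply the shape of what happened there: the same mechanical rewrite applied to the handlers. For example, ensure_details_identifier is called from src/generate.rs with its new two-argument form, and the old six-argument call is deleted in the same hunk, so lib.rs call sites presumably collapse the same way:

// Before (the call deleted in src/generate.rs):
let input_details =
    crate::ensure_details_identifier(tmp_dir, policy_dir, repo, store, config, &identifier)
        .await?;

// After:
let input_details = crate::ensure_details_identifier(state, &identifier).await?;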

src/magick.rs

@@ -7,6 +7,7 @@ use crate::{
     error_code::ErrorCode,
     formats::ProcessableFormat,
     process::{Process, ProcessError, ProcessRead},
+    state::State,
     stream::LocalBoxStream,
     tmp_file::{TmpDir, TmpFolder},
 };

@@ -85,27 +86,25 @@ impl MagickError {
     }
 }
 
-#[allow(clippy::too_many_arguments)]
-async fn process_image<F, Fut>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+async fn process_image<S, F, Fut>(
+    state: &State<S>,
     process_args: Vec<String>,
     input_format: ProcessableFormat,
     format: ProcessableFormat,
     quality: Option<u8>,
-    timeout: u64,
     write_file: F,
 ) -> Result<ProcessRead, MagickError>
 where
     F: FnOnce(crate::file::File) -> Fut,
     Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
 {
-    let temporary_path = tmp_dir
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;
 
-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);
     crate::store::file_store::safe_create_parent(&input_file)
         .await
         .map_err(MagickError::CreateDir)?;

@@ -143,10 +142,10 @@ where
 
     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];
 
-    let reader = Process::run("magick", &args, &envs, timeout)?
+    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?
         .read()
         .add_extras(input_file)
         .add_extras(temporary_path);

@@ -154,25 +153,20 @@ where
     Ok(reader)
 }
 
-#[allow(clippy::too_many_arguments)]
-pub(crate) async fn process_image_stream_read(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(crate) async fn process_image_stream_read<S>(
+    state: &State<S>,
     stream: LocalBoxStream<'static, std::io::Result<Bytes>>,
     args: Vec<String>,
     input_format: ProcessableFormat,
     format: ProcessableFormat,
     quality: Option<u8>,
-    timeout: u64,
 ) -> Result<ProcessRead, MagickError> {
     process_image(
-        tmp_dir,
-        policy_dir,
+        state,
         args,
         input_format,
         format,
         quality,
-        timeout,
         |mut tmp_file| async move {
             tmp_file
                 .write_from_stream(stream)

@@ -184,25 +178,20 @@ pub(crate) async fn process_image_stream_read(
     .await
 }
 
-#[allow(clippy::too_many_arguments)]
-pub(crate) async fn process_image_process_read(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(crate) async fn process_image_process_read<S>(
+    state: &State<S>,
     process_read: ProcessRead,
     args: Vec<String>,
     input_format: ProcessableFormat,
     format: ProcessableFormat,
     quality: Option<u8>,
-    timeout: u64,
 ) -> Result<ProcessRead, MagickError> {
     process_image(
-        tmp_dir,
-        policy_dir,
+        state,
         args,
         input_format,
         format,
         quality,
-        timeout,
         |mut tmp_file| async move {
             process_read
                 .with_stdout(|stdout| async {

src/migrate_store.rs

@@ -12,21 +12,15 @@ use streem::IntoStreamer;
 use crate::{
     details::Details,
     error::{Error, UploadError},
-    magick::{ArcPolicyDir, PolicyDir},
     repo::{ArcRepo, Hash},
+    state::State,
     store::Store,
-    tmp_file::{ArcTmpDir, TmpDir},
 };
 
-#[allow(clippy::too_many_arguments)]
 pub(super) async fn migrate_store<S1, S2>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
     from: S1,
-    to: S2,
+    to: State<S2>,
     skip_missing_files: bool,
-    timeout: u64,
     concurrency: usize,
 ) -> Result<(), Error>
 where

@@ -39,7 +33,7 @@ where
         tracing::warn!("Old store is not configured correctly");
         return Err(e.into());
     }
-    if let Err(e) = to.health_check().await {
+    if let Err(e) = to.repo.health_check().await {
         tracing::warn!("New store is not configured correctly");
         return Err(e.into());
     }

@@ -48,17 +42,8 @@ where
 
     let mut failure_count = 0;
 
-    while let Err(e) = do_migrate_store(
-        tmp_dir.clone(),
-        policy_dir.clone(),
-        repo.clone(),
-        from.clone(),
-        to.clone(),
-        skip_missing_files,
-        timeout,
-        concurrency,
-    )
-    .await
+    while let Err(e) =
+        do_migrate_store(from.clone(), to.clone(), skip_missing_files, concurrency).await
     {
         tracing::error!("Migration failed with {}", format!("{e:?}"));
 

@@ -78,11 +63,8 @@ where
 }
 
 struct MigrateState<S1, S2> {
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
     from: S1,
-    to: S2,
+    to: State<S2>,
     continuing_migration: bool,
     skip_missing_files: bool,
     initial_repo_size: u64,

@@ -90,26 +72,20 @@ struct MigrateState<S1, S2> {
     pct: AtomicU64,
     index: AtomicU64,
     started_at: Instant,
-    timeout: u64,
 }
 
-#[allow(clippy::too_many_arguments)]
 async fn do_migrate_store<S1, S2>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
     from: S1,
-    to: S2,
+    to: State<S2>,
     skip_missing_files: bool,
-    timeout: u64,
     concurrency: usize,
 ) -> Result<(), Error>
 where
     S1: Store + 'static,
     S2: Store + 'static,
 {
-    let continuing_migration = repo.is_continuing_migration().await?;
-    let initial_repo_size = repo.size().await?;
+    let continuing_migration = to.repo.is_continuing_migration().await?;
+    let initial_repo_size = to.repo.size().await?;
 
     if continuing_migration {
         tracing::warn!("Continuing previous migration of {initial_repo_size} total hashes");

@@ -122,15 +98,12 @@ where
     }
 
     // Hashes are read in a consistent order
-    let stream = std::pin::pin!(repo.hashes());
+    let stream = std::pin::pin!(to.repo.hashes());
     let mut stream = stream.into_streamer();
 
     let state = Rc::new(MigrateState {
-        tmp_dir: tmp_dir.clone(),
-        policy_dir: policy_dir.clone(),
-        repo: repo.clone(),
         from,
-        to,
+        to: to.clone(),
         continuing_migration,
         skip_missing_files,
         initial_repo_size,

@@ -138,7 +111,6 @@ where
         pct: AtomicU64::new(initial_repo_size / 100),
         index: AtomicU64::new(0),
         started_at: Instant::now(),
-        timeout,
     });
 
     let mut joinset = tokio::task::JoinSet::new();

@@ -165,7 +137,7 @@ where
     }
 
     // clean up the migration table to avoid interfering with future migrations
-    repo.clear().await?;
+    to.repo.clear().await?;
 
     tracing::warn!("Migration completed successfully");
 

@@ -179,9 +151,6 @@ where
     S2: Store,
 {
     let MigrateState {
-        tmp_dir,
-        policy_dir,
-        repo,
         from,
         to,
         continuing_migration,

@@ -191,24 +160,23 @@ where
         pct,
         index,
         started_at,
-        timeout,
     } = state;
 
     let current_index = index.fetch_add(1, Ordering::Relaxed);
 
-    let original_identifier = match repo.identifier(hash.clone()).await {
+    let original_identifier = match to.repo.identifier(hash.clone()).await {
         Ok(Some(identifier)) => identifier,
         Ok(None) => {
             tracing::warn!(
                 "Original File identifier for hash {hash:?} is missing, queue cleanup task",
             );
-            crate::queue::cleanup_hash(repo, hash.clone()).await?;
+            crate::queue::cleanup_hash(&to.repo, hash.clone()).await?;
             return Ok(());
         }
         Err(e) => return Err(e.into()),
     };
 
-    if repo.is_migrated(&original_identifier).await? {
+    if to.repo.is_migrated(&original_identifier).await? {
         // migrated original for hash - this means we can skip
         return Ok(());
     }

@@ -241,26 +209,16 @@ where
         }
     }
 
-    if let Some(identifier) = repo.motion_identifier(hash.clone()).await? {
-        if !repo.is_migrated(&identifier).await? {
-            match migrate_file(
-                tmp_dir,
-                policy_dir,
-                repo,
-                from,
-                to,
-                &identifier,
-                *skip_missing_files,
-                *timeout,
-            )
-            .await
-            {
+    if let Some(identifier) = to.repo.motion_identifier(hash.clone()).await? {
+        if !to.repo.is_migrated(&identifier).await? {
+            match migrate_file(from, to, &identifier, *skip_missing_files).await {
                 Ok(new_identifier) => {
-                    migrate_details(repo, &identifier, &new_identifier).await?;
-                    repo.relate_motion_identifier(hash.clone(), &new_identifier)
+                    migrate_details(&to.repo, &identifier, &new_identifier).await?;
+                    to.repo
+                        .relate_motion_identifier(hash.clone(), &new_identifier)
                         .await?;
 
-                    repo.mark_migrated(&identifier, &new_identifier).await?;
+                    to.repo.mark_migrated(&identifier, &new_identifier).await?;
                 }
                 Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
                     tracing::warn!("Skipping motion file for hash {hash:?}");

@@ -281,28 +239,20 @@ where
         }
     }
 
-    for (variant, identifier) in repo.variants(hash.clone()).await? {
-        if !repo.is_migrated(&identifier).await? {
-            match migrate_file(
-                tmp_dir,
-                policy_dir,
-                repo,
-                from,
-                to,
-                &identifier,
-                *skip_missing_files,
-                *timeout,
-            )
-            .await
-            {
+    for (variant, identifier) in to.repo.variants(hash.clone()).await? {
+        if !to.repo.is_migrated(&identifier).await? {
+            match migrate_file(from, to, &identifier, *skip_missing_files).await {
                 Ok(new_identifier) => {
-                    migrate_details(repo, &identifier, &new_identifier).await?;
-                    repo.remove_variant(hash.clone(), variant.clone()).await?;
-                    let _ = repo
+                    migrate_details(&to.repo, &identifier, &new_identifier).await?;
+                    to.repo
+                        .remove_variant(hash.clone(), variant.clone())
+                        .await?;
+                    let _ = to
+                        .repo
                         .relate_variant_identifier(hash.clone(), variant, &new_identifier)
                         .await?;
 
-                    repo.mark_migrated(&identifier, &new_identifier).await?;
+                    to.repo.mark_migrated(&identifier, &new_identifier).await?;
                 }
                 Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
                     tracing::warn!("Skipping variant {variant} for hash {hash:?}",);

@@ -323,23 +273,14 @@ where
         }
     }
 
-    match migrate_file(
-        tmp_dir,
-        policy_dir,
-        repo,
-        from,
-        to,
-        &original_identifier,
-        *skip_missing_files,
-        *timeout,
-    )
-    .await
-    {
+    match migrate_file(from, to, &original_identifier, *skip_missing_files).await {
        Ok(new_identifier) => {
-            migrate_details(repo, &original_identifier, &new_identifier).await?;
-            repo.update_identifier(hash.clone(), &new_identifier)
+            migrate_details(&to.repo, &original_identifier, &new_identifier).await?;
+            to.repo
+                .update_identifier(hash.clone(), &new_identifier)
                .await?;
-            repo.mark_migrated(&original_identifier, &new_identifier)
+            to.repo
|
||||||
|
.mark_migrated(&original_identifier, &new_identifier)
|
||||||
.await?;
|
.await?;
|
||||||
}
|
}
|
||||||
Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
|
Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
|
||||||
|
@ -383,16 +324,11 @@ where
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
|
||||||
async fn migrate_file<S1, S2>(
|
async fn migrate_file<S1, S2>(
|
||||||
tmp_dir: &TmpDir,
|
|
||||||
policy_dir: &PolicyDir,
|
|
||||||
repo: &ArcRepo,
|
|
||||||
from: &S1,
|
from: &S1,
|
||||||
to: &S2,
|
to: &State<S2>,
|
||||||
identifier: &Arc<str>,
|
identifier: &Arc<str>,
|
||||||
skip_missing_files: bool,
|
skip_missing_files: bool,
|
||||||
timeout: u64,
|
|
||||||
) -> Result<Arc<str>, MigrateError>
|
) -> Result<Arc<str>, MigrateError>
|
||||||
where
|
where
|
||||||
S1: Store,
|
S1: Store,
|
||||||
|
@ -403,7 +339,7 @@ where
|
||||||
loop {
|
loop {
|
||||||
tracing::trace!("migrate_file: looping");
|
tracing::trace!("migrate_file: looping");
|
||||||
|
|
||||||
match do_migrate_file(tmp_dir, policy_dir, repo, from, to, identifier, timeout).await {
|
match do_migrate_file(from, to, identifier).await {
|
||||||
Ok(identifier) => return Ok(identifier),
|
Ok(identifier) => return Ok(identifier),
|
||||||
Err(MigrateError::From(e)) if e.is_not_found() && skip_missing_files => {
|
Err(MigrateError::From(e)) if e.is_not_found() && skip_missing_files => {
|
||||||
return Err(MigrateError::From(e));
|
return Err(MigrateError::From(e));
|
||||||
|
@ -432,13 +368,9 @@ enum MigrateError {
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn do_migrate_file<S1, S2>(
|
async fn do_migrate_file<S1, S2>(
|
||||||
tmp_dir: &TmpDir,
|
|
||||||
policy_dir: &PolicyDir,
|
|
||||||
repo: &ArcRepo,
|
|
||||||
from: &S1,
|
from: &S1,
|
||||||
to: &S2,
|
to: &State<S2>,
|
||||||
identifier: &Arc<str>,
|
identifier: &Arc<str>,
|
||||||
timeout: u64,
|
|
||||||
) -> Result<Arc<str>, MigrateError>
|
) -> Result<Arc<str>, MigrateError>
|
||||||
where
|
where
|
||||||
S1: Store,
|
S1: Store,
|
||||||
|
@ -449,7 +381,8 @@ where
|
||||||
.await
|
.await
|
||||||
.map_err(MigrateError::From)?;
|
.map_err(MigrateError::From)?;
|
||||||
|
|
||||||
let details_opt = repo
|
let details_opt = to
|
||||||
|
.repo
|
||||||
.details(identifier)
|
.details(identifier)
|
||||||
.await
|
.await
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
|
@ -463,11 +396,11 @@ where
|
||||||
.await
|
.await
|
||||||
.map_err(From::from)
|
.map_err(From::from)
|
||||||
.map_err(MigrateError::Details)?;
|
.map_err(MigrateError::Details)?;
|
||||||
let new_details =
|
let new_details = Details::from_bytes(to, bytes_stream.into_bytes())
|
||||||
Details::from_bytes(tmp_dir, policy_dir, timeout, bytes_stream.into_bytes())
|
|
||||||
.await
|
.await
|
||||||
.map_err(MigrateError::Details)?;
|
.map_err(MigrateError::Details)?;
|
||||||
repo.relate_details(identifier, &new_details)
|
to.repo
|
||||||
|
.relate_details(identifier, &new_details)
|
||||||
.await
|
.await
|
||||||
.map_err(Error::from)
|
.map_err(Error::from)
|
||||||
.map_err(MigrateError::Details)?;
|
.map_err(MigrateError::Details)?;
|
||||||
|
@ -475,6 +408,7 @@ where
|
||||||
};
|
};
|
||||||
|
|
||||||
let new_identifier = to
|
let new_identifier = to
|
||||||
|
.store
|
||||||
.save_stream(stream, details.media_type())
|
.save_stream(stream, details.media_type())
|
||||||
.await
|
.await
|
||||||
.map_err(MigrateError::To)?;
|
.map_err(MigrateError::To)?;
|
||||||
|
|
|
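The pattern in the hunks above repeats across the whole PR: the destination-side dependencies (`repo`, `tmp_dir`, `policy_dir`, the timeout) stop being individual parameters and ride along in one `State<S2>` (the struct itself appears in `src/state.rs` later in this diff), which also retires the `#[allow(clippy::too_many_arguments)]` lints. A minimal, self-contained sketch of the idea with toy types — `AppState`, `Repo`, and the `run_job_*` functions are illustrative stand-ins, not pict-rs APIs:

```rust
use std::sync::Arc;

#[derive(Clone)]
struct Repo;

// Stand-in for pict-rs' State<S>: one cloneable bundle of dependencies.
#[derive(Clone)]
struct AppState<S> {
    repo: Arc<Repo>,
    store: S,
    timeout: u64,
}

// Before: every dependency is its own parameter, and each caller must
// thread all of them through.
fn run_job_before<S>(_repo: &Arc<Repo>, _store: &S, _timeout: u64) {}

// After: one parameter carries the whole environment.
fn run_job_after<S>(state: &AppState<S>) {
    let _ = (&state.repo, &state.store, state.timeout);
}

fn main() {
    let state = AppState {
        repo: Arc::new(Repo),
        store: (),
        timeout: 15,
    };
    run_job_before(&state.repo, &state.store, state.timeout);
    run_job_after(&state);
}
```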
@@ -126,8 +126,8 @@ pub(crate) enum ProcessError {
     #[error("Failed to cleanup for command {0}")]
     Cleanup(Arc<str>, #[source] std::io::Error),

-    #[error("Unknown process error")]
-    Other(#[source] std::io::Error),
+    #[error("Unknown process error for command {0}")]
+    Other(Arc<str>, #[source] std::io::Error),
 }

 impl ProcessError {
@@ -135,7 +135,7 @@ impl ProcessError {
         match self {
             Self::NotFound(_) => ErrorCode::COMMAND_NOT_FOUND,
             Self::PermissionDenied(_) => ErrorCode::COMMAND_PERMISSION_DENIED,
-            Self::LimitReached | Self::Read(_, _) | Self::Cleanup(_, _) | Self::Other(_) => {
+            Self::LimitReached | Self::Read(_, _) | Self::Cleanup(_, _) | Self::Other(_, _) => {
                 ErrorCode::COMMAND_ERROR
             }
             Self::Timeout(_) => ErrorCode::COMMAND_TIMEOUT,
@@ -180,7 +180,7 @@ impl Process {
                 Err(ProcessError::PermissionDenied(command))
             }
             std::io::ErrorKind::WouldBlock => Err(ProcessError::LimitReached),
-            _ => Err(ProcessError::Other(e)),
+            _ => Err(ProcessError::Other(command, e)),
         },
     }
 }
@@ -223,7 +223,7 @@ impl Process {
                 Ok(())
             }
             Ok(Ok(status)) => Err(ProcessError::Status(command, status)),
-            Ok(Err(e)) => Err(ProcessError::Other(e)),
+            Ok(Err(e)) => Err(ProcessError::Other(command, e)),
             Err(_) => {
                 let _ = child.kill().await;
                 Err(ProcessError::Timeout(command))
@@ -234,7 +234,16 @@ impl Process {
     pub(crate) fn bytes_read(self, input: Bytes) -> ProcessRead {
         self.spawn_fn(move |mut stdin| {
             let mut input = input;
-            async move { stdin.write_all_buf(&mut input).await }
+            async move {
+                match stdin.write_all_buf(&mut input).await {
+                    Ok(()) => Ok(()),
+                    // BrokenPipe means we finished reading from Stdout, so we don't need to write
+                    // to stdin. We'll still error out if the command failed so treat this as a
+                    // success
+                    Err(e) if e.kind() == std::io::ErrorKind::BrokenPipe => Ok(()),
+                    Err(e) => Err(e),
+                }
+            }
         })
     }

@@ -275,9 +284,12 @@ impl Process {
                 Ok(())
             }
             Ok(Ok(status)) => Err(ProcessError::Status(command2, status)),
-            Ok(Err(e)) => Err(ProcessError::Other(e)),
+            Ok(Err(e)) => Err(ProcessError::Other(command2, e)),
             Err(_) => {
-                child.kill().await.map_err(ProcessError::Other)?;
+                child
+                    .kill()
+                    .await
+                    .map_err(|e| ProcessError::Other(command2.clone(), e))?;
                 Err(ProcessError::Timeout(command2))
             }
         }

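The `bytes_read` hunk above tolerates `BrokenPipe` when writing to a child's stdin: once the child stops reading, the writer's failure carries no information, and the command's exit status still decides success or failure. A std-only sketch of the same rationale (pict-rs uses tokio's async pipes; `head -c 4` is just a stand-in child that closes its input early, so this assumes a Unix-ish environment):

```rust
use std::io::{ErrorKind, Write};
use std::process::{Command, Stdio};

fn main() -> std::io::Result<()> {
    let mut child = Command::new("head")
        .args(["-c", "4"])
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .spawn()?;

    let mut stdin = child.stdin.take().expect("piped stdin");
    let payload = vec![b'x'; 1 << 20];
    match stdin.write_all(&payload) {
        Ok(()) => {}
        // The child stopped reading; that's not an error for the writer.
        Err(e) if e.kind() == ErrorKind::BrokenPipe => {}
        Err(e) => return Err(e),
    }
    drop(stdin);

    // The exit status, not the pipe error, decides success.
    let status = child.wait()?;
    assert!(status.success());
    Ok(())
}
```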
src/queue.rs (147 changes)
@@ -1,16 +1,14 @@
 use crate::{
     concurrent_processor::ProcessMap,
-    config::Configuration,
     error::{Error, UploadError},
     formats::InputProcessableFormat,
     future::LocalBoxFuture,
-    magick::ArcPolicyDir,
     repo::{Alias, ArcRepo, DeleteToken, Hash, JobId, UploadId},
     serde_str::Serde,
+    state::State,
     store::Store,
-    tmp_file::ArcTmpDir,
 };
-use reqwest_middleware::ClientWithMiddleware;
 use std::{
     path::PathBuf,
     sync::Arc,
@@ -188,35 +186,12 @@ pub(crate) async fn queue_generate(
     Ok(())
 }

-pub(crate) async fn process_cleanup<S: Store + 'static>(
-    repo: ArcRepo,
-    store: S,
-    config: Configuration,
-) {
-    process_jobs(&repo, &store, &config, CLEANUP_QUEUE, cleanup::perform).await
+pub(crate) async fn process_cleanup<S: Store + 'static>(state: State<S>) {
+    process_jobs(state, CLEANUP_QUEUE, cleanup::perform).await
 }

-pub(crate) async fn process_images<S: Store + 'static>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    repo: ArcRepo,
-    store: S,
-    client: ClientWithMiddleware,
-    process_map: ProcessMap,
-    config: Configuration,
-) {
-    process_image_jobs(
-        &tmp_dir,
-        &policy_dir,
-        &repo,
-        &store,
-        &client,
-        &process_map,
-        &config,
-        PROCESS_QUEUE,
-        process::perform,
-    )
-    .await
+pub(crate) async fn process_images<S: Store + 'static>(state: State<S>, process_map: ProcessMap) {
+    process_image_jobs(state, process_map, PROCESS_QUEUE, process::perform).await
 }

 struct MetricsGuard {
@@ -250,21 +225,10 @@ impl Drop for MetricsGuard {
     }
 }

-async fn process_jobs<S, F>(
-    repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
-    queue: &'static str,
-    callback: F,
-) where
+async fn process_jobs<S, F>(state: State<S>, queue: &'static str, callback: F)
+where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcRepo,
-            &'a S,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
-        + Copy,
+    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>> + Copy,
 {
     let worker_id = uuid::Uuid::new_v4();

@@ -273,7 +237,7 @@ async fn process_jobs<S, F>(

         tokio::task::yield_now().await;

-        let res = job_loop(repo, store, config, worker_id, queue, callback).await;
+        let res = job_loop(&state, worker_id, queue, callback).await;

         if let Err(e) = res {
             tracing::warn!("Error processing jobs: {}", format!("{e}"));
@@ -291,22 +255,14 @@ async fn process_jobs<S, F>(
 }

 async fn job_loop<S, F>(
-    repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     worker_id: uuid::Uuid,
     queue: &'static str,
     callback: F,
 ) -> Result<(), Error>
 where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcRepo,
-            &'a S,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
-        + Copy,
+    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>> + Copy,
 {
     loop {
         tracing::trace!("job_loop: looping");
@@ -314,20 +270,20 @@ where
         tokio::task::yield_now().await;

         async {
-            let (job_id, job) = repo.pop(queue, worker_id).await?;
+            let (job_id, job) = state.repo.pop(queue, worker_id).await?;

             let guard = MetricsGuard::guard(worker_id, queue);

             let res = heartbeat(
-                repo,
+                &state.repo,
                 queue,
                 worker_id,
                 job_id,
-                (callback)(repo, store, config, job),
+                (callback)(state, job),
             )
             .await;

-            repo.complete_job(queue, worker_id, job_id).await?;
+            state.repo.complete_job(queue, worker_id, job_id).await?;

             res?;

@@ -340,29 +296,14 @@ where
         }
     }
 }

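The `job_loop` hunk above settles into a fixed shape: pop a job, run the callback while a heartbeat keeps the claim alive, mark the job complete regardless of the outcome, and only then surface the callback's error. A condensed, synchronous sketch of that ordering — the queue functions here are toy stand-ins, not the pict-rs repo API, and the real heartbeat is an async future raced against the callback:

```rust
#[derive(Debug)]
struct JobId(u64);

fn pop() -> (JobId, String) {
    (JobId(1), r#"{"Prune":null}"#.to_string())
}

fn complete_job(job_id: JobId) {
    println!("completed {job_id:?}");
}

fn run_callback(job: &str) -> Result<(), String> {
    println!("running {job}");
    Ok(())
}

fn main() -> Result<(), String> {
    let (job_id, job) = pop();
    // In the real loop this callback is raced against a heartbeat that keeps
    // refreshing the claim on (queue, worker_id, job_id).
    let res = run_callback(&job);
    // complete_job runs whether or not the callback failed...
    complete_job(job_id);
    // ...and only afterwards is the callback's error propagated.
    res
}
```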
-#[allow(clippy::too_many_arguments)]
 async fn process_image_jobs<S, F>(
-    tmp_dir: &ArcTmpDir,
-    policy_dir: &ArcPolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
-    process_map: &ProcessMap,
-    config: &Configuration,
+    state: State<S>,
+    process_map: ProcessMap,
     queue: &'static str,
     callback: F,
 ) where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcTmpDir,
-            &'a ArcPolicyDir,
-            &'a ArcRepo,
-            &'a S,
-            &'a ClientWithMiddleware,
-            &'a ProcessMap,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
+    for<'a> F: Fn(&'a State<S>, &'a ProcessMap, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>>
         + Copy,
 {
     let worker_id = uuid::Uuid::new_v4();
@@ -372,19 +313,7 @@ async fn process_image_jobs<S, F>(

         tokio::task::yield_now().await;

-        let res = image_job_loop(
-            tmp_dir,
-            policy_dir,
-            repo,
-            store,
-            client,
-            process_map,
-            config,
-            worker_id,
-            queue,
-            callback,
-        )
-        .await;
+        let res = image_job_loop(&state, &process_map, worker_id, queue, callback).await;

         if let Err(e) = res {
             tracing::warn!("Error processing jobs: {}", format!("{e}"));
@@ -401,31 +330,16 @@ async fn process_image_jobs<S, F>(
     }
 }

-#[allow(clippy::too_many_arguments)]
 async fn image_job_loop<S, F>(
-    tmp_dir: &ArcTmpDir,
-    policy_dir: &ArcPolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
+    state: &State<S>,
     process_map: &ProcessMap,
-    config: &Configuration,
     worker_id: uuid::Uuid,
     queue: &'static str,
     callback: F,
 ) -> Result<(), Error>
 where
     S: Store,
-    for<'a> F: Fn(
-            &'a ArcTmpDir,
-            &'a ArcPolicyDir,
-            &'a ArcRepo,
-            &'a S,
-            &'a ClientWithMiddleware,
-            &'a ProcessMap,
-            &'a Configuration,
-            serde_json::Value,
-        ) -> LocalBoxFuture<'a, Result<(), Error>>
+    for<'a> F: Fn(&'a State<S>, &'a ProcessMap, serde_json::Value) -> LocalBoxFuture<'a, Result<(), Error>>
         + Copy,
 {
     loop {
@@ -434,29 +348,20 @@ where
         tokio::task::yield_now().await;

         async {
-            let (job_id, job) = repo.pop(queue, worker_id).await?;
+            let (job_id, job) = state.repo.pop(queue, worker_id).await?;

             let guard = MetricsGuard::guard(worker_id, queue);

             let res = heartbeat(
-                repo,
+                &state.repo,
                 queue,
                 worker_id,
                 job_id,
-                (callback)(
-                    tmp_dir,
-                    policy_dir,
-                    repo,
-                    store,
-                    client,
-                    process_map,
-                    config,
-                    job,
-                ),
+                (callback)(state, process_map, job),
             )
             .await;

-            repo.complete_job(queue, worker_id, job_id).await?;
+            state.repo.complete_job(queue, worker_id, job_id).await?;

             res?;

@@ -10,41 +10,42 @@ use crate::{
     queue::Cleanup,
     repo::{Alias, ArcRepo, DeleteToken, Hash},
     serde_str::Serde,
+    state::State,
     store::Store,
 };

-pub(super) fn perform<'a, S>(
-    repo: &'a ArcRepo,
-    store: &'a S,
-    configuration: &'a Configuration,
+pub(super) fn perform<S>(
+    state: &State<S>,
     job: serde_json::Value,
-) -> LocalBoxFuture<'a, Result<(), Error>>
+) -> LocalBoxFuture<'_, Result<(), Error>>
 where
     S: Store + 'static,
 {
     Box::pin(async move {
         match serde_json::from_value(job) {
             Ok(job) => match job {
-                Cleanup::Hash { hash: in_hash } => hash(repo, in_hash).await?,
+                Cleanup::Hash { hash: in_hash } => hash(&state.repo, in_hash).await?,
                 Cleanup::Identifier {
                     identifier: in_identifier,
-                } => identifier(repo, store, Arc::from(in_identifier)).await?,
+                } => identifier(&state.repo, &state.store, Arc::from(in_identifier)).await?,
                 Cleanup::Alias {
                     alias: stored_alias,
                     token,
                 } => {
                     alias(
-                        repo,
+                        &state.repo,
                         Serde::into_inner(stored_alias),
                         Serde::into_inner(token),
                     )
                     .await?
                 }
-                Cleanup::Variant { hash, variant } => hash_variant(repo, hash, variant).await?,
-                Cleanup::AllVariants => all_variants(repo).await?,
-                Cleanup::OutdatedVariants => outdated_variants(repo, configuration).await?,
-                Cleanup::OutdatedProxies => outdated_proxies(repo, configuration).await?,
-                Cleanup::Prune => prune(repo, store).await?,
+                Cleanup::Variant { hash, variant } => {
+                    hash_variant(&state.repo, hash, variant).await?
+                }
+                Cleanup::AllVariants => all_variants(&state.repo).await?,
+                Cleanup::OutdatedVariants => outdated_variants(&state.repo, &state.config).await?,
+                Cleanup::OutdatedProxies => outdated_proxies(&state.repo, &state.config).await?,
+                Cleanup::Prune => prune(&state.repo, &state.store).await?,
             },
             Err(e) => {
                 tracing::warn!("Invalid job: {}", format!("{e}"));

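`perform` above is exactly what the reworked job runners expect: a plain `fn` item (hence `Copy`) from `&State<S>` and a JSON job to a boxed, borrow-carrying future. A self-contained sketch of why such a function satisfies the `for<'a> Fn(&'a State<S>, serde_json::Value) -> LocalBoxFuture<'a, _> + Copy` bound — `State`, the error type, and the `run` driver are simplified stand-ins, and the `futures` and `serde_json` crates are assumed as dependencies:

```rust
use std::future::Future;
use std::pin::Pin;

type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;

struct State {
    name: String,
}

// A fn item is Copy, and boxing the async block erases its concrete type,
// so this slots straight into the higher-ranked Fn bound below.
fn perform(state: &State, job: serde_json::Value) -> LocalBoxFuture<'_, Result<(), String>> {
    Box::pin(async move {
        println!("{} runs {job}", state.name);
        Ok(())
    })
}

async fn run<F>(state: &State, callback: F) -> Result<(), String>
where
    for<'a> F: Fn(&'a State, serde_json::Value) -> LocalBoxFuture<'a, Result<(), String>> + Copy,
{
    callback(state, serde_json::json!({ "Prune": null })).await
}

fn main() {
    let state = State { name: "worker-1".into() };
    futures::executor::block_on(run(&state, perform)).unwrap();
}
```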
@@ -1,32 +1,23 @@
-use reqwest_middleware::ClientWithMiddleware;
 use time::Instant;
 use tracing::{Instrument, Span};

 use crate::{
     concurrent_processor::ProcessMap,
-    config::Configuration,
     error::{Error, UploadError},
     formats::InputProcessableFormat,
     future::LocalBoxFuture,
     ingest::Session,
-    magick::{ArcPolicyDir, PolicyDir},
     queue::Process,
-    repo::{Alias, ArcRepo, UploadId, UploadResult},
+    repo::{Alias, UploadId, UploadResult},
     serde_str::Serde,
+    state::State,
     store::Store,
-    tmp_file::{ArcTmpDir, TmpDir},
 };
 use std::{path::PathBuf, sync::Arc};

-#[allow(clippy::too_many_arguments)]
 pub(super) fn perform<'a, S>(
-    tmp_dir: &'a ArcTmpDir,
-    policy_dir: &'a ArcPolicyDir,
-    repo: &'a ArcRepo,
-    store: &'a S,
-    client: &'a ClientWithMiddleware,
+    state: &'a State<S>,
     process_map: &'a ProcessMap,
-    config: &'a Configuration,
     job: serde_json::Value,
 ) -> LocalBoxFuture<'a, Result<(), Error>>
 where
@@ -41,15 +32,10 @@ where
             declared_alias,
         } => {
             process_ingest(
-                tmp_dir,
-                policy_dir,
-                repo,
-                store,
-                client,
+                state,
                 Arc::from(identifier),
                 Serde::into_inner(upload_id),
                 declared_alias.map(Serde::into_inner),
-                config,
             )
             .await?
         }
@@ -60,16 +46,12 @@ where
             process_args,
         } => {
             generate(
-                tmp_dir,
-                policy_dir,
-                repo,
-                store,
+                state,
                 process_map,
                 target_format,
                 Serde::into_inner(source),
                 process_path,
                 process_args,
-                config,
             )
             .await?
         }
@@ -117,18 +99,12 @@ impl Drop for UploadGuard {
     }
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, repo, store, client, config))]
+#[tracing::instrument(skip(state))]
 async fn process_ingest<S>(
-    tmp_dir: &ArcTmpDir,
-    policy_dir: &ArcPolicyDir,
-    repo: &ArcRepo,
-    store: &S,
-    client: &ClientWithMiddleware,
+    state: &State<S>,
     unprocessed_identifier: Arc<str>,
     upload_id: UploadId,
     declared_alias: Option<Alias>,
-    config: &Configuration,
 ) -> Result<(), Error>
 where
     S: Store + 'static,
@@ -136,33 +112,19 @@ where
     let guard = UploadGuard::guard(upload_id);

     let fut = async {
-        let tmp_dir = tmp_dir.clone();
-        let policy_dir = policy_dir.clone();
         let ident = unprocessed_identifier.clone();
-        let store2 = store.clone();
-        let repo = repo.clone();
-        let client = client.clone();
+        let state2 = state.clone();

         let current_span = Span::current();
         let span = tracing::info_span!(parent: current_span, "error_boundary");

-        let config = config.clone();
         let error_boundary = crate::sync::abort_on_drop(crate::sync::spawn(
             "ingest-media",
             async move {
-                let stream = crate::stream::from_err(store2.to_stream(&ident, None, None).await?);
+                let stream =
+                    crate::stream::from_err(state2.store.to_stream(&ident, None, None).await?);

-                let session = crate::ingest::ingest(
-                    &tmp_dir,
-                    &policy_dir,
-                    &repo,
-                    &store2,
-                    &client,
-                    stream,
-                    declared_alias,
-                    &config,
-                )
-                .await?;
+                let session = crate::ingest::ingest(&state2, stream, declared_alias).await?;

                 Ok(session) as Result<Session, Error>
             }
@@ -170,7 +132,7 @@ where
         ))
         .await;

-        store.remove(&unprocessed_identifier).await?;
+        state.store.remove(&unprocessed_identifier).await?;

         error_boundary.map_err(|_| UploadError::Canceled)?
     };
@@ -191,62 +153,46 @@ where
         }
     };

-    repo.complete_upload(upload_id, result).await?;
+    state.repo.complete_upload(upload_id, result).await?;

     guard.disarm();

     Ok(())
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(
-    tmp_dir,
-    policy_dir,
-    repo,
-    store,
-    process_map,
-    process_path,
-    process_args,
-    config
-))]
+#[tracing::instrument(skip(state, process_map, process_path, process_args))]
 async fn generate<S: Store + 'static>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
-    repo: &ArcRepo,
-    store: &S,
+    state: &State<S>,
     process_map: &ProcessMap,
     target_format: InputProcessableFormat,
     source: Alias,
     process_path: PathBuf,
     process_args: Vec<String>,
-    config: &Configuration,
 ) -> Result<(), Error> {
-    let Some(hash) = repo.hash(&source).await? else {
+    let Some(hash) = state.repo.hash(&source).await? else {
         // Nothing to do
         return Ok(());
     };

     let path_string = process_path.to_string_lossy().to_string();
-    let identifier_opt = repo.variant_identifier(hash.clone(), path_string).await?;
+    let identifier_opt = state
+        .repo
+        .variant_identifier(hash.clone(), path_string)
+        .await?;

     if identifier_opt.is_some() {
         return Ok(());
     }

-    let original_details =
-        crate::ensure_details(tmp_dir, policy_dir, repo, store, config, &source).await?;
+    let original_details = crate::ensure_details(state, &source).await?;

     crate::generate::generate(
-        tmp_dir,
-        policy_dir,
-        repo,
-        store,
+        state,
         process_map,
         target_format,
         process_path,
         process_args,
         &original_details,
-        config,
         hash,
     )
     .await?;

@@ -7,17 +7,15 @@ use streem::IntoStreamer;
 use tokio::{sync::Semaphore, task::JoinSet};

 use crate::{
-    config::Configuration,
     details::Details,
     error::{Error, UploadError},
-    magick::{ArcPolicyDir, PolicyDir},
     repo::{ArcRepo, DeleteToken, Hash},
     repo_04::{
         AliasRepo as _, HashRepo as _, IdentifierRepo as _, SettingsRepo as _,
         SledRepo as OldSledRepo,
     },
+    state::State,
     store::Store,
-    tmp_file::{ArcTmpDir, TmpDir},
 };

 const GENERATOR_KEY: &str = "last-path";
@@ -80,23 +78,19 @@ pub(crate) async fn migrate_repo(old_repo: ArcRepo, new_repo: ArcRepo) -> Result

 #[tracing::instrument(skip_all)]
 pub(crate) async fn migrate_04<S: Store + 'static>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
     old_repo: OldSledRepo,
-    new_repo: ArcRepo,
-    store: S,
-    config: Configuration,
+    state: State<S>,
 ) -> Result<(), Error> {
     tracing::info!("Running checks");
     if let Err(e) = old_repo.health_check().await {
         tracing::warn!("Old repo is not configured correctly");
         return Err(e.into());
     }
-    if let Err(e) = new_repo.health_check().await {
+    if let Err(e) = state.repo.health_check().await {
         tracing::warn!("New repo is not configured correctly");
         return Err(e.into());
     }
-    if let Err(e) = store.health_check().await {
+    if let Err(e) = state.store.health_check().await {
         tracing::warn!("Store is not configured correctly");
         return Err(e.into());
     }
@@ -116,19 +110,15 @@ pub(crate) async fn migrate_04<S: Store + 'static>(

         if let Ok(hash) = res {
             set.spawn_local(migrate_hash_04(
-                tmp_dir.clone(),
-                policy_dir.clone(),
                 old_repo.clone(),
-                new_repo.clone(),
-                store.clone(),
-                config.clone(),
+                state.clone(),
                 hash.clone(),
             ));
         } else {
             tracing::warn!("Failed to read hash, skipping");
         }

-        while set.len() >= config.upgrade.concurrency {
+        while set.len() >= state.config.upgrade.concurrency {
             tracing::trace!("migrate_04: join looping");

             if set.join_next().await.is_some() {
@@ -156,13 +146,15 @@ pub(crate) async fn migrate_04<S: Store + 'static>(
     }

     if let Some(generator_state) = old_repo.get(GENERATOR_KEY).await? {
-        new_repo
+        state
+            .repo
             .set(GENERATOR_KEY, generator_state.to_vec().into())
             .await?;
     }

     if let Some(generator_state) = old_repo.get(crate::NOT_FOUND_KEY).await? {
-        new_repo
+        state
+            .repo
             .set(crate::NOT_FOUND_KEY, generator_state.to_vec().into())
             .await?;
     }
@@ -193,28 +185,10 @@ async fn migrate_hash(old_repo: ArcRepo, new_repo: ArcRepo, hash: Hash) {
     }
 }

-async fn migrate_hash_04<S: Store>(
-    tmp_dir: ArcTmpDir,
-    policy_dir: ArcPolicyDir,
-    old_repo: OldSledRepo,
-    new_repo: ArcRepo,
-    store: S,
-    config: Configuration,
-    old_hash: sled::IVec,
-) {
+async fn migrate_hash_04<S: Store>(old_repo: OldSledRepo, state: State<S>, old_hash: sled::IVec) {
     let mut hash_failures = 0;

-    while let Err(e) = timed_migrate_hash_04(
-        &tmp_dir,
-        &policy_dir,
-        &old_repo,
-        &new_repo,
-        &store,
-        &config,
-        old_hash.clone(),
-    )
-    .await
-    {
+    while let Err(e) = timed_migrate_hash_04(&old_repo, &state, old_hash.clone()).await {
         hash_failures += 1;

         if hash_failures > 10 {
@@ -300,19 +274,13 @@ async fn do_migrate_hash(old_repo: &ArcRepo, new_repo: &ArcRepo, hash: Hash) ->
 }

 async fn timed_migrate_hash_04<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    new_repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     old_hash: sled::IVec,
 ) -> Result<(), Error> {
     tokio::time::timeout(
-        Duration::from_secs(config.media.external_validation_timeout * 6),
-        do_migrate_hash_04(
-            tmp_dir, policy_dir, old_repo, new_repo, store, config, old_hash,
-        ),
+        Duration::from_secs(state.config.media.process_timeout * 6),
+        do_migrate_hash_04(old_repo, state, old_hash),
     )
     .await
     .map_err(|_| UploadError::ProcessTimeout)?
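The `timed_migrate_hash_04` hunk bounds each per-hash migration at six times the configured process timeout: when the clock wins, the inner future is dropped and the elapsed error becomes `UploadError::ProcessTimeout`, while the trailing `?` unwraps the outer `Result` that `tokio::time::timeout` adds. A sketch of that double-`Result` shape with a toy job (tokio assumed as a dependency):

```rust
use std::time::Duration;

async fn migrate_one(id: u64) -> Result<(), String> {
    println!("migrating {id}");
    Ok(())
}

async fn timed_migrate_one(id: u64, process_timeout: u64) -> Result<(), String> {
    // timeout(..) yields Result<Result<(), String>, Elapsed>: map_err + `?`
    // peel the outer layer, and the inner Result is returned as-is.
    tokio::time::timeout(
        Duration::from_secs(process_timeout * 6),
        migrate_one(id),
    )
    .await
    .map_err(|_| String::from("process timeout"))?
}

#[tokio::main]
async fn main() -> Result<(), String> {
    timed_migrate_one(1, 15).await
}
```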
@@ -320,12 +288,8 @@ async fn timed_migrate_hash_04<S: Store>(

 #[tracing::instrument(skip_all)]
 async fn do_migrate_hash_04<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    new_repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     old_hash: sled::IVec,
 ) -> Result<(), Error> {
     let Some(identifier) = old_repo.identifier(old_hash.clone()).await? else {
@@ -333,18 +297,9 @@ async fn do_migrate_hash_04<S: Store>(
         return Ok(());
     };

-    let size = store.len(&identifier).await?;
+    let size = state.store.len(&identifier).await?;

-    let hash_details = set_details(
-        tmp_dir,
-        policy_dir,
-        old_repo,
-        new_repo,
-        store,
-        config,
-        &identifier,
-    )
-    .await?;
+    let hash_details = set_details(old_repo, state, &identifier).await?;

     let aliases = old_repo.aliases_for_hash(old_hash.clone()).await?;
     let variants = old_repo.variants(old_hash.clone()).await?;
@@ -354,7 +309,8 @@ async fn do_migrate_hash_04<S: Store>(

     let hash = Hash::new(hash, size, hash_details.internal_format());

-    let _ = new_repo
+    let _ = state
+        .repo
         .create_hash_with_timestamp(hash.clone(), &identifier, hash_details.created_at())
         .await?;

@@ -364,66 +320,45 @@ async fn do_migrate_hash_04<S: Store>(
             .await?
             .unwrap_or_else(DeleteToken::generate);

-        let _ = new_repo
+        let _ = state
+            .repo
             .create_alias(&alias, &delete_token, hash.clone())
             .await?;
     }

     if let Some(identifier) = motion_identifier {
-        new_repo
+        state
+            .repo
             .relate_motion_identifier(hash.clone(), &identifier)
             .await?;

-        set_details(
-            tmp_dir,
-            policy_dir,
-            old_repo,
-            new_repo,
-            store,
-            config,
-            &identifier,
-        )
-        .await?;
+        set_details(old_repo, state, &identifier).await?;
     }

     for (variant, identifier) in variants {
-        let _ = new_repo
+        let _ = state
+            .repo
             .relate_variant_identifier(hash.clone(), variant.clone(), &identifier)
             .await?;

-        set_details(
-            tmp_dir,
-            policy_dir,
-            old_repo,
-            new_repo,
-            store,
-            config,
-            &identifier,
-        )
-        .await?;
+        set_details(old_repo, state, &identifier).await?;

-        new_repo.accessed_variant(hash.clone(), variant).await?;
+        state.repo.accessed_variant(hash.clone(), variant).await?;
     }

     Ok(())
 }

 async fn set_details<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    new_repo: &ArcRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     identifier: &Arc<str>,
 ) -> Result<Details, Error> {
-    if let Some(details) = new_repo.details(identifier).await? {
+    if let Some(details) = state.repo.details(identifier).await? {
         Ok(details)
     } else {
-        let details =
-            fetch_or_generate_details(tmp_dir, policy_dir, old_repo, store, config, identifier)
-                .await?;
-        new_repo.relate_details(identifier, &details).await?;
+        let details = fetch_or_generate_details(old_repo, state, identifier).await?;
+        state.repo.relate_details(identifier, &details).await?;
         Ok(details)
     }
 }
@@ -442,11 +377,8 @@ fn details_semaphore() -> &'static Semaphore {

 #[tracing::instrument(skip_all)]
 async fn fetch_or_generate_details<S: Store>(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
     old_repo: &OldSledRepo,
-    store: &S,
-    config: &Configuration,
+    state: &State<S>,
     identifier: &Arc<str>,
 ) -> Result<Details, Error> {
     let details_opt = old_repo.details(identifier.clone()).await?;
@@ -454,12 +386,11 @@ async fn fetch_or_generate_details<S: Store>(
     if let Some(details) = details_opt {
         Ok(details)
     } else {
-        let bytes_stream = store.to_bytes(identifier, None, None).await?;
+        let bytes_stream = state.store.to_bytes(identifier, None, None).await?;
         let bytes = bytes_stream.into_bytes();

         let guard = details_semaphore().acquire().await?;
-        let details =
-            Details::from_bytes(tmp_dir, policy_dir, config.media.process_timeout, bytes).await?;
+        let details = Details::from_bytes(state, bytes).await?;
         drop(guard);

         Ok(details)

src/state.rs (new file, 13 lines)
@@ -0,0 +1,13 @@
+use reqwest_middleware::ClientWithMiddleware;
+
+use crate::{config::Configuration, magick::ArcPolicyDir, repo::ArcRepo, tmp_file::ArcTmpDir};
+
+#[derive(Clone)]
+pub(crate) struct State<S> {
+    pub(super) config: Configuration,
+    pub(super) tmp_dir: ArcTmpDir,
+    pub(super) policy_dir: ArcPolicyDir,
+    pub(super) repo: ArcRepo,
+    pub(super) store: S,
+    pub(super) client: ClientWithMiddleware,
+}
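`src/state.rs` above is the single type the whole PR funnels into: configuration, scratch directories, repo, store, and HTTP client travel as one cloneable bundle. The diff does not show where `State` gets built, so the wiring below is an assumption with toy field types; it only illustrates why `#[derive(Clone)]` is enough for handing each worker its own handle when the shared fields are `Arc`-backed:

```rust
use std::sync::Arc;

// Stand-ins for Configuration, ArcRepo, etc.; only the shape matters here.
#[derive(Clone)]
struct State<S> {
    config: Arc<str>,
    repo: Arc<str>,
    store: S,
}

fn main() {
    let state = State {
        config: Arc::from("config"),
        repo: Arc::from("sled-repo"),
        store: "file-store",
    };

    // Each worker clones the bundle: Arc fields bump a refcount, and the
    // store clones however Clone is defined for S.
    let handles: Vec<_> = (0..2)
        .map(|worker| {
            let state = state.clone();
            std::thread::spawn(move || {
                println!("worker {worker}: {} / {} / {}", state.config, state.repo, state.store);
            })
        })
        .collect();

    for handle in handles {
        handle.join().unwrap();
    }
}
```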
src/validate.rs (116 changes)
@@ -8,11 +8,10 @@ use crate::{
     error_code::ErrorCode,
     formats::{
         AnimationFormat, AnimationOutput, ImageInput, ImageOutput, InputFile, InputVideoFormat,
-        InternalFormat, Validations,
+        InternalFormat,
     },
-    magick::PolicyDir,
     process::ProcessRead,
-    tmp_file::TmpDir,
+    state::State,
 };
 use actix_web::web::Bytes;

@@ -57,12 +56,9 @@ impl ValidationError {
 const MEGABYTES: usize = 1024 * 1024;

 #[tracing::instrument(skip_all)]
-pub(crate) async fn validate_bytes(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(crate) async fn validate_bytes<S>(
+    state: &State<S>,
     bytes: Bytes,
-    validations: Validations<'_>,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
     if bytes.is_empty() {
         return Err(ValidationError::Empty.into());
@@ -73,70 +69,39 @@ pub(crate) async fn validate_bytes(
         width,
         height,
         frames,
-    } = crate::discover::discover_bytes(tmp_dir, policy_dir, timeout, bytes.clone()).await?;
+    } = crate::discover::discover_bytes(state, bytes.clone()).await?;

     match &input {
         InputFile::Image(input) => {
-            let (format, process_read) = process_image(
-                tmp_dir,
-                policy_dir,
-                bytes,
-                *input,
-                width,
-                height,
-                validations.image,
-                timeout,
-            )
-            .await?;
+            let (format, process_read) = process_image(state, bytes, *input, width, height).await?;

             Ok((format, process_read))
         }
         InputFile::Animation(input) => {
-            let (format, process_read) = process_animation(
-                tmp_dir,
-                policy_dir,
-                bytes,
-                *input,
-                width,
-                height,
-                frames.unwrap_or(1),
-                validations.animation,
-                timeout,
-            )
-            .await?;
+            let (format, process_read) =
+                process_animation(state, bytes, *input, width, height, frames.unwrap_or(1)).await?;

             Ok((format, process_read))
         }
         InputFile::Video(input) => {
-            let (format, process_read) = process_video(
-                tmp_dir,
-                bytes,
-                *input,
-                width,
-                height,
-                frames.unwrap_or(1),
-                validations.video,
-                timeout,
-            )
-            .await?;
+            let (format, process_read) =
+                process_video(state, bytes, *input, width, height, frames.unwrap_or(1)).await?;

             Ok((format, process_read))
         }
     }
 }

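`validate_bytes` now reads as: discover what the bytes are, then hand them to a per-kind processor that pulls its own limits from `state.config.media`. A toy version of that discover-then-dispatch flow — the enum and limit struct are simplified stand-ins for pict-rs' `InputFile` and the `media.{image,animation,video}` config sections:

```rust
enum InputKind {
    Image,
    Animation { frames: u32 },
    Video { frames: u32 },
}

struct MediaLimits {
    max_frame_count: u32,
}

fn validate(kind: InputKind, limits: &MediaLimits) -> Result<&'static str, String> {
    match kind {
        // Still images have no frame budget to check in this sketch.
        InputKind::Image => Ok("image"),
        // Motion media share a frame-count limit drawn from config.
        InputKind::Animation { frames } | InputKind::Video { frames } => {
            if frames > limits.max_frame_count {
                Err(format!("too many frames: {frames}"))
            } else {
                Ok("motion")
            }
        }
    }
}

fn main() {
    let limits = MediaLimits { max_frame_count: 900 };
    assert_eq!(validate(InputKind::Image, &limits), Ok("image"));
    assert!(validate(InputKind::Video { frames: 10_000 }, &limits).is_err());
}
```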
-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, bytes, validations))]
-async fn process_image(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
+async fn process_image<S>(
+    state: &State<S>,
     bytes: Bytes,
     input: ImageInput,
     width: u16,
     height: u16,
-    validations: &crate::config::Image,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
+    let validations = &state.config.media.image;
+
     if width > validations.max_width {
         return Err(ValidationError::Width.into());
     }
@@ -158,18 +123,9 @@ async fn process_image(
     let process_read = if needs_transcode {
         let quality = validations.quality_for(format);

-        magick::convert_image(
-            tmp_dir,
-            policy_dir,
-            input.format,
-            format,
-            quality,
-            timeout,
-            bytes,
-        )
-        .await?
+        magick::convert_image(state, input.format, format, quality, bytes).await?
     } else {
-        exiftool::clear_metadata_bytes_read(bytes, timeout)?
+        exiftool::clear_metadata_bytes_read(bytes, state.config.media.process_timeout)?
     };

     Ok((InternalFormat::Image(format), process_read))
@@ -201,19 +157,17 @@ fn validate_animation(
     Ok(())
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, policy_dir, bytes, validations))]
-async fn process_animation(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
+async fn process_animation<S>(
+    state: &State<S>,
     bytes: Bytes,
     input: AnimationFormat,
     width: u16,
     height: u16,
     frames: u32,
-    validations: &crate::config::Animation,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
+    let validations = &state.config.media.animation;
+
     validate_animation(bytes.len(), width, height, frames, validations)?;

     let AnimationOutput {
@@ -224,10 +178,9 @@ async fn process_animation(
     let process_read = if needs_transcode {
         let quality = validations.quality_for(format);

-        magick::convert_animation(tmp_dir, policy_dir, input, format, quality, timeout, bytes)
-            .await?
+        magick::convert_animation(state, input, format, quality, bytes).await?
     } else {
-        exiftool::clear_metadata_bytes_read(bytes, timeout)?
+        exiftool::clear_metadata_bytes_read(bytes, state.config.media.process_timeout)?
     };

     Ok((InternalFormat::Animation(format), process_read))
@@ -262,18 +215,17 @@ fn validate_video(
     Ok(())
 }

-#[allow(clippy::too_many_arguments)]
-#[tracing::instrument(skip(tmp_dir, bytes, validations))]
-async fn process_video(
-    tmp_dir: &TmpDir,
+#[tracing::instrument(skip(state, bytes), fields(len = bytes.len()))]
+async fn process_video<S>(
+    state: &State<S>,
     bytes: Bytes,
     input: InputVideoFormat,
     width: u16,
     height: u16,
     frames: u32,
-    validations: &crate::config::Video,
-    timeout: u64,
 ) -> Result<(InternalFormat, ProcessRead), Error> {
+    let validations = &state.config.media.video;
+
     validate_video(bytes.len(), width, height, frames, validations)?;

     let output = input.build_output(
@@ -284,7 +236,15 @@ async fn process_video(

     let crf = validations.crf_for(width, height);

-    let process_read = ffmpeg::transcode_bytes(tmp_dir, input, output, crf, timeout, bytes).await?;
+    let process_read = ffmpeg::transcode_bytes(
+        &state.tmp_dir,
+        input,
+        output,
+        crf,
+        state.config.media.process_timeout,
+        bytes,
+    )
+    .await?;

     Ok((
         InternalFormat::Video(output.format.internal_format()),

@@ -4,72 +4,62 @@ use actix_web::web::Bytes;

 use crate::{
     formats::{AnimationFormat, ImageFormat},
-    magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
+    magick::{MagickError, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
     process::{Process, ProcessRead},
-    tmp_file::TmpDir,
+    state::State,
 };

-pub(super) async fn convert_image(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn convert_image<S>(
+    state: &State<S>,
     input: ImageFormat,
     output: ImageFormat,
     quality: Option<u8>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<ProcessRead, MagickError> {
     convert(
-        tmp_dir,
-        policy_dir,
+        state,
         input.magick_format(),
         output.magick_format(),
         false,
         quality,
-        timeout,
         bytes,
     )
     .await
 }

-pub(super) async fn convert_animation(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+pub(super) async fn convert_animation<S>(
+    state: &State<S>,
     input: AnimationFormat,
     output: AnimationFormat,
     quality: Option<u8>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<ProcessRead, MagickError> {
     convert(
-        tmp_dir,
-        policy_dir,
+        state,
         input.magick_format(),
         output.magick_format(),
         true,
         quality,
-        timeout,
         bytes,
     )
     .await
 }

-#[allow(clippy::too_many_arguments)]
-async fn convert(
-    tmp_dir: &TmpDir,
-    policy_dir: &PolicyDir,
+async fn convert<S>(
+    state: &State<S>,
     input: &'static str,
     output: &'static str,
     coalesce: bool,
     quality: Option<u8>,
-    timeout: u64,
     bytes: Bytes,
 ) -> Result<ProcessRead, MagickError> {
-    let temporary_path = tmp_dir
+    let temporary_path = state
+        .tmp_dir
         .tmp_folder()
         .await
         .map_err(MagickError::CreateTemporaryDirectory)?;

-    let input_file = tmp_dir.tmp_file(None);
+    let input_file = state.tmp_dir.tmp_file(None);

     crate::store::file_store::safe_create_parent(&input_file)
         .await
@@ -104,10 +94,10 @@ async fn convert(

     let envs = [
         (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
-        (MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()),
+        (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
     ];

-    let reader = Process::run("magick", &args, &envs, timeout)?.read();
+    let reader = Process::run("magick", &args, &envs, state.config.media.process_timeout)?.read();

     let clean_reader = reader.add_extras(input_file).add_extras(temporary_path);

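The `convert` hunk above sandboxes ImageMagick entirely through environment variables: `MAGICK_TEMPORARY_PATH` points scratch files at a per-call temp folder, and `MAGICK_CONFIGURE_PATH` points at the directory holding the restrictive `policy.xml`. A std-only sketch of the same idea (assumes a `magick` binary on PATH; the paths and arguments are placeholders):

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    let status = Command::new("magick")
        .args(["identify", "input.png"])
        // Scratch space for ImageMagick's intermediate files:
        .env("MAGICK_TEMPORARY_PATH", "/tmp/pict-rs-scratch")
        // Directory containing a restrictive policy.xml:
        .env("MAGICK_CONFIGURE_PATH", "/etc/pict-rs/magick")
        .status()?;
    println!("magick exited with {status}");
    Ok(())
}
```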