
It compiles again

asonix 2024-02-03 18:18:13 -06:00
parent f2410a9283
commit 50e31f96da
17 changed files with 240 additions and 451 deletions
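
The diff below threads a single shared State<S> through call sites in place of the separately passed tmp_dir, policy_dir, repo, store, client, config, and timeout arguments. As a rough, hedged sketch of what that state bundles — field names are taken from the struct literals and accesses visible in the diff, while the concrete types here are simplified stand-ins, not pict-rs's actual definitions:

#![allow(dead_code)]
use std::sync::Arc;

// Simplified stand-ins so this sketch compiles on its own; the real crate has
// its own Configuration, TmpDir, PolicyDir, repo, store, and client types.
#[derive(Clone)]
struct Configuration {
    media: Media,
}

#[derive(Clone)]
struct Media {
    process_timeout: u64,
}

struct TmpDir;
struct PolicyDir;
struct Repo;

#[derive(Clone)]
struct Client;

// Sketch of the shared state threaded through the functions in the diff
// (fields as they appear there: config, tmp_dir, policy_dir, repo, store, client).
#[derive(Clone)]
struct State<S> {
    config: Configuration,
    tmp_dir: Arc<TmpDir>,
    policy_dir: Arc<PolicyDir>,
    repo: Arc<Repo>,
    store: S,
    client: Client,
}

fn main() {
    // A caller now passes one `&State<S>` instead of several arguments; for
    // example, `state.config.media.process_timeout` replaces a separate
    // `timeout: u64` parameter.
    let state = State {
        config: Configuration {
            media: Media { process_timeout: 30 },
        },
        tmp_dir: Arc::new(TmpDir),
        policy_dir: Arc::new(PolicyDir),
        repo: Arc::new(Repo),
        store: (),
        client: Client,
    };

    assert_eq!(state.config.media.process_timeout, 30);
    let _cloned = state.clone();
}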


@ -3,6 +3,7 @@ use std::sync::Arc;
use crate::{ use crate::{
error::Error, error::Error,
repo::{ArcRepo, UploadId}, repo::{ArcRepo, UploadId},
state::State,
store::Store, store::Store,
}; };
use actix_web::web::Bytes; use actix_web::web::Bytes;
@ -30,23 +31,23 @@ impl Backgrounded {
self.identifier.as_ref() self.identifier.as_ref()
} }
pub(crate) async fn proxy<S, P>(repo: ArcRepo, store: S, stream: P) -> Result<Self, Error> pub(crate) async fn proxy<S, P>(state: &State<S>, stream: P) -> Result<Self, Error>
where where
S: Store, S: Store,
P: Stream<Item = Result<Bytes, Error>> + 'static, P: Stream<Item = Result<Bytes, Error>> + 'static,
{ {
let mut this = Self { let mut this = Self {
repo, repo: state.repo.clone(),
identifier: None, identifier: None,
upload_id: None, upload_id: None,
}; };
this.do_proxy(store, stream).await?; this.do_proxy(&state.store, stream).await?;
Ok(this) Ok(this)
} }
async fn do_proxy<S, P>(&mut self, store: S, stream: P) -> Result<(), Error> async fn do_proxy<S, P>(&mut self, store: &S, stream: P) -> Result<(), Error>
where where
S: Store, S: Store,
P: Stream<Item = Result<Bytes, Error>> + 'static, P: Stream<Item = Result<Bytes, Error>> + 'static,


@ -4,6 +4,7 @@ use crate::{
formats::{InternalFormat, InternalVideoFormat}, formats::{InternalFormat, InternalVideoFormat},
magick::PolicyDir, magick::PolicyDir,
serde_str::Serde, serde_str::Serde,
state::State,
tmp_file::TmpDir, tmp_file::TmpDir,
}; };
use actix_web::web; use actix_web::web;
@ -81,18 +82,13 @@ impl Details {
} }
#[tracing::instrument(level = "debug", skip_all)] #[tracing::instrument(level = "debug", skip_all)]
pub(crate) async fn from_bytes( pub(crate) async fn from_bytes<S>(state: &State<S>, input: web::Bytes) -> Result<Self, Error> {
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
timeout: u64,
input: web::Bytes,
) -> Result<Self, Error> {
let Discovery { let Discovery {
input, input,
width, width,
height, height,
frames, frames,
} = crate::discover::discover_bytes(tmp_dir, policy_dir, timeout, input).await?; } = crate::discover::discover_bytes(state, input).await?;
Ok(Details::from_parts( Ok(Details::from_parts(
input.internal_format(), input.internal_format(),


@ -4,7 +4,7 @@ mod magick;
use actix_web::web::Bytes; use actix_web::web::Bytes;
use crate::{formats::InputFile, magick::PolicyDir, tmp_file::TmpDir}; use crate::{formats::InputFile, magick::PolicyDir, state::State, tmp_file::TmpDir};
#[derive(Debug, PartialEq, Eq)] #[derive(Debug, PartialEq, Eq)]
pub(crate) struct Discovery { pub(crate) struct Discovery {
@ -27,18 +27,16 @@ pub(crate) enum DiscoverError {
} }
#[tracing::instrument(level = "trace", skip_all)] #[tracing::instrument(level = "trace", skip_all)]
pub(crate) async fn discover_bytes( pub(crate) async fn discover_bytes<S>(
tmp_dir: &TmpDir, state: &State<S>,
policy_dir: &PolicyDir,
timeout: u64,
bytes: Bytes, bytes: Bytes,
) -> Result<Discovery, crate::error::Error> { ) -> Result<Discovery, crate::error::Error> {
let discovery = ffmpeg::discover_bytes(tmp_dir, timeout, bytes.clone()).await?; let discovery = ffmpeg::discover_bytes(state, bytes.clone()).await?;
let discovery = magick::confirm_bytes(state, discovery, bytes.clone()).await?;
let discovery = let discovery =
magick::confirm_bytes(tmp_dir, policy_dir, discovery, timeout, bytes.clone()).await?; exiftool::check_reorient(discovery, bytes, state.config.media.process_timeout).await?;
let discovery = exiftool::check_reorient(discovery, timeout, bytes).await?;
Ok(discovery) Ok(discovery)
} }


@ -16,8 +16,8 @@ pub(super) async fn check_reorient(
height, height,
frames, frames,
}: Discovery, }: Discovery,
timeout: u64,
bytes: Bytes, bytes: Bytes,
timeout: u64,
) -> Result<Discovery, ExifError> { ) -> Result<Discovery, ExifError> {
let input = match input { let input = match input {
InputFile::Image(ImageInput { format, .. }) => { InputFile::Image(ImageInput { format, .. }) => {


@ -10,6 +10,7 @@ use crate::{
Mp4AudioCodec, Mp4Codec, WebmAlphaCodec, WebmAudioCodec, WebmCodec, Mp4AudioCodec, Mp4Codec, WebmAlphaCodec, WebmAudioCodec, WebmCodec,
}, },
process::Process, process::Process,
state::State,
tmp_file::TmpDir, tmp_file::TmpDir,
}; };
use actix_web::web::Bytes; use actix_web::web::Bytes;
@ -158,12 +159,11 @@ struct Flags {
} }
#[tracing::instrument(skip_all)] #[tracing::instrument(skip_all)]
pub(super) async fn discover_bytes( pub(super) async fn discover_bytes<S>(
tmp_dir: &TmpDir, state: &State<S>,
timeout: u64,
bytes: Bytes, bytes: Bytes,
) -> Result<Option<Discovery>, FfMpegError> { ) -> Result<Option<Discovery>, FfMpegError> {
discover_file(tmp_dir, timeout, move |mut file| { discover_file(state, move |mut file| {
let bytes = bytes.clone(); let bytes = bytes.clone();
async move { async move {
@ -191,16 +191,12 @@ async fn allows_alpha(pixel_format: &str, timeout: u64) -> Result<bool, FfMpegEr
} }
#[tracing::instrument(level = "debug", skip_all)] #[tracing::instrument(level = "debug", skip_all)]
async fn discover_file<F, Fut>( async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Option<Discovery>, FfMpegError>
tmp_dir: &TmpDir,
timeout: u64,
f: F,
) -> Result<Option<Discovery>, FfMpegError>
where where
F: FnOnce(crate::file::File) -> Fut, F: FnOnce(crate::file::File) -> Fut,
Fut: std::future::Future<Output = Result<crate::file::File, FfMpegError>>, Fut: std::future::Future<Output = Result<crate::file::File, FfMpegError>>,
{ {
let input_file = tmp_dir.tmp_file(None); let input_file = state.tmp_dir.tmp_file(None);
crate::store::file_store::safe_create_parent(&input_file) crate::store::file_store::safe_create_parent(&input_file)
.await .await
.map_err(FfMpegError::CreateDir)?; .map_err(FfMpegError::CreateDir)?;
@ -226,7 +222,7 @@ where
input_file.as_os_str(), input_file.as_os_str(),
], ],
&[], &[],
timeout, state.config.media.process_timeout,
)? )?
.read() .read()
.into_vec() .into_vec()
@ -250,7 +246,7 @@ where
.. ..
}) = &mut discovery.input }) = &mut discovery.input
{ {
*alpha = allows_alpha(&pixel_format, timeout).await?; *alpha = allows_alpha(&pixel_format, state.config.media.process_timeout).await?;
} }
} }


@ -8,6 +8,7 @@ use crate::{
formats::{AnimationFormat, ImageFormat, ImageInput, InputFile}, formats::{AnimationFormat, ImageFormat, ImageInput, InputFile},
magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH}, magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
process::Process, process::Process,
state::State,
tmp_file::TmpDir, tmp_file::TmpDir,
}; };
@ -31,11 +32,9 @@ struct Geometry {
} }
#[tracing::instrument(skip_all)] #[tracing::instrument(skip_all)]
pub(super) async fn confirm_bytes( pub(super) async fn confirm_bytes<S>(
tmp_dir: &TmpDir, state: &State<S>,
policy_dir: &PolicyDir,
discovery: Option<Discovery>, discovery: Option<Discovery>,
timeout: u64,
bytes: Bytes, bytes: Bytes,
) -> Result<Discovery, MagickError> { ) -> Result<Discovery, MagickError> {
match discovery { match discovery {
@ -51,7 +50,7 @@ pub(super) async fn confirm_bytes(
} }
} }
discover_file(tmp_dir, policy_dir, timeout, move |mut file| async move { discover_file(state, move |mut file| async move {
file.write_from_bytes(bytes) file.write_from_bytes(bytes)
.await .await
.map_err(MagickError::Write)?; .map_err(MagickError::Write)?;
@ -62,22 +61,18 @@ pub(super) async fn confirm_bytes(
} }
#[tracing::instrument(level = "debug", skip_all)] #[tracing::instrument(level = "debug", skip_all)]
async fn discover_file<F, Fut>( async fn discover_file<S, F, Fut>(state: &State<S>, f: F) -> Result<Discovery, MagickError>
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
timeout: u64,
f: F,
) -> Result<Discovery, MagickError>
where where
F: FnOnce(crate::file::File) -> Fut, F: FnOnce(crate::file::File) -> Fut,
Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>, Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
{ {
let temporary_path = tmp_dir let temporary_path = state
.tmp_dir
.tmp_folder() .tmp_folder()
.await .await
.map_err(MagickError::CreateTemporaryDirectory)?; .map_err(MagickError::CreateTemporaryDirectory)?;
let input_file = tmp_dir.tmp_file(None); let input_file = state.tmp_dir.tmp_file(None);
crate::store::file_store::safe_create_parent(&input_file) crate::store::file_store::safe_create_parent(&input_file)
.await .await
.map_err(MagickError::CreateDir)?; .map_err(MagickError::CreateDir)?;
@ -90,7 +85,7 @@ where
let envs = [ let envs = [
(MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()), (MAGICK_TEMPORARY_PATH, temporary_path.as_os_str()),
(MAGICK_CONFIGURE_PATH, policy_dir.as_os_str()), (MAGICK_CONFIGURE_PATH, state.policy_dir.as_os_str()),
]; ];
let res = Process::run( let res = Process::run(
@ -102,7 +97,7 @@ where
"JSON:".as_ref(), "JSON:".as_ref(),
], ],
&envs, &envs,
timeout, state.config.media.process_timeout,
)? )?
.read() .read()
.into_string() .into_string()


@ -143,13 +143,7 @@ async fn process<S: Store + 'static>(
drop(permit); drop(permit);
let details = Details::from_bytes( let details = Details::from_bytes(state, bytes.clone()).await?;
&state.tmp_dir,
&state.policy_dir,
&state.config.media.process_timeout,
bytes.clone(),
)
.await?;
let identifier = state let identifier = state
.store .store
@ -214,14 +208,8 @@ where
let stream = state.store.to_stream(&identifier, None, None).await?; let stream = state.store.to_stream(&identifier, None, None).await?;
let reader = magick::thumbnail( let reader =
state, magick::thumbnail(state, stream, processable_format, thumbnail_format).await?;
stream,
processable_format,
ProcessableFormat::Image(thumbnail_format),
config.media.image.quality_for(thumbnail_format),
)
.await?;
(reader, thumbnail_format.media_type()) (reader, thumbnail_format.media_type())
} else { } else {
@ -234,14 +222,12 @@ where
}; };
let reader = ffmpeg::thumbnail( let reader = ffmpeg::thumbnail(
state.tmp_dir, &state,
state.store.clone(),
identifier, identifier,
original_details original_details
.video_format() .video_format()
.unwrap_or(InternalVideoFormat::Mp4), .unwrap_or(InternalVideoFormat::Mp4),
thumbnail_format, thumbnail_format,
state.config.media.process_timeout,
) )
.await?; .await?;


@ -6,6 +6,7 @@ use crate::{
ffmpeg::FfMpegError, ffmpeg::FfMpegError,
formats::InternalVideoFormat, formats::InternalVideoFormat,
process::{Process, ProcessRead}, process::{Process, ProcessRead},
state::State,
store::Store, store::Store,
tmp_file::TmpDir, tmp_file::TmpDir,
}; };
@ -50,21 +51,19 @@ impl ThumbnailFormat {
} }
} }
#[tracing::instrument(skip(tmp_dir, store, timeout))] #[tracing::instrument(skip(state))]
pub(super) async fn thumbnail<S: Store>( pub(super) async fn thumbnail<S: Store>(
tmp_dir: &TmpDir, state: &State<S>,
store: S,
from: Arc<str>, from: Arc<str>,
input_format: InternalVideoFormat, input_format: InternalVideoFormat,
format: ThumbnailFormat, format: ThumbnailFormat,
timeout: u64,
) -> Result<ProcessRead, FfMpegError> { ) -> Result<ProcessRead, FfMpegError> {
let input_file = tmp_dir.tmp_file(Some(input_format.file_extension())); let input_file = state.tmp_dir.tmp_file(Some(input_format.file_extension()));
crate::store::file_store::safe_create_parent(&input_file) crate::store::file_store::safe_create_parent(&input_file)
.await .await
.map_err(FfMpegError::CreateDir)?; .map_err(FfMpegError::CreateDir)?;
let output_file = tmp_dir.tmp_file(Some(format.to_file_extension())); let output_file = state.tmp_dir.tmp_file(Some(format.to_file_extension()));
crate::store::file_store::safe_create_parent(&output_file) crate::store::file_store::safe_create_parent(&output_file)
.await .await
.map_err(FfMpegError::CreateDir)?; .map_err(FfMpegError::CreateDir)?;
@ -72,7 +71,8 @@ pub(super) async fn thumbnail<S: Store>(
let mut tmp_one = crate::file::File::create(&input_file) let mut tmp_one = crate::file::File::create(&input_file)
.await .await
.map_err(FfMpegError::CreateFile)?; .map_err(FfMpegError::CreateFile)?;
let stream = store let stream = state
.store
.to_stream(&from, None, None) .to_stream(&from, None, None)
.await .await
.map_err(FfMpegError::Store)?; .map_err(FfMpegError::Store)?;
@ -99,7 +99,7 @@ pub(super) async fn thumbnail<S: Store>(
output_file.as_os_str(), output_file.as_os_str(),
], ],
&[], &[],
timeout, state.config.media.process_timeout,
)?; )?;
let res = process.wait().await; let res = process.wait().await;


@ -3,7 +3,7 @@ use std::ffi::OsStr;
use actix_web::web::Bytes; use actix_web::web::Bytes;
use crate::{ use crate::{
formats::ProcessableFormat, formats::{ImageFormat, ProcessableFormat},
magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH}, magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
process::{Process, ProcessRead}, process::{Process, ProcessRead},
state::State, state::State,
@ -14,14 +14,16 @@ use crate::{
async fn thumbnail_animation<S, F, Fut>( async fn thumbnail_animation<S, F, Fut>(
state: &State<S>, state: &State<S>,
input_format: ProcessableFormat, input_format: ProcessableFormat,
format: ProcessableFormat, thumbnail_format: ImageFormat,
quality: Option<u8>,
write_file: F, write_file: F,
) -> Result<ProcessRead, MagickError> ) -> Result<ProcessRead, MagickError>
where where
F: FnOnce(crate::file::File) -> Fut, F: FnOnce(crate::file::File) -> Fut,
Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>, Fut: std::future::Future<Output = Result<crate::file::File, MagickError>>,
{ {
let format = ProcessableFormat::Image(thumbnail_format);
let quality = state.config.media.image.quality_for(thumbnail_format);
let temporary_path = state let temporary_path = state
.tmp_dir .tmp_dir
.tmp_folder() .tmp_folder()
@ -77,14 +79,12 @@ pub(super) async fn thumbnail<S>(
state: &State<S>, state: &State<S>,
stream: LocalBoxStream<'static, std::io::Result<Bytes>>, stream: LocalBoxStream<'static, std::io::Result<Bytes>>,
input_format: ProcessableFormat, input_format: ProcessableFormat,
format: ProcessableFormat, thumbnail_format: ImageFormat,
quality: Option<u8>,
) -> Result<ProcessRead, MagickError> { ) -> Result<ProcessRead, MagickError> {
thumbnail_animation( thumbnail_animation(
state, state,
input_format, input_format,
format, thumbnail_format,
quality,
|mut tmp_file| async move { |mut tmp_file| async move {
tmp_file tmp_file
.write_from_stream(stream) .write_from_stream(stream)


@ -107,9 +107,10 @@ where
let (hash_state, identifier) = process_read let (hash_state, identifier) = process_read
.with_stdout(|stdout| async move { .with_stdout(|stdout| async move {
let hasher_reader = Hasher::new(stdout); let hasher_reader = Hasher::new(stdout);
let state = hasher_reader.state(); let hash_state = hasher_reader.state();
store state
.store
.save_async_read(hasher_reader, input_type.media_type()) .save_async_read(hasher_reader, input_type.media_type())
.await .await
.map(move |identifier| (hash_state, identifier)) .map(move |identifier| (hash_state, identifier))
@ -117,13 +118,7 @@ where
.await??; .await??;
let bytes_stream = state.store.to_bytes(&identifier, None, None).await?; let bytes_stream = state.store.to_bytes(&identifier, None, None).await?;
let details = Details::from_bytes( let details = Details::from_bytes(state, bytes_stream.into_bytes()).await?;
tmp_dir,
policy_dir,
media.process_timeout,
bytes_stream.into_bytes(),
)
.await?;
drop(permit); drop(permit);
@ -153,7 +148,7 @@ where
let reader = Box::pin(tokio_util::io::StreamReader::new(stream)); let reader = Box::pin(tokio_util::io::StreamReader::new(stream));
let hasher_reader = Hasher::new(reader); let hasher_reader = Hasher::new(reader);
let state = hasher_reader.state(); let hash_state = hasher_reader.state();
let input_type = InternalFormat::Image(crate::formats::ImageFormat::Png); let input_type = InternalFormat::Image(crate::formats::ImageFormat::Png);
@ -164,7 +159,7 @@ where
let details = Details::danger_dummy(input_type); let details = Details::danger_dummy(input_type);
Ok((input_type, identifier, details, state)) Ok((input_type, identifier, details, hash_state))
} }
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
@ -192,7 +187,7 @@ where
}; };
if let Some(endpoint) = &state.config.media.external_validation { if let Some(endpoint) = &state.config.media.external_validation {
let stream = store.to_stream(&identifier, None, None).await?; let stream = state.store.to_stream(&identifier, None, None).await?;
let response = state let response = state
.client .client


@ -138,13 +138,7 @@ async fn ensure_details_identifier<S: Store + 'static>(
tracing::debug!("generating new details from {:?}", identifier); tracing::debug!("generating new details from {:?}", identifier);
let bytes_stream = state.store.to_bytes(identifier, None, None).await?; let bytes_stream = state.store.to_bytes(identifier, None, None).await?;
let new_details = Details::from_bytes( let new_details = Details::from_bytes(state, bytes_stream.into_bytes()).await?;
&state.tmp_dir,
&state.policy_dir,
state.config.media.process_timeout,
bytes_stream.into_bytes(),
)
.await?;
tracing::debug!("storing details for {:?}", identifier); tracing::debug!("storing details for {:?}", identifier);
state.repo.relate_details(identifier, &new_details).await?; state.repo.relate_details(identifier, &new_details).await?;
tracing::debug!("stored"); tracing::debug!("stored");
@ -351,7 +345,7 @@ impl<S: Store + 'static> FormData for BackgroundedUpload<S> {
let stream = crate::stream::from_err(stream); let stream = crate::stream::from_err(stream);
Backgrounded::proxy(&state.repo, &state.store, stream).await Backgrounded::proxy(&state, stream).await
} }
.instrument(span), .instrument(span),
) )
@ -539,12 +533,12 @@ async fn do_download_backgrounded<S: Store + 'static>(
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
metrics::counter!("pict-rs.files", "download" => "background").increment(1); metrics::counter!("pict-rs.files", "download" => "background").increment(1);
let backgrounded = Backgrounded::proxy((**repo).clone(), (**store).clone(), stream).await?; let backgrounded = Backgrounded::proxy(&state, stream).await?;
let upload_id = backgrounded.upload_id().expect("Upload ID exists"); let upload_id = backgrounded.upload_id().expect("Upload ID exists");
let identifier = backgrounded.identifier().expect("Identifier exists"); let identifier = backgrounded.identifier().expect("Identifier exists");
queue::queue_ingest(&repo, identifier, upload_id, None).await?; queue::queue_ingest(&state.repo, identifier, upload_id, None).await?;
backgrounded.disarm(); backgrounded.disarm();
@ -611,7 +605,8 @@ async fn page<S>(
for hash in &page.hashes { for hash in &page.hashes {
let hex = hash.to_hex(); let hex = hash.to_hex();
let aliases = repo let aliases = state
.repo
.aliases_for_hash(hash.clone()) .aliases_for_hash(hash.clone())
.await? .await?
.into_iter() .into_iter()
@ -794,7 +789,7 @@ async fn process<S: Store + 'static>(
ProcessSource::Proxy { proxy } => { ProcessSource::Proxy { proxy } => {
let alias = if let Some(alias) = state.repo.related(proxy.clone()).await? { let alias = if let Some(alias) = state.repo.related(proxy.clone()).await? {
alias alias
} else if !config.server.read_only { } else if !state.config.server.read_only {
let stream = download_stream(proxy.as_str(), &state).await?; let stream = download_stream(proxy.as_str(), &state).await?;
let (alias, _, _) = ingest_inline(stream, &state).await?; let (alias, _, _) = ingest_inline(stream, &state).await?;
@ -836,7 +831,10 @@ async fn process<S: Store + 'static>(
.await?; .await?;
} }
let identifier_opt = repo.variant_identifier(hash.clone(), path_string).await?; let identifier_opt = state
.repo
.variant_identifier(hash.clone(), path_string)
.await?;
if let Some(identifier) = identifier_opt { if let Some(identifier) = identifier_opt {
let details = ensure_details_identifier(&state, &identifier).await?; let details = ensure_details_identifier(&state, &identifier).await?;
@ -850,7 +848,7 @@ async fn process<S: Store + 'static>(
return ranged_file_resp(&state.store, identifier, range, details, not_found).await; return ranged_file_resp(&state.store, identifier, range, details, not_found).await;
} }
if config.server.read_only { if state.config.server.read_only {
return Err(UploadError::ReadOnly.into()); return Err(UploadError::ReadOnly.into());
} }
@ -930,7 +928,9 @@ async fn process_head<S: Store + 'static>(
}; };
if !state.config.server.read_only { if !state.config.server.read_only {
repo.accessed_variant(hash.clone(), path_string.clone()) state
.repo
.accessed_variant(hash.clone(), path_string.clone())
.await?; .await?;
} }
@ -959,7 +959,7 @@ async fn process_head<S: Store + 'static>(
async fn process_backgrounded<S: Store>( async fn process_backgrounded<S: Store>(
web::Query(ProcessQuery { source, operations }): web::Query<ProcessQuery>, web::Query(ProcessQuery { source, operations }): web::Query<ProcessQuery>,
ext: web::Path<String>, ext: web::Path<String>,
state: web::Data<State<T>>, state: web::Data<State<S>>,
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let source = match source { let source = match source {
ProcessSource::Alias { alias } | ProcessSource::Source { src: alias } => { ProcessSource::Alias { alias } | ProcessSource::Source { src: alias } => {
@ -1123,7 +1123,7 @@ async fn do_serve<S: Store + 'static>(
async fn serve_query_head<S: Store + 'static>( async fn serve_query_head<S: Store + 'static>(
range: Option<web::Header<Range>>, range: Option<web::Header<Range>>,
web::Query(alias_query): web::Query<AliasQuery>, web::Query(alias_query): web::Query<AliasQuery>,
state: web::Data<State<T>>, state: web::Data<State<S>>,
) -> Result<HttpResponse, Error> { ) -> Result<HttpResponse, Error> {
let alias = match alias_query { let alias = match alias_query {
AliasQuery::Alias { alias } => Serde::into_inner(alias), AliasQuery::Alias { alias } => Serde::into_inner(alias),
@ -1547,10 +1547,12 @@ fn build_client() -> Result<ClientWithMiddleware, Error> {
fn configure_endpoints<S: Store + 'static, F: Fn(&mut web::ServiceConfig)>( fn configure_endpoints<S: Store + 'static, F: Fn(&mut web::ServiceConfig)>(
config: &mut web::ServiceConfig, config: &mut web::ServiceConfig,
state: State<S>, state: State<S>,
process_map: ProcessMap,
extra_config: F, extra_config: F,
) { ) {
config config
.app_data(web::Data::new(state)) .app_data(web::Data::new(state.clone()))
.app_data(web::Data::new(process_map.clone()))
.route("/healthz", web::get().to(healthz::<S>)) .route("/healthz", web::get().to(healthz::<S>))
.service( .service(
web::scope("/image") web::scope("/image")
@ -1613,9 +1615,7 @@ fn configure_endpoints<S: Store + 'static, F: Fn(&mut web::ServiceConfig)>(
) )
.service( .service(
web::scope("/internal") web::scope("/internal")
.wrap(Internal( .wrap(Internal(state.config.server.api_key.clone()))
state.config.server.api_key.as_ref().map(|s| s.to_owned()),
))
.service(web::resource("/import").route(web::post().to(import::<S>))) .service(web::resource("/import").route(web::post().to(import::<S>)))
.service(web::resource("/variants").route(web::delete().to(clean_variants::<S>))) .service(web::resource("/variants").route(web::delete().to(clean_variants::<S>)))
.service(web::resource("/purge").route(web::post().to(purge::<S>))) .service(web::resource("/purge").route(web::post().to(purge::<S>)))
@ -1623,13 +1623,13 @@ fn configure_endpoints<S: Store + 'static, F: Fn(&mut web::ServiceConfig)>(
.service(web::resource("/aliases").route(web::get().to(aliases::<S>))) .service(web::resource("/aliases").route(web::get().to(aliases::<S>)))
.service(web::resource("/identifier").route(web::get().to(identifier::<S>))) .service(web::resource("/identifier").route(web::get().to(identifier::<S>)))
.service(web::resource("/set_not_found").route(web::post().to(set_not_found::<S>))) .service(web::resource("/set_not_found").route(web::post().to(set_not_found::<S>)))
.service(web::resource("/hashes").route(web::get().to(page))) .service(web::resource("/hashes").route(web::get().to(page::<S>)))
.service(web::resource("/prune_missing").route(web::post().to(prune_missing::<S>))) .service(web::resource("/prune_missing").route(web::post().to(prune_missing::<S>)))
.configure(extra_config), .configure(extra_config),
); );
} }
fn spawn_cleanup(state: State<S>) { fn spawn_cleanup<S>(state: State<S>) {
if state.config.server.read_only { if state.config.server.read_only {
return; return;
} }
@ -1668,34 +1668,21 @@ where
} }
async fn launch_file_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'static>( async fn launch_file_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'static>(
tmp_dir: ArcTmpDir, state: State<FileStore>,
policy_dir: ArcPolicyDir,
repo: ArcRepo,
store: FileStore,
client: ClientWithMiddleware,
config: Configuration,
extra_config: F, extra_config: F,
) -> color_eyre::Result<()> { ) -> color_eyre::Result<()> {
let process_map = ProcessMap::new(); let process_map = ProcessMap::new();
let address = config.server.address; let address = state.config.server.address;
let tls = Tls::from_config(&state.config);
spawn_cleanup(state.clone()); spawn_cleanup(state.clone());
let tls = Tls::from_config(&config);
let state = State {
config,
tmp_dir,
policy_dir,
repo,
store,
client,
};
let server = HttpServer::new(move || { let server = HttpServer::new(move || {
let extra_config = extra_config.clone(); let extra_config = extra_config.clone();
let state = state.clone(); let state = state.clone();
let process_map = process_map.clone();
spawn_workers(state.clone(), process_map.clone()); spawn_workers(state.clone(), process_map.clone());
@ -1704,8 +1691,9 @@ async fn launch_file_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'stat
.wrap(Deadline) .wrap(Deadline)
.wrap(Metrics) .wrap(Metrics)
.wrap(Payload::new()) .wrap(Payload::new())
.app_data(web::Data::new(process_map)) .configure(move |sc| {
.configure(move |sc| configure_endpoints(sc, state, extra_config)) configure_endpoints(sc, state.clone(), process_map.clone(), extra_config)
})
}); });
if let Some(tls) = tls { if let Some(tls) = tls {
@ -1748,34 +1736,21 @@ async fn launch_file_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'stat
} }
async fn launch_object_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'static>( async fn launch_object_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'static>(
tmp_dir: ArcTmpDir, state: State<ObjectStore>,
policy_dir: ArcPolicyDir,
repo: ArcRepo,
store: ObjectStore,
client: ClientWithMiddleware,
config: Configuration,
extra_config: F, extra_config: F,
) -> color_eyre::Result<()> { ) -> color_eyre::Result<()> {
let process_map = ProcessMap::new(); let process_map = ProcessMap::new();
let address = config.server.address; let address = state.config.server.address;
let tls = Tls::from_config(&config); let tls = Tls::from_config(&state.config);
let state = State {
config: config.clone(),
tmp_dir: tmp_dir.clone(),
policy_dir: policy_dir.clone(),
repo: repo.clone(),
store: store.clone(),
client: client.clone(),
};
spawn_cleanup(state.clone()); spawn_cleanup(state.clone());
let server = HttpServer::new(move || { let server = HttpServer::new(move || {
let extra_config = extra_config.clone(); let extra_config = extra_config.clone();
let state = state.clone(); let state = state.clone();
let process_map = process_map.clone();
spawn_workers(state.clone(), process_map.clone()); spawn_workers(state.clone(), process_map.clone());
@ -1784,8 +1759,9 @@ async fn launch_object_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'st
.wrap(Deadline) .wrap(Deadline)
.wrap(Metrics) .wrap(Metrics)
.wrap(Payload::new()) .wrap(Payload::new())
.app_data(web::Data::new(process_map)) .configure(move |sc| {
.configure(move |sc| configure_endpoints(sc, state, extra_config)) configure_endpoints(sc, state.clone(), process_map.clone(), extra_config)
})
}); });
if let Some(tls) = tls { if let Some(tls) = tls {
@ -1825,6 +1801,7 @@ async fn launch_object_store<F: Fn(&mut web::ServiceConfig) + Send + Clone + 'st
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn migrate_inner<S1>( async fn migrate_inner<S1>(
config: Configuration,
tmp_dir: ArcTmpDir, tmp_dir: ArcTmpDir,
policy_dir: ArcPolicyDir, policy_dir: ArcPolicyDir,
repo: ArcRepo, repo: ArcRepo,
@ -1832,7 +1809,6 @@ async fn migrate_inner<S1>(
from: S1, from: S1,
to: config::primitives::Store, to: config::primitives::Store,
skip_missing_files: bool, skip_missing_files: bool,
timeout: u64,
concurrency: usize, concurrency: usize,
) -> color_eyre::Result<()> ) -> color_eyre::Result<()>
where where
@ -1840,19 +1816,18 @@ where
{ {
match to { match to {
config::primitives::Store::Filesystem(config::Filesystem { path }) => { config::primitives::Store::Filesystem(config::Filesystem { path }) => {
let to = FileStore::build(path.clone(), repo.clone()).await?; let store = FileStore::build(path.clone(), repo.clone()).await?;
migrate_store( let to = State {
config,
tmp_dir, tmp_dir,
policy_dir, policy_dir,
repo, repo,
from, store,
to, client,
skip_missing_files, };
timeout,
concurrency, migrate_store(from, to, skip_missing_files, concurrency).await?
)
.await?
} }
config::primitives::Store::ObjectStorage(config::primitives::ObjectStorage { config::primitives::Store::ObjectStorage(config::primitives::ObjectStorage {
endpoint, endpoint,
@ -1866,7 +1841,7 @@ where
client_timeout, client_timeout,
public_endpoint, public_endpoint,
}) => { }) => {
let to = ObjectStore::build( let store = ObjectStore::build(
endpoint.clone(), endpoint.clone(),
bucket_name, bucket_name,
if use_path_style { if use_path_style {
@ -1884,19 +1859,18 @@ where
repo.clone(), repo.clone(),
) )
.await? .await?
.build(client); .build(client.clone());
migrate_store( let to = State {
config,
tmp_dir, tmp_dir,
policy_dir, policy_dir,
repo, repo,
from, store,
to, client,
skip_missing_files, };
timeout,
concurrency, migrate_store(from, to, skip_missing_files, concurrency).await?
)
.await?
} }
} }
@ -2066,6 +2040,7 @@ impl PictRsConfiguration {
config::primitives::Store::Filesystem(config::Filesystem { path }) => { config::primitives::Store::Filesystem(config::Filesystem { path }) => {
let from = FileStore::build(path.clone(), repo.clone()).await?; let from = FileStore::build(path.clone(), repo.clone()).await?;
migrate_inner( migrate_inner(
config,
tmp_dir, tmp_dir,
policy_dir, policy_dir,
repo, repo,
@ -2073,7 +2048,6 @@ impl PictRsConfiguration {
from, from,
to, to,
skip_missing_files, skip_missing_files,
config.media.process_timeout,
concurrency, concurrency,
) )
.await?; .await?;
@ -2113,6 +2087,7 @@ impl PictRsConfiguration {
.build(client.clone()); .build(client.clone());
migrate_inner( migrate_inner(
config,
tmp_dir, tmp_dir,
policy_dir, policy_dir,
repo, repo,
@ -2120,7 +2095,6 @@ impl PictRsConfiguration {
from, from,
to, to,
skip_missing_files, skip_missing_files,
config.media.process_timeout,
concurrency, concurrency,
) )
.await?; .await?;
@ -2150,18 +2124,19 @@ impl PictRsConfiguration {
let store = FileStore::build(path, arc_repo.clone()).await?; let store = FileStore::build(path, arc_repo.clone()).await?;
let state = State {
tmp_dir: tmp_dir.clone(),
policy_dir: policy_dir.clone(),
repo: arc_repo.clone(),
store: store.clone(),
config: config.clone(),
client: client.clone(),
};
if arc_repo.get("migrate-0.4").await?.is_none() { if arc_repo.get("migrate-0.4").await?.is_none() {
if let Some(path) = config.old_repo_path() { if let Some(path) = config.old_repo_path() {
if let Some(old_repo) = repo_04::open(path)? { if let Some(old_repo) = repo_04::open(path)? {
repo::migrate_04( repo::migrate_04(old_repo, state.clone()).await?;
tmp_dir.clone(),
policy_dir.clone(),
old_repo,
arc_repo.clone(),
store.clone(),
config.clone(),
)
.await?;
arc_repo arc_repo
.set("migrate-0.4", Arc::from(b"migrated".to_vec())) .set("migrate-0.4", Arc::from(b"migrated".to_vec()))
.await?; .await?;
@ -2171,28 +2146,13 @@ impl PictRsConfiguration {
match repo { match repo {
Repo::Sled(sled_repo) => { Repo::Sled(sled_repo) => {
launch_file_store( launch_file_store(state, move |sc| {
tmp_dir.clone(), sled_extra_config(sc, sled_repo.clone())
policy_dir.clone(), })
arc_repo,
store,
client,
config,
move |sc| sled_extra_config(sc, sled_repo.clone()),
)
.await?; .await?;
} }
Repo::Postgres(_) => { Repo::Postgres(_) => {
launch_file_store( launch_file_store(state, |_| {}).await?;
tmp_dir.clone(),
policy_dir.clone(),
arc_repo,
store,
client,
config,
|_| {},
)
.await?;
} }
} }
} }
@ -2230,18 +2190,19 @@ impl PictRsConfiguration {
.await? .await?
.build(client.clone()); .build(client.clone());
let state = State {
tmp_dir: tmp_dir.clone(),
policy_dir: policy_dir.clone(),
repo: arc_repo.clone(),
store: store.clone(),
config: config.clone(),
client: client.clone(),
};
if arc_repo.get("migrate-0.4").await?.is_none() { if arc_repo.get("migrate-0.4").await?.is_none() {
if let Some(path) = config.old_repo_path() { if let Some(path) = config.old_repo_path() {
if let Some(old_repo) = repo_04::open(path)? { if let Some(old_repo) = repo_04::open(path)? {
repo::migrate_04( repo::migrate_04(old_repo, state.clone()).await?;
tmp_dir.clone(),
policy_dir.clone(),
old_repo,
arc_repo.clone(),
store.clone(),
config.clone(),
)
.await?;
arc_repo arc_repo
.set("migrate-0.4", Arc::from(b"migrated".to_vec())) .set("migrate-0.4", Arc::from(b"migrated".to_vec()))
.await?; .await?;
@ -2251,28 +2212,13 @@ impl PictRsConfiguration {
match repo { match repo {
Repo::Sled(sled_repo) => { Repo::Sled(sled_repo) => {
launch_object_store( launch_object_store(state, move |sc| {
tmp_dir.clone(), sled_extra_config(sc, sled_repo.clone())
policy_dir.clone(), })
arc_repo,
store,
client,
config,
move |sc| sled_extra_config(sc, sled_repo.clone()),
)
.await?; .await?;
} }
Repo::Postgres(_) => { Repo::Postgres(_) => {
launch_object_store( launch_object_store(state, |_| {}).await?;
tmp_dir.clone(),
policy_dir.clone(),
arc_repo,
store,
client,
config,
|_| {},
)
.await?;
} }
} }
} }


@ -7,6 +7,7 @@ use crate::{
error_code::ErrorCode, error_code::ErrorCode,
formats::ProcessableFormat, formats::ProcessableFormat,
process::{Process, ProcessError, ProcessRead}, process::{Process, ProcessError, ProcessRead},
state::State,
stream::LocalBoxStream, stream::LocalBoxStream,
tmp_file::{TmpDir, TmpFolder}, tmp_file::{TmpDir, TmpFolder},
}; };
@ -177,7 +178,7 @@ pub(crate) async fn process_image_stream_read<S>(
.await .await
} }
pub(crate) async fn process_image_process_read( pub(crate) async fn process_image_process_read<S>(
state: &State<S>, state: &State<S>,
process_read: ProcessRead, process_read: ProcessRead,
args: Vec<String>, args: Vec<String>,


@ -14,19 +14,16 @@ use crate::{
error::{Error, UploadError}, error::{Error, UploadError},
magick::{ArcPolicyDir, PolicyDir}, magick::{ArcPolicyDir, PolicyDir},
repo::{ArcRepo, Hash}, repo::{ArcRepo, Hash},
state::State,
store::Store, store::Store,
tmp_file::{ArcTmpDir, TmpDir}, tmp_file::{ArcTmpDir, TmpDir},
}; };
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
pub(super) async fn migrate_store<S1, S2>( pub(super) async fn migrate_store<S1, S2>(
tmp_dir: ArcTmpDir,
policy_dir: ArcPolicyDir,
repo: ArcRepo,
from: S1, from: S1,
to: S2, to: State<S2>,
skip_missing_files: bool, skip_missing_files: bool,
timeout: u64,
concurrency: usize, concurrency: usize,
) -> Result<(), Error> ) -> Result<(), Error>
where where
@ -39,7 +36,7 @@ where
tracing::warn!("Old store is not configured correctly"); tracing::warn!("Old store is not configured correctly");
return Err(e.into()); return Err(e.into());
} }
if let Err(e) = to.health_check().await { if let Err(e) = to.repo.health_check().await {
tracing::warn!("New store is not configured correctly"); tracing::warn!("New store is not configured correctly");
return Err(e.into()); return Err(e.into());
} }
@ -48,17 +45,8 @@ where
let mut failure_count = 0; let mut failure_count = 0;
while let Err(e) = do_migrate_store( while let Err(e) =
tmp_dir.clone(), do_migrate_store(from.clone(), to.clone(), skip_missing_files, concurrency).await
policy_dir.clone(),
repo.clone(),
from.clone(),
to.clone(),
skip_missing_files,
timeout,
concurrency,
)
.await
{ {
tracing::error!("Migration failed with {}", format!("{e:?}")); tracing::error!("Migration failed with {}", format!("{e:?}"));
@ -78,11 +66,8 @@ where
} }
struct MigrateState<S1, S2> { struct MigrateState<S1, S2> {
tmp_dir: ArcTmpDir,
policy_dir: ArcPolicyDir,
repo: ArcRepo,
from: S1, from: S1,
to: S2, to: State<S2>,
continuing_migration: bool, continuing_migration: bool,
skip_missing_files: bool, skip_missing_files: bool,
initial_repo_size: u64, initial_repo_size: u64,
@ -90,26 +75,21 @@ struct MigrateState<S1, S2> {
pct: AtomicU64, pct: AtomicU64,
index: AtomicU64, index: AtomicU64,
started_at: Instant, started_at: Instant,
timeout: u64,
} }
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn do_migrate_store<S1, S2>( async fn do_migrate_store<S1, S2>(
tmp_dir: ArcTmpDir,
policy_dir: ArcPolicyDir,
repo: ArcRepo,
from: S1, from: S1,
to: S2, to: State<S2>,
skip_missing_files: bool, skip_missing_files: bool,
timeout: u64,
concurrency: usize, concurrency: usize,
) -> Result<(), Error> ) -> Result<(), Error>
where where
S1: Store + 'static, S1: Store + 'static,
S2: Store + 'static, S2: Store + 'static,
{ {
let continuing_migration = repo.is_continuing_migration().await?; let continuing_migration = to.repo.is_continuing_migration().await?;
let initial_repo_size = repo.size().await?; let initial_repo_size = to.repo.size().await?;
if continuing_migration { if continuing_migration {
tracing::warn!("Continuing previous migration of {initial_repo_size} total hashes"); tracing::warn!("Continuing previous migration of {initial_repo_size} total hashes");
@ -122,15 +102,12 @@ where
} }
// Hashes are read in a consistent order // Hashes are read in a consistent order
let stream = std::pin::pin!(repo.hashes()); let stream = std::pin::pin!(to.repo.hashes());
let mut stream = stream.into_streamer(); let mut stream = stream.into_streamer();
let state = Rc::new(MigrateState { let state = Rc::new(MigrateState {
tmp_dir: tmp_dir.clone(),
policy_dir: policy_dir.clone(),
repo: repo.clone(),
from, from,
to, to: to.clone(),
continuing_migration, continuing_migration,
skip_missing_files, skip_missing_files,
initial_repo_size, initial_repo_size,
@ -138,7 +115,6 @@ where
pct: AtomicU64::new(initial_repo_size / 100), pct: AtomicU64::new(initial_repo_size / 100),
index: AtomicU64::new(0), index: AtomicU64::new(0),
started_at: Instant::now(), started_at: Instant::now(),
timeout,
}); });
let mut joinset = tokio::task::JoinSet::new(); let mut joinset = tokio::task::JoinSet::new();
@ -165,7 +141,7 @@ where
} }
// clean up the migration table to avoid interfering with future migrations // clean up the migration table to avoid interfering with future migrations
repo.clear().await?; to.repo.clear().await?;
tracing::warn!("Migration completed successfully"); tracing::warn!("Migration completed successfully");
@ -179,9 +155,6 @@ where
S2: Store, S2: Store,
{ {
let MigrateState { let MigrateState {
tmp_dir,
policy_dir,
repo,
from, from,
to, to,
continuing_migration, continuing_migration,
@ -191,24 +164,23 @@ where
pct, pct,
index, index,
started_at, started_at,
timeout,
} = state; } = state;
let current_index = index.fetch_add(1, Ordering::Relaxed); let current_index = index.fetch_add(1, Ordering::Relaxed);
let original_identifier = match repo.identifier(hash.clone()).await { let original_identifier = match to.repo.identifier(hash.clone()).await {
Ok(Some(identifier)) => identifier, Ok(Some(identifier)) => identifier,
Ok(None) => { Ok(None) => {
tracing::warn!( tracing::warn!(
"Original File identifier for hash {hash:?} is missing, queue cleanup task", "Original File identifier for hash {hash:?} is missing, queue cleanup task",
); );
crate::queue::cleanup_hash(repo, hash.clone()).await?; crate::queue::cleanup_hash(&to.repo, hash.clone()).await?;
return Ok(()); return Ok(());
} }
Err(e) => return Err(e.into()), Err(e) => return Err(e.into()),
}; };
if repo.is_migrated(&original_identifier).await? { if to.repo.is_migrated(&original_identifier).await? {
// migrated original for hash - this means we can skip // migrated original for hash - this means we can skip
return Ok(()); return Ok(());
} }
@ -241,26 +213,16 @@ where
} }
} }
if let Some(identifier) = repo.motion_identifier(hash.clone()).await? { if let Some(identifier) = to.repo.motion_identifier(hash.clone()).await? {
if !repo.is_migrated(&identifier).await? { if !to.repo.is_migrated(&identifier).await? {
match migrate_file( match migrate_file(from, to, &identifier, *skip_missing_files).await {
tmp_dir,
policy_dir,
repo,
from,
to,
&identifier,
*skip_missing_files,
*timeout,
)
.await
{
Ok(new_identifier) => { Ok(new_identifier) => {
migrate_details(repo, &identifier, &new_identifier).await?; migrate_details(&to.repo, &identifier, &new_identifier).await?;
repo.relate_motion_identifier(hash.clone(), &new_identifier) to.repo
.relate_motion_identifier(hash.clone(), &new_identifier)
.await?; .await?;
repo.mark_migrated(&identifier, &new_identifier).await?; to.repo.mark_migrated(&identifier, &new_identifier).await?;
} }
Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => { Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
tracing::warn!("Skipping motion file for hash {hash:?}"); tracing::warn!("Skipping motion file for hash {hash:?}");
@ -281,28 +243,20 @@ where
} }
} }
for (variant, identifier) in repo.variants(hash.clone()).await? { for (variant, identifier) in to.repo.variants(hash.clone()).await? {
if !repo.is_migrated(&identifier).await? { if !to.repo.is_migrated(&identifier).await? {
match migrate_file( match migrate_file(from, to, &identifier, *skip_missing_files).await {
tmp_dir,
policy_dir,
repo,
from,
to,
&identifier,
*skip_missing_files,
*timeout,
)
.await
{
Ok(new_identifier) => { Ok(new_identifier) => {
migrate_details(repo, &identifier, &new_identifier).await?; migrate_details(&to.repo, &identifier, &new_identifier).await?;
repo.remove_variant(hash.clone(), variant.clone()).await?; to.repo
let _ = repo .remove_variant(hash.clone(), variant.clone())
.await?;
let _ = to
.repo
.relate_variant_identifier(hash.clone(), variant, &new_identifier) .relate_variant_identifier(hash.clone(), variant, &new_identifier)
.await?; .await?;
repo.mark_migrated(&identifier, &new_identifier).await?; to.repo.mark_migrated(&identifier, &new_identifier).await?;
} }
Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => { Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
tracing::warn!("Skipping variant {variant} for hash {hash:?}",); tracing::warn!("Skipping variant {variant} for hash {hash:?}",);
@ -323,23 +277,14 @@ where
} }
} }
match migrate_file( match migrate_file(from, to, &original_identifier, *skip_missing_files).await {
tmp_dir,
policy_dir,
repo,
from,
to,
&original_identifier,
*skip_missing_files,
*timeout,
)
.await
{
Ok(new_identifier) => { Ok(new_identifier) => {
migrate_details(repo, &original_identifier, &new_identifier).await?; migrate_details(&to.repo, &original_identifier, &new_identifier).await?;
repo.update_identifier(hash.clone(), &new_identifier) to.repo
.update_identifier(hash.clone(), &new_identifier)
.await?; .await?;
repo.mark_migrated(&original_identifier, &new_identifier) to.repo
.mark_migrated(&original_identifier, &new_identifier)
.await?; .await?;
} }
Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => { Err(MigrateError::From(e)) if e.is_not_found() && *skip_missing_files => {
@ -385,14 +330,10 @@ where
#[allow(clippy::too_many_arguments)] #[allow(clippy::too_many_arguments)]
async fn migrate_file<S1, S2>( async fn migrate_file<S1, S2>(
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
repo: &ArcRepo,
from: &S1, from: &S1,
to: &S2, to: &State<S2>,
identifier: &Arc<str>, identifier: &Arc<str>,
skip_missing_files: bool, skip_missing_files: bool,
timeout: u64,
) -> Result<Arc<str>, MigrateError> ) -> Result<Arc<str>, MigrateError>
where where
S1: Store, S1: Store,
@ -403,7 +344,7 @@ where
loop { loop {
tracing::trace!("migrate_file: looping"); tracing::trace!("migrate_file: looping");
match do_migrate_file(tmp_dir, policy_dir, repo, from, to, identifier, timeout).await { match do_migrate_file(from, to, identifier).await {
Ok(identifier) => return Ok(identifier), Ok(identifier) => return Ok(identifier),
Err(MigrateError::From(e)) if e.is_not_found() && skip_missing_files => { Err(MigrateError::From(e)) if e.is_not_found() && skip_missing_files => {
return Err(MigrateError::From(e)); return Err(MigrateError::From(e));
@ -432,13 +373,9 @@ enum MigrateError {
} }
async fn do_migrate_file<S1, S2>( async fn do_migrate_file<S1, S2>(
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
repo: &ArcRepo,
from: &S1, from: &S1,
to: &S2, to: &State<S2>,
identifier: &Arc<str>, identifier: &Arc<str>,
timeout: u64,
) -> Result<Arc<str>, MigrateError> ) -> Result<Arc<str>, MigrateError>
where where
S1: Store, S1: Store,
@ -449,7 +386,8 @@ where
.await .await
.map_err(MigrateError::From)?; .map_err(MigrateError::From)?;
let details_opt = repo let details_opt = to
.repo
.details(identifier) .details(identifier)
.await .await
.map_err(Error::from) .map_err(Error::from)
@ -463,11 +401,11 @@ where
.await .await
.map_err(From::from) .map_err(From::from)
.map_err(MigrateError::Details)?; .map_err(MigrateError::Details)?;
let new_details = let new_details = Details::from_bytes(to, bytes_stream.into_bytes())
Details::from_bytes(tmp_dir, policy_dir, timeout, bytes_stream.into_bytes()) .await
.await .map_err(MigrateError::Details)?;
.map_err(MigrateError::Details)?; to.repo
repo.relate_details(identifier, &new_details) .relate_details(identifier, &new_details)
.await .await
.map_err(Error::from) .map_err(Error::from)
.map_err(MigrateError::Details)?; .map_err(MigrateError::Details)?;
@ -475,6 +413,7 @@ where
}; };
let new_identifier = to let new_identifier = to
.store
.save_stream(stream, details.media_type()) .save_stream(stream, details.media_type())
.await .await
.map_err(MigrateError::To)?; .map_err(MigrateError::To)?;


@ -116,6 +116,7 @@ where
let guard = UploadGuard::guard(upload_id); let guard = UploadGuard::guard(upload_id);
let fut = async { let fut = async {
let ident = unprocessed_identifier.clone();
let state2 = state.clone(); let state2 = state.clone();
let current_span = Span::current(); let current_span = Span::current();


@ -16,6 +16,7 @@ use crate::{
AliasRepo as _, HashRepo as _, IdentifierRepo as _, SettingsRepo as _, AliasRepo as _, HashRepo as _, IdentifierRepo as _, SettingsRepo as _,
SledRepo as OldSledRepo, SledRepo as OldSledRepo,
}, },
state::State,
store::Store, store::Store,
tmp_file::{ArcTmpDir, TmpDir}, tmp_file::{ArcTmpDir, TmpDir},
}; };
@ -80,23 +81,19 @@ pub(crate) async fn migrate_repo(old_repo: ArcRepo, new_repo: ArcRepo) -> Result
#[tracing::instrument(skip_all)] #[tracing::instrument(skip_all)]
pub(crate) async fn migrate_04<S: Store + 'static>( pub(crate) async fn migrate_04<S: Store + 'static>(
tmp_dir: ArcTmpDir,
policy_dir: ArcPolicyDir,
old_repo: OldSledRepo, old_repo: OldSledRepo,
new_repo: ArcRepo, state: State<S>,
store: S,
config: Configuration,
) -> Result<(), Error> { ) -> Result<(), Error> {
tracing::info!("Running checks"); tracing::info!("Running checks");
if let Err(e) = old_repo.health_check().await { if let Err(e) = old_repo.health_check().await {
tracing::warn!("Old repo is not configured correctly"); tracing::warn!("Old repo is not configured correctly");
return Err(e.into()); return Err(e.into());
} }
if let Err(e) = new_repo.health_check().await { if let Err(e) = state.repo.health_check().await {
tracing::warn!("New repo is not configured correctly"); tracing::warn!("New repo is not configured correctly");
return Err(e.into()); return Err(e.into());
} }
if let Err(e) = store.health_check().await { if let Err(e) = state.store.health_check().await {
tracing::warn!("Store is not configured correctly"); tracing::warn!("Store is not configured correctly");
return Err(e.into()); return Err(e.into());
} }
@ -116,19 +113,15 @@ pub(crate) async fn migrate_04<S: Store + 'static>(
if let Ok(hash) = res { if let Ok(hash) = res {
set.spawn_local(migrate_hash_04( set.spawn_local(migrate_hash_04(
tmp_dir.clone(),
policy_dir.clone(),
old_repo.clone(), old_repo.clone(),
new_repo.clone(), state.clone(),
store.clone(),
config.clone(),
hash.clone(), hash.clone(),
)); ));
} else { } else {
tracing::warn!("Failed to read hash, skipping"); tracing::warn!("Failed to read hash, skipping");
} }
while set.len() >= config.upgrade.concurrency { while set.len() >= state.config.upgrade.concurrency {
tracing::trace!("migrate_04: join looping"); tracing::trace!("migrate_04: join looping");
if set.join_next().await.is_some() { if set.join_next().await.is_some() {
@ -156,13 +149,15 @@ pub(crate) async fn migrate_04<S: Store + 'static>(
} }
if let Some(generator_state) = old_repo.get(GENERATOR_KEY).await? { if let Some(generator_state) = old_repo.get(GENERATOR_KEY).await? {
new_repo state
.repo
.set(GENERATOR_KEY, generator_state.to_vec().into()) .set(GENERATOR_KEY, generator_state.to_vec().into())
.await?; .await?;
} }
if let Some(generator_state) = old_repo.get(crate::NOT_FOUND_KEY).await? { if let Some(generator_state) = old_repo.get(crate::NOT_FOUND_KEY).await? {
new_repo state
.repo
.set(crate::NOT_FOUND_KEY, generator_state.to_vec().into()) .set(crate::NOT_FOUND_KEY, generator_state.to_vec().into())
.await?; .await?;
} }
@ -193,28 +188,10 @@ async fn migrate_hash(old_repo: ArcRepo, new_repo: ArcRepo, hash: Hash) {
} }
} }
async fn migrate_hash_04<S: Store>( async fn migrate_hash_04<S: Store>(old_repo: OldSledRepo, state: State<S>, old_hash: sled::IVec) {
tmp_dir: ArcTmpDir,
policy_dir: ArcPolicyDir,
old_repo: OldSledRepo,
new_repo: ArcRepo,
store: S,
config: Configuration,
old_hash: sled::IVec,
) {
let mut hash_failures = 0; let mut hash_failures = 0;
while let Err(e) = timed_migrate_hash_04( while let Err(e) = timed_migrate_hash_04(&old_repo, &state, old_hash.clone()).await {
&tmp_dir,
&policy_dir,
&old_repo,
&new_repo,
&store,
&config,
old_hash.clone(),
)
.await
{
hash_failures += 1; hash_failures += 1;
if hash_failures > 10 { if hash_failures > 10 {
@ -300,19 +277,13 @@ async fn do_migrate_hash(old_repo: &ArcRepo, new_repo: &ArcRepo, hash: Hash) ->
} }
async fn timed_migrate_hash_04<S: Store>( async fn timed_migrate_hash_04<S: Store>(
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
old_repo: &OldSledRepo, old_repo: &OldSledRepo,
new_repo: &ArcRepo, state: &State<S>,
store: &S,
config: &Configuration,
old_hash: sled::IVec, old_hash: sled::IVec,
) -> Result<(), Error> { ) -> Result<(), Error> {
tokio::time::timeout( tokio::time::timeout(
Duration::from_secs(config.media.external_validation_timeout * 6), Duration::from_secs(state.config.media.process_timeout * 6),
do_migrate_hash_04( do_migrate_hash_04(old_repo, state, old_hash),
tmp_dir, policy_dir, old_repo, new_repo, store, config, old_hash,
),
) )
.await .await
.map_err(|_| UploadError::ProcessTimeout)? .map_err(|_| UploadError::ProcessTimeout)?
@ -320,12 +291,8 @@ async fn timed_migrate_hash_04<S: Store>(
#[tracing::instrument(skip_all)] #[tracing::instrument(skip_all)]
async fn do_migrate_hash_04<S: Store>( async fn do_migrate_hash_04<S: Store>(
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
old_repo: &OldSledRepo, old_repo: &OldSledRepo,
new_repo: &ArcRepo, state: &State<S>,
store: &S,
config: &Configuration,
old_hash: sled::IVec, old_hash: sled::IVec,
) -> Result<(), Error> { ) -> Result<(), Error> {
let Some(identifier) = old_repo.identifier(old_hash.clone()).await? else { let Some(identifier) = old_repo.identifier(old_hash.clone()).await? else {
@ -333,18 +300,9 @@ async fn do_migrate_hash_04<S: Store>(
return Ok(()); return Ok(());
}; };
let size = store.len(&identifier).await?; let size = state.store.len(&identifier).await?;
let hash_details = set_details( let hash_details = set_details(old_repo, state, &identifier).await?;
tmp_dir,
policy_dir,
old_repo,
new_repo,
store,
config,
&identifier,
)
.await?;
let aliases = old_repo.aliases_for_hash(old_hash.clone()).await?; let aliases = old_repo.aliases_for_hash(old_hash.clone()).await?;
let variants = old_repo.variants(old_hash.clone()).await?; let variants = old_repo.variants(old_hash.clone()).await?;
@ -354,7 +312,8 @@ async fn do_migrate_hash_04<S: Store>(
let hash = Hash::new(hash, size, hash_details.internal_format()); let hash = Hash::new(hash, size, hash_details.internal_format());
let _ = new_repo let _ = state
.repo
.create_hash_with_timestamp(hash.clone(), &identifier, hash_details.created_at()) .create_hash_with_timestamp(hash.clone(), &identifier, hash_details.created_at())
.await?; .await?;
@ -364,66 +323,45 @@ async fn do_migrate_hash_04<S: Store>(
.await? .await?
.unwrap_or_else(DeleteToken::generate); .unwrap_or_else(DeleteToken::generate);
let _ = new_repo let _ = state
.repo
.create_alias(&alias, &delete_token, hash.clone()) .create_alias(&alias, &delete_token, hash.clone())
.await?; .await?;
} }
if let Some(identifier) = motion_identifier { if let Some(identifier) = motion_identifier {
new_repo state
.repo
.relate_motion_identifier(hash.clone(), &identifier) .relate_motion_identifier(hash.clone(), &identifier)
.await?; .await?;
set_details( set_details(old_repo, state, &identifier).await?;
tmp_dir,
policy_dir,
old_repo,
new_repo,
store,
config,
&identifier,
)
.await?;
} }
for (variant, identifier) in variants { for (variant, identifier) in variants {
let _ = new_repo let _ = state
.repo
.relate_variant_identifier(hash.clone(), variant.clone(), &identifier) .relate_variant_identifier(hash.clone(), variant.clone(), &identifier)
.await?; .await?;
set_details( set_details(old_repo, state, &identifier).await?;
tmp_dir,
policy_dir,
old_repo,
new_repo,
store,
config,
&identifier,
)
.await?;
new_repo.accessed_variant(hash.clone(), variant).await?; state.repo.accessed_variant(hash.clone(), variant).await?;
} }
Ok(()) Ok(())
} }
async fn set_details<S: Store>( async fn set_details<S: Store>(
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
old_repo: &OldSledRepo, old_repo: &OldSledRepo,
new_repo: &ArcRepo, state: &State<S>,
store: &S,
config: &Configuration,
identifier: &Arc<str>, identifier: &Arc<str>,
) -> Result<Details, Error> { ) -> Result<Details, Error> {
if let Some(details) = new_repo.details(identifier).await? { if let Some(details) = state.repo.details(identifier).await? {
Ok(details) Ok(details)
} else { } else {
let details = let details = fetch_or_generate_details(old_repo, state, identifier).await?;
fetch_or_generate_details(tmp_dir, policy_dir, old_repo, store, config, identifier) state.repo.relate_details(identifier, &details).await?;
.await?;
new_repo.relate_details(identifier, &details).await?;
Ok(details) Ok(details)
} }
} }
@ -442,11 +380,8 @@ fn details_semaphore() -> &'static Semaphore {
#[tracing::instrument(skip_all)] #[tracing::instrument(skip_all)]
async fn fetch_or_generate_details<S: Store>( async fn fetch_or_generate_details<S: Store>(
tmp_dir: &TmpDir,
policy_dir: &PolicyDir,
old_repo: &OldSledRepo, old_repo: &OldSledRepo,
store: &S, state: &State<S>,
config: &Configuration,
identifier: &Arc<str>, identifier: &Arc<str>,
) -> Result<Details, Error> { ) -> Result<Details, Error> {
let details_opt = old_repo.details(identifier.clone()).await?; let details_opt = old_repo.details(identifier.clone()).await?;
@ -454,12 +389,11 @@ async fn fetch_or_generate_details<S: Store>(
if let Some(details) = details_opt { if let Some(details) = details_opt {
Ok(details) Ok(details)
} else { } else {
let bytes_stream = store.to_bytes(identifier, None, None).await?; let bytes_stream = state.store.to_bytes(identifier, None, None).await?;
let bytes = bytes_stream.into_bytes(); let bytes = bytes_stream.into_bytes();
let guard = details_semaphore().acquire().await?; let guard = details_semaphore().acquire().await?;
let details = let details = Details::from_bytes(state, bytes).await?;
Details::from_bytes(tmp_dir, policy_dir, config.media.process_timeout, bytes).await?;
drop(guard); drop(guard);
Ok(details) Ok(details)


@ -71,7 +71,7 @@ pub(crate) async fn validate_bytes<S>(
width, width,
height, height,
frames, frames,
} = crate::discover::discover_bytes(tmp_dir, policy_dir, timeout, bytes.clone()).await?; } = crate::discover::discover_bytes(state, bytes.clone()).await?;
match &input { match &input {
InputFile::Image(input) => { InputFile::Image(input) => {
@ -127,7 +127,7 @@ async fn process_image<S>(
magick::convert_image(state, input.format, format, quality, bytes).await? magick::convert_image(state, input.format, format, quality, bytes).await?
} else { } else {
exiftool::clear_metadata_bytes_read(bytes, timeout)? exiftool::clear_metadata_bytes_read(bytes, state.config.media.process_timeout)?
}; };
Ok((InternalFormat::Image(format), process_read)) Ok((InternalFormat::Image(format), process_read))
@ -160,7 +160,7 @@ fn validate_animation(
} }
#[tracing::instrument(skip(state, bytes))] #[tracing::instrument(skip(state, bytes))]
async fn process_animation( async fn process_animation<S>(
state: &State<S>, state: &State<S>,
bytes: Bytes, bytes: Bytes,
input: AnimationFormat, input: AnimationFormat,


@ -6,6 +6,7 @@ use crate::{
formats::{AnimationFormat, ImageFormat}, formats::{AnimationFormat, ImageFormat},
magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH}, magick::{MagickError, PolicyDir, MAGICK_CONFIGURE_PATH, MAGICK_TEMPORARY_PATH},
process::{Process, ProcessRead}, process::{Process, ProcessRead},
state::State,
tmp_file::TmpDir, tmp_file::TmpDir,
}; };
@ -40,12 +41,12 @@ pub(super) async fn convert_animation<S>(
output.magick_format(), output.magick_format(),
true, true,
quality, quality,
timeout, bytes,
) )
.await .await
} }
async fn convert( async fn convert<S>(
state: &State<S>, state: &State<S>,
input: &'static str, input: &'static str,
output: &'static str, output: &'static str,