mirror of https://git.asonix.dog/asonix/pict-rs

commit 8226a3571d
parent 602d1ea935

Enable multi-threaded job processing

5 changed files with 120 additions and 101 deletions
(config file)

@@ -1,5 +1,6 @@
 [server]
 address = '0.0.0.0:8080'
+worker_id = 'pict-rs-1'
 [tracing.logging]
 format = 'normal'
 targets = 'warn,tracing_actix_web=info,actix_server=info,actix_web=info'
dev.toml (1 change)
@ -1,5 +1,6 @@
|
||||||
[server]
|
[server]
|
||||||
address = '0.0.0.0:8080'
|
address = '0.0.0.0:8080'
|
||||||
|
worker_id = 'pict-rs-1'
|
||||||
[tracing.logging]
|
[tracing.logging]
|
||||||
format = 'normal'
|
format = 'normal'
|
||||||
targets = 'warn,tracing_actix_web=info,actix_server=info,actix_web=info'
|
targets = 'warn,tracing_actix_web=info,actix_server=info,actix_web=info'
|
||||||
|
|
|
(config file)

@@ -1,5 +1,6 @@
 [server]
 address = '0.0.0.0:8080'
+worker_id = 'pict-rs-1'
 [tracing.logging]
 format = 'normal'
 targets = 'warn,tracing_actix_web=info,actix_server=info,actix_web=info'
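The only configuration change is the new worker_id key under [server], added to each of the example configs above. It provides the base name that pict-rs combines with a per-process counter to label queue consumers (see next_worker_id in src/main.rs below). A minimal sketch of parsing such a key, assuming serde and the toml crate as dependencies; the struct names here are illustrative, not pict-rs's actual config types:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Server {
    address: String,
    worker_id: String,
}

#[derive(Debug, Deserialize)]
struct Config {
    server: Server,
}

fn main() -> Result<(), toml::de::Error> {
    // Same shape as the dev.toml snippet above.
    let raw = "[server]\naddress = '0.0.0.0:8080'\nworker_id = 'pict-rs-1'\n";
    let config: Config = toml::from_str(raw)?;

    // The configured value becomes the prefix for per-worker queue IDs,
    // e.g. "pict-rs-1-0", "pict-rs-1-1", ...
    assert_eq!(config.server.worker_id, "pict-rs-1");
    Ok(())
}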
src/main.rs (69 changes)
@@ -15,11 +15,12 @@ use std::{
     future::ready,
     path::PathBuf,
     pin::Pin,
+    sync::atomic::{AtomicU64, Ordering},
     task::{Context, Poll},
     time::SystemTime,
 };
 use tokio::{io::AsyncReadExt, sync::Semaphore};
-use tracing::{debug, error, info, instrument, Span};
+use tracing::{debug, error, info, instrument};
 use tracing_actix_web::TracingLogger;
 use tracing_awc::Tracing;
 use tracing_futures::Instrument;
@@ -382,38 +383,11 @@ async fn process<S: Store + 'static>(
 
         let details = Details::from_bytes(bytes.clone(), format.as_hint()).await?;
 
-        let save_span = tracing::info_span!(
-            parent: None,
-            "Saving variant information",
-            path = tracing::field::debug(&thumbnail_path),
-            name = tracing::field::display(&alias),
-        );
-        save_span.follows_from(Span::current());
-        let details2 = details.clone();
-        let bytes2 = bytes.clone();
-        let alias2 = alias.clone();
-        actix_rt::spawn(
-            async move {
-                let identifier = match store.save_bytes(bytes2).await {
-                    Ok(identifier) => identifier,
-                    Err(e) => {
-                        tracing::warn!("Failed to generate directory path: {}", e);
-                        return;
-                    }
-                };
-                if let Err(e) = manager.store_details(&identifier, &details2).await {
-                    tracing::warn!("Error saving variant details: {}", e);
-                    return;
-                }
-                if let Err(e) = manager
-                    .store_variant(&alias2, &thumbnail_path, &identifier)
-                    .await
-                {
-                    tracing::warn!("Error saving variant info: {}", e);
-                }
-            }
-            .instrument(save_span),
-        );
+        let identifier = store.save_bytes(bytes.clone()).await?;
+        manager.store_details(&identifier, &details).await?;
+        manager
+            .store_variant(&alias, &thumbnail_path, &identifier)
+            .await?;
 
         Ok((details, bytes)) as Result<(Details, web::Bytes), Error>
     };
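The hunk above also changes how a freshly generated variant is persisted: instead of handing the bytes to a detached actix_rt::spawn task (with its own tracing span) that can only log failures, the save is now awaited inline so any error propagates out of process via the ? operator. A minimal sketch of that trade-off, using tokio in place of actix_rt so it runs standalone; save and record are hypothetical stand-ins for the store and manager calls:

use std::io;

// Stand-ins for store.save_bytes and manager.store_variant; purely illustrative.
async fn save(bytes: &[u8]) -> io::Result<u64> {
    Ok(bytes.len() as u64)
}

async fn record(id: u64) -> io::Result<()> {
    println!("recorded variant under identifier {id}");
    Ok(())
}

// Old shape: fire-and-forget. Errors never reach the caller; they can only be logged.
fn persist_detached(bytes: Vec<u8>) {
    tokio::spawn(async move {
        match save(&bytes).await {
            Ok(id) => {
                if let Err(e) = record(id).await {
                    eprintln!("Error saving variant details: {e}");
                }
            }
            Err(e) => eprintln!("Error saving variant: {e}"),
        }
    });
}

// New shape: awaited inline. Any failure propagates to the request handler via `?`.
async fn persist_inline(bytes: Vec<u8>) -> io::Result<u64> {
    let id = save(&bytes).await?;
    record(id).await?;
    Ok(id)
}

#[tokio::main]
async fn main() -> io::Result<()> {
    persist_detached(b"variant".to_vec());
    let id = persist_inline(b"variant".to_vec()).await?;
    println!("saved inline as {id}");
    Ok(())
}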
@@ -632,18 +606,18 @@ fn build_reqwest_client() -> reqwest::Result<reqwest::Client> {
         .build()
 }
 
+fn next_worker_id() -> String {
+    static WORKER_ID: AtomicU64 = AtomicU64::new(0);
+
+    let next_id = WORKER_ID.fetch_add(1, Ordering::Relaxed);
+
+    format!("{}-{}", CONFIG.server.worker_id, next_id)
+}
+
 async fn launch<S: Store + Clone + 'static>(
     manager: UploadManager,
     store: S,
 ) -> color_eyre::Result<()> {
-    let repo = manager.repo().clone();
-
-    actix_rt::spawn(queue::process_jobs(
-        repo,
-        store.clone(),
-        CONFIG.server.worker_id.as_bytes().to_vec(),
-    ));
-
     // Create a new Multipart Form validator
     //
     // This form is expecting a single array field, 'images' with at most 10 files in it
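The new next_worker_id helper shown above is a process-wide AtomicU64 counter appended to the configured server.worker_id, so each call hands out a distinct ID. A self-contained sketch of the same pattern; the prefix is hard-coded here instead of coming from CONFIG:

use std::sync::atomic::{AtomicU64, Ordering};

fn next_worker_id() -> String {
    // One counter shared by every call in this process; fetch_add is atomic,
    // so concurrent callers each observe a distinct value.
    static WORKER_ID: AtomicU64 = AtomicU64::new(0);
    let next_id = WORKER_ID.fetch_add(1, Ordering::Relaxed);

    // pict-rs prefixes this with CONFIG.server.worker_id; a literal stands in here.
    format!("{}-{}", "pict-rs-1", next_id)
}

fn main() {
    assert_eq!(next_worker_id(), "pict-rs-1-0");
    assert_eq!(next_worker_id(), "pict-rs-1-1");
}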
@@ -717,11 +691,20 @@ async fn launch<S: Store + Clone + 'static>(
     );
 
     HttpServer::new(move || {
+        let manager = manager.clone();
+        let store = store.clone();
+
+        actix_rt::spawn(queue::process_jobs(
+            manager.repo().clone(),
+            store.clone(),
+            next_worker_id(),
+        ));
+
         App::new()
             .wrap(TracingLogger::default())
             .wrap(Deadline)
-            .app_data(web::Data::new(store.clone()))
-            .app_data(web::Data::new(manager.clone()))
+            .app_data(web::Data::new(store))
+            .app_data(web::Data::new(manager))
             .app_data(web::Data::new(build_client()))
             .app_data(web::Data::new(CONFIG.media.filters.clone()))
             .service(
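Moving the queue::process_jobs spawn from launch into the HttpServer::new factory closure is what makes job processing multi-threaded: actix-web calls that closure once per server worker, so each worker now spawns its own queue consumer with a unique ID from next_worker_id(). A stripped-down sketch of the same structure, assuming actix-web and actix-rt as dependencies; process_jobs here is a stand-in for the real queue consumer:

use actix_web::{web, App, HttpServer, Responder};

async fn process_jobs(worker_id: String) {
    // Stand-in for queue::process_jobs: each HTTP worker runs one of these.
    println!("job processor {worker_id} started");
}

async fn index() -> impl Responder {
    "ok"
}

fn next_worker_id() -> String {
    use std::sync::atomic::{AtomicU64, Ordering};
    static WORKER_ID: AtomicU64 = AtomicU64::new(0);
    format!("worker-{}", WORKER_ID.fetch_add(1, Ordering::Relaxed))
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(move || {
        // This closure runs once per worker thread, so every worker
        // gets its own background job processor.
        actix_rt::spawn(process_jobs(next_worker_id()));

        App::new().route("/", web::get().to(index))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}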
src/queue.rs (149 changes)
@@ -1,103 +1,100 @@
 use crate::{
     error::Error,
     repo::{AliasRepo, HashRepo, IdentifierRepo, QueueRepo, Repo},
-    store::Store,
+    store::{Identifier, Store},
 };
-use tracing::{debug, error, Span};
+use tracing::{debug, error};
 
 #[derive(Debug, serde::Deserialize, serde::Serialize)]
 enum Job {
-    Cleanup { hash: Vec<u8> },
+    CleanupHash { hash: Vec<u8> },
+    CleanupIdentifier { identifier: Vec<u8> },
 }
 
 pub(crate) async fn queue_cleanup<R: QueueRepo>(repo: &R, hash: R::Bytes) -> Result<(), Error> {
-    let job = serde_json::to_vec(&Job::Cleanup {
+    let job = serde_json::to_vec(&Job::CleanupHash {
         hash: hash.as_ref().to_vec(),
     })?;
     repo.push(job.into()).await?;
     Ok(())
 }
 
-pub(crate) async fn process_jobs<S: Store>(repo: Repo, store: S, worker_id: Vec<u8>) {
-    loop {
-        let res = match repo {
-            Repo::Sled(ref repo) => do_process_jobs(repo, &store, worker_id.clone()).await,
-        };
-
-        if let Err(e) = res {
-            tracing::warn!("Error processing jobs: {}", e);
-            tracing::warn!("{:?}", e);
-            continue;
-        }
-
-        break;
-    }
-}
+pub(crate) async fn process_jobs<S: Store>(repo: Repo, store: S, worker_id: String) {
+    match repo {
+        Repo::Sled(ref repo) => {
+            if let Ok(Some(job)) = repo.in_progress(worker_id.as_bytes().to_vec()).await {
+                if let Err(e) = run_job(repo, &store, &job).await {
+                    tracing::warn!("Failed to run previously dropped job: {}", e);
+                    tracing::warn!("{:?}", e);
+                }
+            }
+            loop {
+                let res = job_loop(repo, &store, worker_id.clone()).await;
+
+                if let Err(e) = res {
+                    tracing::warn!("Error processing jobs: {}", e);
+                    tracing::warn!("{:?}", e);
+                    continue;
+                }
+
+                break;
+            }
+        }
+    }
+}
 
-async fn do_process_jobs<R, S>(repo: &R, store: &S, worker_id: Vec<u8>) -> Result<(), Error>
+async fn job_loop<R, S>(repo: &R, store: &S, worker_id: String) -> Result<(), Error>
 where
     R: QueueRepo + HashRepo + IdentifierRepo + AliasRepo,
     R::Bytes: Clone,
     S: Store,
 {
     loop {
-        let bytes = repo.pop(worker_id.clone()).await?;
+        let bytes = repo.pop(worker_id.as_bytes().to_vec()).await?;
 
-        match serde_json::from_slice(bytes.as_ref()) {
-            Ok(job) => match job {
-                Job::Cleanup { hash } => cleanup(repo, store, hash).await?,
-            },
-            Err(e) => {
-                tracing::warn!("Invalid job: {}", e);
-            }
-        }
+        run_job(repo, store, bytes.as_ref()).await?;
     }
 }
 
-#[tracing::instrument(skip(repo, store))]
-async fn cleanup<R, S>(repo: &R, store: &S, hash: Vec<u8>) -> Result<(), Error>
+async fn run_job<R, S>(repo: &R, store: &S, job: &[u8]) -> Result<(), Error>
 where
-    R: HashRepo + IdentifierRepo + AliasRepo,
+    R: QueueRepo + HashRepo + IdentifierRepo + AliasRepo,
     R::Bytes: Clone,
     S: Store,
 {
-    let hash: R::Bytes = hash.into();
-
-    let aliases = repo.aliases(hash.clone()).await?;
-
-    if !aliases.is_empty() {
-        return Ok(());
-    }
-
-    let variant_idents = repo
-        .variants::<S::Identifier>(hash.clone())
-        .await?
-        .into_iter()
-        .map(|(_, v)| v)
-        .collect::<Vec<_>>();
-    let main_ident = repo.identifier(hash.clone()).await?;
-    let motion_ident = repo.motion_identifier(hash.clone()).await?;
-
-    HashRepo::cleanup(repo, hash).await?;
-
-    let cleanup_span = tracing::info_span!(parent: None, "Cleaning files");
-    cleanup_span.follows_from(Span::current());
-
-    let mut errors = Vec::new();
-
-    for identifier in variant_idents
-        .iter()
-        .chain(&[main_ident])
-        .chain(motion_ident.iter())
-    {
-        debug!("Deleting {:?}", identifier);
-        if let Err(e) = store.remove(identifier).await {
-            errors.push(e);
-        }
-
-        if let Err(e) = IdentifierRepo::cleanup(repo, identifier).await {
-            errors.push(e);
-        }
-    }
+    match serde_json::from_slice(job) {
+        Ok(job) => match job {
+            Job::CleanupHash { hash } => cleanup_hash::<R, S>(repo, hash).await?,
+            Job::CleanupIdentifier { identifier } => {
+                cleanup_identifier(repo, store, identifier).await?
+            }
+        },
+        Err(e) => {
+            tracing::warn!("Invalid job: {}", e);
+        }
+    }
+
+    Ok(())
+}
+
+#[tracing::instrument(skip(repo, store))]
+async fn cleanup_identifier<R, S>(repo: &R, store: &S, identifier: Vec<u8>) -> Result<(), Error>
+where
+    R: QueueRepo + HashRepo + IdentifierRepo,
+    R::Bytes: Clone,
+    S: Store,
+{
+    let identifier = S::Identifier::from_bytes(identifier)?;
+
+    let mut errors = Vec::new();
+
+    debug!("Deleting {:?}", identifier);
+    if let Err(e) = store.remove(&identifier).await {
+        errors.push(e);
+    }
+
+    if let Err(e) = IdentifierRepo::cleanup(repo, &identifier).await {
+        errors.push(e);
+    }
 
     if !errors.is_empty() {
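The reworked process_jobs / job_loop / run_job split above follows a common queue-consumer shape: on startup, re-run any job that a previous incarnation of this worker ID left marked in-progress, then loop popping serialized jobs and dispatching on a serde-tagged enum. A distilled, self-contained sketch of the pop-deserialize-dispatch step, assuming serde and serde_json as dependencies; an in-memory VecDeque stands in for the sled-backed queue repo:

use std::collections::VecDeque;

// Same job shape as in src/queue.rs.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
enum Job {
    CleanupHash { hash: Vec<u8> },
    CleanupIdentifier { identifier: Vec<u8> },
}

// Deserialize-and-dispatch, mirroring run_job's warn-and-continue on bad payloads.
fn run_job(job: &[u8]) {
    match serde_json::from_slice::<Job>(job) {
        Ok(Job::CleanupHash { hash }) => println!("cleanup hash {hash:?}"),
        Ok(Job::CleanupIdentifier { identifier }) => {
            println!("cleanup identifier {identifier:?}")
        }
        Err(e) => eprintln!("Invalid job: {e}"),
    }
}

fn main() -> Result<(), serde_json::Error> {
    // An in-memory queue stands in for the sled-backed QueueRepo.
    let mut queue: VecDeque<Vec<u8>> = VecDeque::new();
    queue.push_back(serde_json::to_vec(&Job::CleanupHash { hash: vec![1, 2] })?);
    queue.push_back(serde_json::to_vec(&Job::CleanupIdentifier { identifier: vec![3] })?);

    // The real job_loop awaits repo.pop(worker_id), which waits for new jobs to arrive.
    while let Some(bytes) = queue.pop_front() {
        run_job(&bytes);
    }
    Ok(())
}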
@@ -111,3 +108,39 @@ where
 
     Ok(())
 }
+
+#[tracing::instrument(skip(repo))]
+async fn cleanup_hash<R, S>(repo: &R, hash: Vec<u8>) -> Result<(), Error>
+where
+    R: QueueRepo + AliasRepo + HashRepo + IdentifierRepo,
+    R::Bytes: Clone,
+    S: Store,
+{
+    let hash: R::Bytes = hash.into();
+
+    let aliases = repo.aliases(hash.clone()).await?;
+
+    if !aliases.is_empty() {
+        return Ok(());
+    }
+
+    let mut idents = repo
+        .variants::<S::Identifier>(hash.clone())
+        .await?
+        .into_iter()
+        .map(|(_, v)| v)
+        .collect::<Vec<_>>();
+    idents.push(repo.identifier(hash.clone()).await?);
+    idents.extend(repo.motion_identifier(hash.clone()).await?);
+
+    for identifier in idents {
+        if let Ok(identifier) = identifier.to_bytes() {
+            let job = serde_json::to_vec(&Job::CleanupIdentifier { identifier })?;
+            repo.push(job.into()).await?;
+        }
+    }
+
+    HashRepo::cleanup(repo, hash).await?;
+
+    Ok(())
+}
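The new cleanup_hash no longer deletes files itself: it collects the hash's variant, main, and motion identifiers and pushes one CleanupIdentifier job per identifier back onto the queue, so the actual removals become independent, retryable units of work for whichever worker pops them. A small sketch of that fan-out step, reusing the Job enum shape from above and assuming serde and serde_json; a plain Vec stands in for the queue repo's push target:

#[derive(Debug, serde::Deserialize, serde::Serialize)]
enum Job {
    CleanupHash { hash: Vec<u8> },
    CleanupIdentifier { identifier: Vec<u8> },
}

// Fan a hash cleanup out into one queued job per file identifier.
fn enqueue_identifier_cleanups(
    identifiers: Vec<Vec<u8>>,
    queue: &mut Vec<Vec<u8>>,
) -> Result<(), serde_json::Error> {
    for identifier in identifiers {
        let job = serde_json::to_vec(&Job::CleanupIdentifier { identifier })?;
        queue.push(job);
    }
    Ok(())
}

fn main() -> Result<(), serde_json::Error> {
    let mut queue = Vec::new();
    enqueue_identifier_cleanups(vec![vec![1], vec![2], vec![3]], &mut queue)?;
    assert_eq!(queue.len(), 3);
    Ok(())
}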