// pict-rs/src/queue.rs
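
//! Background job queue for pict-rs: jobs are serialized to JSON, pushed
//! into repo-backed queues, and executed by worker loops that pop each job,
//! run it alongside a heartbeat, and report the outcome back to the repo.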

use crate::{
    error::{Error, UploadError},
    formats::InputProcessableFormat,
    future::{LocalBoxFuture, WithPollTimer},
    repo::{Alias, ArcRepo, DeleteToken, Hash, JobId, UploadId},
    serde_str::Serde,
    state::State,
    store::Store,
    UploadQuery,
};

use std::{
    ops::Deref,
    rc::Rc,
    sync::Arc,
    time::{Duration, Instant},
};
use tokio::task::JoinError;
use tracing::Instrument;

pub(crate) mod cleanup;
mod process;

const CLEANUP_QUEUE: &str = "cleanup";
const PROCESS_QUEUE: &str = "process";
const OUTDATED_PROXIES_UNIQUE_KEY: &str = "outdated-proxies";
const OUTDATED_VARIANTS_UNIQUE_KEY: &str = "outdated-variants";
const ALL_VARIANTS_UNIQUE_KEY: &str = "all-variants";
const PRUNE_MISSING_UNIQUE_KEY: &str = "prune-missing";
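
/// A job on the "cleanup" queue, serialized to JSON when pushed and handled
/// by `cleanup::perform`.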
#[derive(Debug, serde::Deserialize, serde::Serialize)]
enum Cleanup {
    Hash {
        hash: Hash,
    },
    Identifier {
        identifier: String,
    },
    Alias {
        alias: Serde<Alias>,
        token: Serde<DeleteToken>,
    },
    Variant {
        hash: Hash,
        #[serde(skip_serializing_if = "Option::is_none")]
        variant: Option<String>,
    },
    AllVariants,
    OutdatedVariants,
    OutdatedProxies,
    Prune,
}
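
/// A job on the "process" queue, serialized to JSON when pushed and handled
/// by `process::perform`.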
#[derive(Debug, serde::Deserialize, serde::Serialize)]
enum Process {
    Ingest {
        identifier: String,
        upload_id: Serde<UploadId>,
        declared_alias: Option<Serde<Alias>>,
        #[serde(default)]
        upload_query: UploadQuery,
    },
    Generate {
        target_format: InputProcessableFormat,
        source: Serde<Alias>,
        process_path: String,
        process_args: Vec<String>,
    },
}
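
/// Enqueues deletion of an alias, authorized by its delete token.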
pub(crate) async fn cleanup_alias(
    repo: &ArcRepo,
    alias: Alias,
    token: DeleteToken,
) -> Result<(), Error> {
    let job = serde_json::to_value(Cleanup::Alias {
        alias: Serde::new(alias),
        token: Serde::new(token),
    })
    .map_err(UploadError::PushJob)?;
    repo.push(CLEANUP_QUEUE, job, None).await?;
    Ok(())
}

pub(crate) async fn cleanup_hash(repo: &ArcRepo, hash: Hash) -> Result<(), Error> {
    let job = serde_json::to_value(Cleanup::Hash { hash }).map_err(UploadError::PushJob)?;
    repo.push(CLEANUP_QUEUE, job, None).await?;
    Ok(())
}

pub(crate) async fn cleanup_identifier(repo: &ArcRepo, identifier: &Arc<str>) -> Result<(), Error> {
    let job = serde_json::to_value(Cleanup::Identifier {
        identifier: identifier.to_string(),
    })
    .map_err(UploadError::PushJob)?;
    repo.push(CLEANUP_QUEUE, job, None).await?;
    Ok(())
}
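
/// Enqueues cleanup of generated variants for a hash. A `variant` of `None`
/// requests cleanup of every variant recorded for the hash.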
async fn cleanup_variants(
    repo: &ArcRepo,
    hash: Hash,
    variant: Option<String>,
) -> Result<(), Error> {
    let job =
        serde_json::to_value(Cleanup::Variant { hash, variant }).map_err(UploadError::PushJob)?;
    repo.push(CLEANUP_QUEUE, job, None).await?;
    Ok(())
}
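
// The queue-wide jobs below are pushed with a unique key; `repo.push` returns
// `None` when an equivalent job is already queued, in which case the new job
// is dropped and the conflict logged at debug level.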
pub(crate) async fn cleanup_outdated_proxies(repo: &ArcRepo) -> Result<(), Error> {
    let job = serde_json::to_value(Cleanup::OutdatedProxies).map_err(UploadError::PushJob)?;
    if repo
        .push(CLEANUP_QUEUE, job, Some(OUTDATED_PROXIES_UNIQUE_KEY))
        .await?
        .is_none()
    {
        tracing::debug!("outdated proxies conflict");
    }

    Ok(())
}

pub(crate) async fn cleanup_outdated_variants(repo: &ArcRepo) -> Result<(), Error> {
    let job = serde_json::to_value(Cleanup::OutdatedVariants).map_err(UploadError::PushJob)?;
    if repo
        .push(CLEANUP_QUEUE, job, Some(OUTDATED_VARIANTS_UNIQUE_KEY))
        .await?
        .is_none()
    {
        tracing::debug!("outdated variants conflict");
    }
    Ok(())
}

pub(crate) async fn cleanup_all_variants(repo: &ArcRepo) -> Result<(), Error> {
    let job = serde_json::to_value(Cleanup::AllVariants).map_err(UploadError::PushJob)?;
    if repo
        .push(CLEANUP_QUEUE, job, Some(ALL_VARIANTS_UNIQUE_KEY))
        .await?
        .is_none()
    {
        tracing::debug!("all variants conflict");
    }
    Ok(())
}

pub(crate) async fn prune_missing(repo: &ArcRepo) -> Result<(), Error> {
    let job = serde_json::to_value(Cleanup::Prune).map_err(UploadError::PushJob)?;
    if repo
        .push(CLEANUP_QUEUE, job, Some(PRUNE_MISSING_UNIQUE_KEY))
        .await?
        .is_none()
    {
        tracing::debug!("prune missing conflict");
    }

    Ok(())
}
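
/// Enqueues background ingest of an uploaded file, identified by its store
/// identifier.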
pub(crate) async fn queue_ingest(
    repo: &ArcRepo,
    identifier: &Arc<str>,
    upload_id: UploadId,
    declared_alias: Option<Alias>,
    upload_query: UploadQuery,
) -> Result<(), Error> {
    let job = serde_json::to_value(Process::Ingest {
        identifier: identifier.to_string(),
        declared_alias: declared_alias.map(Serde::new),
        upload_id: Serde::new(upload_id),
        upload_query,
    })
    .map_err(UploadError::PushJob)?;
    repo.push(PROCESS_QUEUE, job, None).await?;
    Ok(())
}
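
/// Enqueues generation of a processed variant of the `source` alias; the
/// `variant` string is stored as the job's `process_path`.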
pub(crate) async fn queue_generate(
    repo: &ArcRepo,
    target_format: InputProcessableFormat,
    source: Alias,
    variant: String,
    process_args: Vec<String>,
) -> Result<(), Error> {
    let job = serde_json::to_value(Process::Generate {
        target_format,
        source: Serde::new(source),
        process_path: variant,
        process_args,
    })
    .map_err(UploadError::PushJob)?;
    repo.push(PROCESS_QUEUE, job, None).await?;
    Ok(())
}
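
/// Runs a worker loop over the "cleanup" queue.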
pub(crate) async fn process_cleanup<S: Store + 'static>(state: State<S>) {
    process_jobs(state, CLEANUP_QUEUE, cleanup::perform).await
}
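
/// Runs a worker loop over the "process" queue.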
pub(crate) async fn process_images<S: Store + 'static>(state: State<S>) {
    process_jobs(state, PROCESS_QUEUE, process::perform).await
}
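
/// RAII guard that records job metrics: a start counter on creation, and a
/// duration histogram plus an end counter on drop. Calling `disarm` marks the
/// job as completed; a guard dropped while still armed records
/// `completed = false`.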
struct MetricsGuard {
    worker_id: uuid::Uuid,
    queue: &'static str,
    start: Instant,
    armed: bool,
}

impl MetricsGuard {
    fn guard(worker_id: uuid::Uuid, queue: &'static str) -> Self {
        metrics::counter!(crate::init_metrics::JOB_START, "queue" => queue, "worker-id" => worker_id.to_string()).increment(1);

        Self {
            worker_id,
            queue,
            start: Instant::now(),
            armed: true,
        }
    }

    fn disarm(mut self) {
        self.armed = false;
    }
}

impl Drop for MetricsGuard {
    fn drop(&mut self) {
        metrics::histogram!(crate::init_metrics::JOB_DURAION, "queue" => self.queue, "worker-id" => self.worker_id.to_string(), "completed" => (!self.armed).to_string()).record(self.start.elapsed().as_secs_f64());
        metrics::counter!(crate::init_metrics::JOB_END, "queue" => self.queue, "worker-id" => self.worker_id.to_string(), "completed" => (!self.armed).to_string()).increment(1);
    }
}
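
/// Error produced by a job, indicating whether the job should be retried or
/// abandoned.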
pub(super) enum JobError {
    Abort(Error),
    Retry(Error),
}

impl AsRef<Error> for JobError {
    fn as_ref(&self) -> &Error {
        match self {
            Self::Abort(e) | Self::Retry(e) => e,
        }
    }
}

impl Deref for JobError {
    type Target = Error;

    fn deref(&self) -> &Self::Target {
        match self {
            Self::Abort(e) | Self::Retry(e) => e,
        }
    }
}

impl From<JobError> for Error {
    fn from(value: JobError) -> Self {
        match value {
            JobError::Abort(e) | JobError::Retry(e) => e,
        }
    }
}

type JobResult<T = ()> = Result<T, JobError>;

type JobFuture<'a> = LocalBoxFuture<'a, JobResult>;
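
/// Extension trait for mapping a fallible result into a `JobResult`, choosing
/// whether a failure aborts the job or leaves it eligible for retry.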
trait JobContext {
    type Item;

    fn abort(self) -> JobResult<Self::Item>
    where
        Self: Sized;

    fn retry(self) -> JobResult<Self::Item>
    where
        Self: Sized;
}

impl<T, E> JobContext for Result<T, E>
where
    E: Into<Error>,
{
    type Item = T;

    fn abort(self) -> JobResult<Self::Item>
    where
        Self: Sized,
    {
        self.map_err(Into::into).map_err(JobError::Abort)
    }

    fn retry(self) -> JobResult<Self::Item>
    where
        Self: Sized,
    {
        self.map_err(Into::into).map_err(JobError::Retry)
    }
}
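
/// Maps a job's outcome to the repo's notion of a job result. A `JoinError`
/// (the job task panicked or was cancelled) is treated as an abort.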
fn job_result(result: &Result<JobResult, JoinError>) -> crate::repo::JobResult {
    match result {
        Ok(Ok(())) => crate::repo::JobResult::Success,
        Ok(Err(JobError::Retry(_))) => crate::repo::JobResult::Failure,
        Ok(Err(JobError::Abort(_))) => crate::repo::JobResult::Aborted,
        Err(_) => crate::repo::JobResult::Aborted,
    }
}
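
/// Supervises a worker: runs `job_loop` in a spawned task so a panicking job
/// cannot take down the loop, restarts it after errors, and backs off for ten
/// seconds when the repo appears disconnected.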
async fn process_jobs<S, F>(state: State<S>, queue: &'static str, callback: F)
where
    S: Store + 'static,
    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> JobFuture<'a> + Copy + 'static,
{
    let worker_id = uuid::Uuid::new_v4();

    let state = Rc::new(state);

    loop {
        tracing::trace!("process_jobs: looping");

        crate::sync::cooperate().await;

        // add a panic boundary by spawning a task
        let res = crate::sync::spawn(
            "job-loop",
            job_loop(state.clone(), worker_id, queue, callback),
        )
        .await;

        match res {
            // clean exit
            Ok(Ok(())) => break,
            // job error
            Ok(Err(e)) => {
                tracing::warn!("Error processing jobs: {e}");
                tracing::warn!("{e:?}");

                if e.is_disconnected() {
                    tokio::time::sleep(Duration::from_secs(10)).await;
                }
            }
            // job panic
            Err(_) => {
                tracing::warn!("Panic while processing jobs");
            }
        }
    }
}
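
/// Pops and runs jobs from `queue` one at a time. Each job executes in its
/// own task alongside a heartbeat, and its result is reported back to the
/// repo via `complete_job` before the next job is popped.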
async fn job_loop<S, F>(
    state: Rc<State<S>>,
    worker_id: uuid::Uuid,
    queue: &'static str,
    callback: F,
) -> Result<(), Error>
where
    S: Store + 'static,
    for<'a> F: Fn(&'a State<S>, serde_json::Value) -> JobFuture<'a> + Copy + 'static,
{
    loop {
        tracing::trace!("job_loop: looping");

        crate::sync::cooperate().with_poll_timer("cooperate").await;

        async {
            let (job_id, job) = state
                .repo
                .pop(queue, worker_id)
                .with_poll_timer("pop-job")
                .await?;

            let guard = MetricsGuard::guard(worker_id, queue);

            let state2 = state.clone();
            let res = crate::sync::spawn("job-and-heartbeat", async move {
                let state = state2;
                heartbeat(
                    &state.repo,
                    queue,
                    worker_id,
                    job_id,
                    (callback)(&state, job),
                )
                .await
            })
            .await;

            state
                .repo
                .complete_job(queue, worker_id, job_id, job_result(&res))
                .with_poll_timer("job-complete")
                .await?;

            res.map_err(|_| UploadError::Canceled)??;

            guard.disarm();

            Ok(()) as Result<(), Error>
        }
        .instrument(tracing::info_span!("tick", %queue, %worker_id))
        .await?;
    }
}
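
/// Drives the job future to completion while sending a heartbeat for the job
/// to the repo roughly every five seconds.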
#[tracing::instrument("running-job", skip(repo, queue, worker_id, fut))]
async fn heartbeat<Fut>(
    repo: &ArcRepo,
    queue: &'static str,
    worker_id: uuid::Uuid,
    job_id: JobId,
    fut: Fut,
) -> Fut::Output
where
    Fut: std::future::Future,
{
    let mut fut = std::pin::pin!(fut
        .with_poll_timer("job-future")
        .instrument(tracing::info_span!("job-future")));

    let mut interval = tokio::time::interval(Duration::from_secs(5));

    let mut hb = None;

    loop {
        tracing::trace!("heartbeat: looping");

        crate::sync::cooperate().await;

        tokio::select! {
            biased;
            output = &mut fut => {
                return output;
            }
            _ = interval.tick() => {
                if hb.is_none() {
                    hb = Some(repo.heartbeat(queue, worker_id, job_id));
                }
            }
            opt = poll_opt(hb.as_mut()), if hb.is_some() => {
                hb.take();

                if let Some(Err(e)) = opt {
                    tracing::warn!("Failed heartbeat\n{e:?}");
                }
            }
        }
    }
}
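
/// Polls the inner future when present, resolving immediately to `None` when
/// it is absent. This makes the optional in-flight heartbeat future usable in
/// `select!` above.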
async fn poll_opt<Fut>(opt: Option<&mut Fut>) -> Option<Fut::Output>
where
    Fut: std::future::Future + Unpin,
{
    match opt {
        None => None,
        Some(fut) => Some(fut.await),
    }
}