Mirror of https://git.asonix.dog/asonix/pict-rs

Start work on 0.5 db format - initial queuerepo changes

asonix 2023-08-13 14:12:38 -05:00
parent f3b4342d27
commit 35d0c065e9
7 changed files with 2586 additions and 114 deletions
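
Editor's note: this commit replaces the 0.4-era worker-owned "in progress" queue with per-job state. pop now hands back a JobId alongside the payload, workers are expected to heartbeat while a job runs, and completion is reported explicitly. A sketch of the intended lifecycle (heartbeat and complete_job are still todo!() in the sled backend below; run_job is a hypothetical stand-in for the queue callback):

    // Intended lifecycle under the new QueueRepo trait (sketch only):
    let (job_id, bytes) = repo.pop(queue, worker_id.as_bytes().to_vec()).await?;
    run_job(bytes.as_ref()).await?;   // heartbeat(repo, job_id, ...) wraps this in src/queue.rs
    repo.complete_job(job_id).await?; // finished jobs are never requeued as timed out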

src/lib.rs

@@ -21,6 +21,7 @@ mod processor;
 mod queue;
 mod range;
 mod repo;
+mod repo_04;
 mod serde_str;
 mod store;
 mod stream;
@@ -40,7 +41,6 @@ use futures_util::{
 use metrics_exporter_prometheus::PrometheusBuilder;
 use middleware::Metrics;
 use once_cell::sync::Lazy;
-use repo::AliasAccessRepo;
 use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
 use reqwest_tracing::TracingMiddleware;
 use rusty_s3::UrlStyle;
@@ -69,8 +69,8 @@ use self::{
     migrate_store::migrate_store,
     queue::queue_generate,
     repo::{
-        sled::SledRepo, Alias, DeleteToken, FullRepo, HashRepo, IdentifierRepo, QueueRepo, Repo,
-        SettingsRepo, UploadId, UploadResult, VariantAccessRepo,
+        sled::SledRepo, Alias, AliasAccessRepo, DeleteToken, FullRepo, HashRepo, IdentifierRepo,
+        Repo, SettingsRepo, UploadId, UploadResult, VariantAccessRepo,
     },
     serde_str::Serde,
     store::{
@@ -1974,9 +1974,6 @@ impl PictRsConfiguration {
         let store = FileStore::build(path, repo.clone()).await?;
         match repo {
             Repo::Sled(sled_repo) => {
-                sled_repo
-                    .requeue_in_progress(config.server.worker_id.as_bytes().to_vec())
-                    .await?;
                 sled_repo
                     .mark_accessed::<<FileStore as Store>::Identifier>()
                     .await?;
@@ -2019,9 +2016,6 @@ impl PictRsConfiguration {
         match repo {
             Repo::Sled(sled_repo) => {
-                sled_repo
-                    .requeue_in_progress(config.server.worker_id.as_bytes().to_vec())
-                    .await?;
                 sled_repo
                     .mark_accessed::<<ObjectStore as Store>::Identifier>()
                     .await?;
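
Editor's note: both startup calls to requeue_in_progress are dropped here; the repo traits below replace them with requeue_timed_out, whose sled implementation this commit leaves as todo!(). A hypothetical sketch of what that pass could look like, assuming stale jobs are detected from the RFC 3339 heartbeat timestamp that JobState::running stores (see src/repo/sled.rs below); none of this is code from the commit:

    use time::{format_description::well_known::Rfc3339, OffsetDateTime};

    // Hypothetical: reset entries whose last heartbeat is older than the
    // cutoff back to "pending" so other workers can pop them again.
    fn requeue_older_than(job_state: &sled::Tree, cutoff: OffsetDateTime) -> sled::Result<usize> {
        let mut requeued = 0;
        for res in job_state.iter() {
            let (key, value) = res?;
            if value.as_ref() == b"pending" {
                continue; // unclaimed; nothing to do
            }
            // Running entries hold an RFC 3339 timestamp (see JobState::running)
            let Ok(stamp) = std::str::from_utf8(&value) else { continue };
            let Ok(last_beat) = OffsetDateTime::parse(stamp, &Rfc3339) else { continue };
            if last_beat < cutoff {
                job_state.insert(&key, b"pending".to_vec())?;
                requeued += 1;
            }
        }
        Ok(requeued)
    }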

src/queue.rs

@@ -4,13 +4,19 @@ use crate::{
     error::Error,
     formats::InputProcessableFormat,
     repo::{
-        Alias, AliasRepo, DeleteToken, FullRepo, HashRepo, IdentifierRepo, QueueRepo, UploadId,
+        Alias, AliasRepo, DeleteToken, FullRepo, HashRepo, IdentifierRepo, JobId, QueueRepo,
+        UploadId,
     },
     serde_str::Serde,
     store::{Identifier, Store},
 };
 use base64::{prelude::BASE64_STANDARD, Engine};
-use std::{future::Future, path::PathBuf, pin::Pin, time::Instant};
+use std::{
+    future::Future,
+    path::PathBuf,
+    pin::Pin,
+    time::{Duration, Instant},
+};
 use tracing::Instrument;

 mod cleanup;
@@ -289,15 +295,21 @@ where
         + Copy,
 {
     loop {
-        let bytes = repo.pop(queue, worker_id.as_bytes().to_vec()).await?;
+        let (job_id, bytes) = repo.pop(queue, worker_id.as_bytes().to_vec()).await?;

         let span = tracing::info_span!("Running Job", worker_id = ?worker_id);

         let guard = MetricsGuard::guard(worker_id.clone(), queue);

-        span.in_scope(|| (callback)(repo, store, config, bytes.as_ref()))
-            .instrument(span)
-            .await?;
+        span.in_scope(|| {
+            heartbeat(
+                repo,
+                job_id,
+                (callback)(repo, store, config, bytes.as_ref()),
+            )
+        })
+        .instrument(span)
+        .await?;

         guard.disarm();
     }
@@ -369,16 +381,62 @@ where
         + Copy,
 {
     loop {
-        let bytes = repo.pop(queue, worker_id.as_bytes().to_vec()).await?;
+        let (job_id, bytes) = repo.pop(queue, worker_id.as_bytes().to_vec()).await?;

         let span = tracing::info_span!("Running Job", worker_id = ?worker_id);

         let guard = MetricsGuard::guard(worker_id.clone(), queue);

-        span.in_scope(|| (callback)(repo, store, process_map, config, bytes.as_ref()))
-            .instrument(span)
-            .await?;
+        span.in_scope(|| {
+            heartbeat(
+                repo,
+                job_id,
+                (callback)(repo, store, process_map, config, bytes.as_ref()),
+            )
+        })
+        .instrument(span)
+        .await?;

         guard.disarm();
     }
 }
+
+async fn heartbeat<R, Fut>(repo: &R, job_id: JobId, fut: Fut) -> Fut::Output
+where
+    R: QueueRepo,
+    Fut: std::future::Future,
+{
+    let mut fut = std::pin::pin!(fut);
+
+    let mut interval = actix_rt::time::interval(Duration::from_secs(5));
+
+    let mut hb = None;
+
+    loop {
+        tokio::select! {
+            output = &mut fut => {
+                return output;
+            }
+            _ = interval.tick() => {
+                if hb.is_none() {
+                    hb = Some(repo.heartbeat(job_id));
+                }
+            }
+            opt = poll_opt(hb.as_mut()), if hb.is_some() => {
+                if let Some(Err(e)) = opt {
+                    tracing::warn!("Failed heartbeat\n{}", format!("{e:?}"));
+                }
+
+                hb = None;
+            }
+        }
+    }
+}
+
+async fn poll_opt<Fut>(opt: Option<&mut Fut>) -> Option<Fut::Output>
+where
+    Fut: std::future::Future + Unpin,
+{
+    match opt {
+        None => None,
+        Some(fut) => std::future::poll_fn(|cx| Pin::new(&mut *fut).poll(cx).map(Some)).await,
+    }
+}
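
Editor's note: the select! loop above races three futures: the job itself, a 5-second ticker, and at most one in-flight heartbeat write. The `if hb.is_some()` guard keeps select! from polling an empty slot, and poll_opt turns the Option<&mut Fut> into something pollable. A call site reduces to the following (run_job is a hypothetical stand-in for the queue callback; the wrapped future's own output is returned once it completes):

    let (job_id, bytes) = repo.pop(queue, worker_id.as_bytes().to_vec()).await?;
    heartbeat(repo, job_id, run_job(repo, store, config, bytes.as_ref())).await?;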

src/repo.rs

@@ -3,7 +3,6 @@ use crate::{
     details::Details,
     store::{Identifier, StoreError},
 };
-use base64::{prelude::BASE64_STANDARD, Engine};
 use futures_util::Stream;
 use std::fmt::Debug;
 use url::Url;
@@ -285,13 +284,38 @@ where
     }
 }

+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+pub(crate) struct JobId(Uuid);
+
+impl JobId {
+    pub(crate) fn gen() -> Self {
+        Self(Uuid::new_v4())
+    }
+
+    pub(crate) const fn as_bytes(&self) -> &[u8; 16] {
+        self.0.as_bytes()
+    }
+
+    pub(crate) const fn from_bytes(bytes: [u8; 16]) -> Self {
+        Self(Uuid::from_bytes(bytes))
+    }
+}
+
 #[async_trait::async_trait(?Send)]
 pub(crate) trait QueueRepo: BaseRepo {
-    async fn requeue_in_progress(&self, worker_prefix: Vec<u8>) -> Result<(), RepoError>;
+    async fn requeue_timed_out(&self, worker_prefix: Vec<u8>) -> Result<(), RepoError>;

     async fn push(&self, queue: &'static str, job: Self::Bytes) -> Result<(), RepoError>;

-    async fn pop(&self, queue: &'static str, worker_id: Vec<u8>) -> Result<Self::Bytes, RepoError>;
+    async fn pop(
+        &self,
+        queue: &'static str,
+        worker_id: Vec<u8>,
+    ) -> Result<(JobId, Self::Bytes), RepoError>;
+
+    async fn heartbeat(&self, job_id: JobId) -> Result<(), RepoError>;
+
+    async fn complete_job(&self, job_id: JobId) -> Result<(), RepoError>;
 }

 #[async_trait::async_trait(?Send)]
@@ -299,17 +323,29 @@ impl<T> QueueRepo for actix_web::web::Data<T>
 where
     T: QueueRepo,
 {
-    async fn requeue_in_progress(&self, worker_prefix: Vec<u8>) -> Result<(), RepoError> {
-        T::requeue_in_progress(self, worker_prefix).await
+    async fn requeue_timed_out(&self, worker_prefix: Vec<u8>) -> Result<(), RepoError> {
+        T::requeue_timed_out(self, worker_prefix).await
     }

     async fn push(&self, queue: &'static str, job: Self::Bytes) -> Result<(), RepoError> {
         T::push(self, queue, job).await
     }

-    async fn pop(&self, queue: &'static str, worker_id: Vec<u8>) -> Result<Self::Bytes, RepoError> {
+    async fn pop(
+        &self,
+        queue: &'static str,
+        worker_id: Vec<u8>,
+    ) -> Result<(JobId, Self::Bytes), RepoError> {
         T::pop(self, queue, worker_id).await
     }
+
+    async fn heartbeat(&self, job_id: JobId) -> Result<(), RepoError> {
+        T::heartbeat(self, job_id).await
+    }
+
+    async fn complete_job(&self, job_id: JobId) -> Result<(), RepoError> {
+        T::complete_job(self, job_id).await
+    }
 }

 #[async_trait::async_trait(?Send)]
@@ -801,23 +837,6 @@ impl std::fmt::Display for Alias {
     }
 }

-impl Identifier for Vec<u8> {
-    fn from_bytes(bytes: Vec<u8>) -> Result<Self, StoreError>
-    where
-        Self: Sized,
-    {
-        Ok(bytes)
-    }
-
-    fn to_bytes(&self) -> Result<Vec<u8>, StoreError> {
-        Ok(self.clone())
-    }
-
-    fn string_repr(&self) -> String {
-        BASE64_STANDARD.encode(self.as_slice())
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::{Alias, DeleteToken, MaybeUuid, Uuid};
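
Editor's note: JobId round-trips through its 16 raw UUID bytes, which is what lets the sled backend embed it directly in queue keys (queue name, a zero separator, then the id, as push constructs below):

    // Round trip plus the key layout used by SledRepo::push:
    let id = JobId::gen();
    assert_eq!(JobId::from_bytes(*id.as_bytes()), id);

    let mut key = b"process".to_vec(); // hypothetical queue name
    key.push(0);                       // separator
    key.extend(id.as_bytes());         // 16 raw UUID bytes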

src/repo/sled.rs

@@ -2,7 +2,7 @@ use crate::{
     details::MaybeHumanDate,
     repo::{
         Alias, AliasAlreadyExists, AliasRepo, BaseRepo, DeleteToken, Details, FullRepo,
-        HashAlreadyExists, HashRepo, Identifier, IdentifierRepo, MigrationRepo, QueueRepo,
+        HashAlreadyExists, HashRepo, Identifier, IdentifierRepo, JobId, MigrationRepo, QueueRepo,
         SettingsRepo, UploadId, UploadRepo, UploadResult,
     },
     serde_str::Serde,
@@ -74,13 +74,13 @@ pub(crate) struct SledRepo {
     alias_hashes: Tree,
     alias_delete_tokens: Tree,
     queue: Tree,
+    job_state: Tree,
     alias_access: Tree,
     inverse_alias_access: Tree,
     variant_access: Tree,
     inverse_variant_access: Tree,
     proxy: Tree,
     inverse_proxy: Tree,
-    in_progress_queue: Tree,
     queue_notifier: Arc<RwLock<HashMap<&'static str, Arc<Notify>>>>,
     uploads: Tree,
     migration_identifiers: Tree,
@@ -112,13 +112,13 @@ impl SledRepo {
             alias_hashes: db.open_tree("pict-rs-alias-hashes-tree")?,
             alias_delete_tokens: db.open_tree("pict-rs-alias-delete-tokens-tree")?,
             queue: db.open_tree("pict-rs-queue-tree")?,
+            job_state: db.open_tree("pict-rs-job-state-tree")?,
             alias_access: db.open_tree("pict-rs-alias-access-tree")?,
             inverse_alias_access: db.open_tree("pict-rs-inverse-alias-access-tree")?,
             variant_access: db.open_tree("pict-rs-variant-access-tree")?,
             inverse_variant_access: db.open_tree("pict-rs-inverse-variant-access-tree")?,
             proxy: db.open_tree("pict-rs-proxy-tree")?,
             inverse_proxy: db.open_tree("pict-rs-inverse-proxy-tree")?,
-            in_progress_queue: db.open_tree("pict-rs-in-progress-queue-tree")?,
             queue_notifier: Arc::new(RwLock::new(HashMap::new())),
             uploads: db.open_tree("pict-rs-uploads-tree")?,
             migration_identifiers: db.open_tree("pict-rs-migration-identifiers-tree")?,
@@ -270,7 +270,7 @@ impl futures_util::Stream for IterStream {
                 std::task::Poll::Ready(None)
             }
         } else if let Some(mut iter) = self.iter.take() {
-            self.next = Some(tokio::task::spawn_blocking(move || {
+            self.next = Some(actix_rt::task::spawn_blocking(move || {
                 let opt = iter
                     .next()
                     .map(|res| res.map_err(SledError::from).map_err(RepoError::from));
@@ -624,62 +624,68 @@ impl UploadRepo for SledRepo {
     }
 }

+enum JobState {
+    Pending,
+    Running(Vec<u8>),
+}
+
+impl JobState {
+    const fn pending() -> Self {
+        Self::Pending
+    }
+
+    fn running() -> Self {
+        Self::Running(
+            time::OffsetDateTime::now_utc()
+                .format(&time::format_description::well_known::Rfc3339)
+                .expect("Can format")
+                .into_bytes(),
+        )
+    }
+
+    fn as_bytes(&self) -> &[u8] {
+        match self {
+            Self::Pending => b"pending",
+            Self::Running(ref bytes) => bytes,
+        }
+    }
+}
+
 #[async_trait::async_trait(?Send)]
 impl QueueRepo for SledRepo {
     #[tracing::instrument(skip_all, fields(worker_id = %String::from_utf8_lossy(&worker_prefix)))]
-    async fn requeue_in_progress(&self, worker_prefix: Vec<u8>) -> Result<(), RepoError> {
-        let vec: Vec<(String, IVec)> = b!(self.in_progress_queue, {
-            let vec = in_progress_queue
-                .scan_prefix(worker_prefix)
-                .values()
-                .filter_map(Result::ok)
-                .filter_map(|ivec| {
-                    let index = ivec.as_ref().iter().enumerate().find_map(|(index, byte)| {
-                        if *byte == 0 {
-                            Some(index)
-                        } else {
-                            None
-                        }
-                    })?;
-
-                    let (queue, job) = ivec.split_at(index);
-                    if queue.is_empty() || job.len() <= 1 {
-                        return None;
-                    }
-                    let job = &job[1..];
-
-                    Some((String::from_utf8_lossy(queue).to_string(), IVec::from(job)))
-                })
-                .collect::<Vec<(String, IVec)>>();
-
-            Ok(vec) as Result<_, SledError>
-        });
-
-        let db = self.db.clone();
-        b!(self.queue, {
-            for (queue_name, job) in vec {
-                let id = db.generate_id()?;
-                let mut key = queue_name.as_bytes().to_vec();
-                key.extend(id.to_be_bytes());
-
-                queue.insert(key, job)?;
-            }
-
-            Ok(()) as Result<(), SledError>
-        });
-
-        Ok(())
+    async fn requeue_timed_out(&self, worker_prefix: Vec<u8>) -> Result<(), RepoError> {
+        todo!()
     }

     #[tracing::instrument(skip(self, job), fields(job = %String::from_utf8_lossy(&job)))]
     async fn push(&self, queue_name: &'static str, job: Self::Bytes) -> Result<(), RepoError> {
         let metrics_guard = PushMetricsGuard::guard(queue_name);

-        let id = self.db.generate_id().map_err(SledError::from)?;
+        let id = JobId::gen();
         let mut key = queue_name.as_bytes().to_vec();
-        key.extend(id.to_be_bytes());
+        key.push(0);
+        key.extend(id.as_bytes());

-        b!(self.queue, queue.insert(key, job));
+        let queue = self.queue.clone();
+        let job_state = self.job_state.clone();
+
+        let res = actix_rt::task::spawn_blocking(move || {
+            (&queue, &job_state).transaction(|(queue, job_state)| {
+                let state = JobState::pending();
+
+                queue.insert(key.as_slice(), &job)?;
+                job_state.insert(key.as_slice(), state.as_bytes())?;
+
+                Ok(())
+            })
+        })
+        .await
+        .map_err(|_| RepoError::Canceled)?;
+
+        if let Err(TransactionError::Abort(e) | TransactionError::Storage(e)) = res {
+            return Err(RepoError::from(SledError::from(e)));
+        }

         if let Some(notifier) = self.queue_notifier.read().unwrap().get(&queue_name) {
             notifier.notify_one();
@@ -704,40 +710,53 @@ impl QueueRepo for SledRepo {
         &self,
         queue_name: &'static str,
         worker_id: Vec<u8>,
-    ) -> Result<Self::Bytes, RepoError> {
+    ) -> Result<(JobId, Self::Bytes), RepoError> {
         let metrics_guard = PopMetricsGuard::guard(queue_name);

         loop {
-            let in_progress_queue = self.in_progress_queue.clone();
             let queue = self.queue.clone();
+            let job_state = self.job_state.clone();

-            let worker_id = worker_id.clone();
-            let job = b!(self.queue, {
-                in_progress_queue.remove(&worker_id)?;
-                in_progress_queue.flush()?;
+            let opt = actix_rt::task::spawn_blocking(move || {
+                for res in job_state.scan_prefix(queue_name) {
+                    let (key, value) = res?;

-                while let Some((key, job)) = queue
-                    .scan_prefix(queue_name.as_bytes())
-                    .find_map(Result::ok)
-                {
-                    let mut in_progress_value = queue_name.as_bytes().to_vec();
-                    in_progress_value.push(0);
-                    in_progress_value.extend_from_slice(&job);
-
-                    in_progress_queue.insert(&worker_id, in_progress_value)?;
-
-                    if queue.remove(key)?.is_some() {
-                        return Ok(Some(job));
+                    if value != "pending" {
+                        // TODO: requeue dead jobs
+                        continue;
                     }

-                    in_progress_queue.remove(&worker_id)?;
+                    let state = JobState::running();
+
+                    match job_state.compare_and_swap(&key, Some(value), Some(state.as_bytes())) {
+                        Ok(_) => {
+                            // acquired job
+                        }
+                        Err(_) => {
+                            // someone else acquired job
+                            continue;
+                        }
+                    }
+
+                    let id_bytes = &key[queue_name.len() + 1..];
+                    let id_bytes: [u8; 16] = id_bytes.try_into().expect("Key length");
+                    let job_id = JobId::from_bytes(id_bytes);
+
+                    let opt = queue.get(&key)?.map(|job_bytes| (job_id, job_bytes));
+
+                    return Ok(opt) as Result<Option<(JobId, Self::Bytes)>, SledError>;
                 }

-                Ok(None) as Result<_, SledError>
-            });
+                Ok(None)
+            })
+            .await
+            .map_err(|_| RepoError::Canceled)??;

-            if let Some(job) = job {
+            if let Some(tup) = opt {
                 metrics_guard.disarm();
-                return Ok(job);
+                return Ok(tup);
             }

             let opt = self
@@ -760,6 +779,14 @@ impl QueueRepo for SledRepo {
             notify.notified().await
         }
     }
+
+    async fn heartbeat(&self, job_id: JobId) -> Result<(), RepoError> {
+        todo!()
+    }
+
+    async fn complete_job(&self, job_id: JobId) -> Result<(), RepoError> {
+        todo!()
+    }
 }

 #[async_trait::async_trait(?Send)]
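
Editor's note: the claim step in pop relies on sled's compare_and_swap, which returns nested Results: the outer Err is a storage failure, while an inner Err means another worker flipped the state first. Spelled out in isolation (a sketch assuming sled 0.34's API; not code from this commit):

    use sled::{IVec, Tree};

    // Returns Ok(true) if this worker acquired the job, Ok(false) if another
    // worker won the compare-and-swap race and the key should be skipped.
    fn try_claim(job_state: &Tree, key: &IVec, seen: &IVec, running: &[u8]) -> sled::Result<bool> {
        match job_state.compare_and_swap(key, Some(seen), Some(running))? {
            Ok(()) => Ok(true),
            Err(_race) => Ok(false),
        }
    }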

src/repo_04.rs (new file, 1019 lines)

File diff suppressed because it is too large

src/repo_04/sled.rs (new file, 1334 lines)

File diff suppressed because it is too large

src/store.rs

@@ -1,4 +1,5 @@
 use actix_web::web::Bytes;
+use base64::{prelude::BASE64_STANDARD, Engine};
 use futures_util::stream::Stream;
 use std::fmt::Debug;
 use tokio::io::{AsyncRead, AsyncWrite};
@@ -17,6 +18,9 @@ pub(crate) enum StoreError {
     #[error("Error in DB")]
     Repo(#[from] crate::repo::RepoError),

+    #[error("Error in 0.4 DB")]
+    Repo04(#[from] crate::repo_04::RepoError),
+
     #[error("Requested file is not found")]
     FileNotFound(#[source] std::io::Error),
@@ -265,3 +269,20 @@ where
         T::remove(self, identifier).await
     }
 }
+
+impl Identifier for Vec<u8> {
+    fn from_bytes(bytes: Vec<u8>) -> Result<Self, StoreError>
+    where
+        Self: Sized,
+    {
+        Ok(bytes)
+    }
+
+    fn to_bytes(&self) -> Result<Vec<u8>, StoreError> {
+        Ok(self.clone())
+    }
+
+    fn string_repr(&self) -> String {
+        BASE64_STANDARD.encode(self.as_slice())
+    }
+}
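
Editor's note: the blanket Identifier impl for Vec<u8> (moved here from src/repo.rs) is a plain byte round-trip, with base64 used only for the display form:

    // Sketch: bytes pass through unchanged; string_repr is base64 for logs.
    let id = <Vec<u8> as Identifier>::from_bytes(vec![1, 2, 3])?;
    assert_eq!(id.to_bytes()?, vec![1, 2, 3]);
    assert_eq!(id.string_repr(), "AQID"); // BASE64_STANDARD encoding of [1, 2, 3]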