
Implement a couple more repo traits

asonix 2023-09-03 12:47:06 -05:00
parent eac4cd54a4
commit 443d327edf
16 changed files with 800 additions and 240 deletions

Cargo.lock (generated)

@@ -750,6 +750,18 @@ dependencies = [
  "tokio-postgres",
 ]
 
+[[package]]
+name = "diesel-derive-enum"
+version = "2.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "81c5131a2895ef64741dad1d483f358c2a229a3a2d1b256778cdc5e146db64d4"
+dependencies = [
+ "heck",
+ "proc-macro2",
+ "quote",
+ "syn 2.0.29",
+]
+
 [[package]]
 name = "diesel_derives"
 version = "2.1.1"

@@ -1807,6 +1819,7 @@ dependencies = [
  "deadpool",
  "diesel",
  "diesel-async",
+ "diesel-derive-enum",
  "flume",
  "futures-core",
  "hex",

Cargo.toml

@@ -30,6 +30,7 @@ dashmap = "5.1.0"
 deadpool = { version = "0.9.5", features = ["rt_tokio_1"] }
 diesel = { version = "2.1.1", features = ["postgres_backend", "serde_json", "time", "uuid"] }
 diesel-async = { version = "0.4.1", features = ["postgres", "deadpool"] }
+diesel-derive-enum = { version = "2.1.0", features = ["postgres"] }
 flume = "0.11.0"
 futures-core = "0.3"
 hex = "0.4.3"

(Postgres repo planning notes)

@@ -155,7 +155,7 @@ methods:
 CREATE TYPE job_status AS ENUM ('new', 'running');
 
-CREATE TABLE queue (
+CREATE TABLE job_queue (
     id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
     queue VARCHAR(30) NOT NULL,
     job JSONB NOT NULL,

@@ -171,14 +171,14 @@ CREATE INDEX heartbeat_index ON queue INCLUDE heartbeat;
 claiming a job can be
 ```sql
-UPDATE queue SET status = 'new', heartbeat = NULL
+UPDATE job_queue SET status = 'new', heartbeat = NULL
 WHERE
     heartbeat IS NOT NULL AND heartbeat < NOW - INTERVAL '2 MINUTES';
 
-UPDATE queue SET status = 'running', heartbeat = CURRENT_TIMESTAMP
+UPDATE job_queue SET status = 'running', heartbeat = CURRENT_TIMESTAMP
 WHERE id = (
     SELECT id
-    FROM queue
+    FROM job_queue
     WHERE status = 'new' AND queue = '$QUEUE'
     ORDER BY queue_time ASC
     FOR UPDATE SKIP LOCKED
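The hunk's context window cuts the claim query off after `FOR UPDATE SKIP LOCKED`. Assuming the standard skip-locked claim pattern, the completed statement would presumably end along these lines (the `LIMIT 1` and `RETURNING` clauses are assumptions, not visible in the diff):

```sql
-- Hedged sketch of the completed claim query from the planning notes.
UPDATE job_queue SET status = 'running', heartbeat = CURRENT_TIMESTAMP
WHERE id = (
    SELECT id
    FROM job_queue
    WHERE status = 'new' AND queue = '$QUEUE'
    ORDER BY queue_time ASC
    LIMIT 1                 -- assumed: claim one job per statement
    FOR UPDATE SKIP LOCKED  -- skip rows already locked by other workers
)
RETURNING id, job;          -- assumed: hand the claimed job to the worker
```

The Rust `pop` implementation in `src/repo/postgres.rs` below approximates this with a boxed `single_value()` subquery, with a `TODO` noting that `for_update().skip_locked()` still needs to be applied.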

src/error_code.rs

@@ -62,6 +62,9 @@ impl ErrorCode {
     pub(crate) const EXTRACT_UPLOAD_RESULT: ErrorCode = ErrorCode {
         code: "extract-upload-result",
     };
+    pub(crate) const EXTRACT_JOB: ErrorCode = ErrorCode {
+        code: "extract-job",
+    };
     pub(crate) const CONFLICTED_RECORD: ErrorCode = ErrorCode {
         code: "conflicted-record",
     };

src/queue.rs

@@ -7,7 +7,6 @@ use crate::{
     serde_str::Serde,
     store::Store,
 };
-use base64::{prelude::BASE64_STANDARD, Engine};
 use std::{
     future::Future,
     path::PathBuf,

@@ -20,32 +19,6 @@ use tracing::Instrument;
 mod cleanup;
 mod process;
 
-#[derive(Debug)]
-struct Base64Bytes(Vec<u8>);
-
-impl serde::Serialize for Base64Bytes {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: serde::Serializer,
-    {
-        let s = BASE64_STANDARD.encode(&self.0);
-        s.serialize(serializer)
-    }
-}
-
-impl<'de> serde::Deserialize<'de> for Base64Bytes {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        let s: String = serde::Deserialize::deserialize(deserializer)?;
-        BASE64_STANDARD
-            .decode(s)
-            .map(Base64Bytes)
-            .map_err(|e| serde::de::Error::custom(e.to_string()))
-    }
-}
-
 const CLEANUP_QUEUE: &str = "cleanup";
 const PROCESS_QUEUE: &str = "process";

@@ -91,18 +64,18 @@ pub(crate) async fn cleanup_alias(
     alias: Alias,
     token: DeleteToken,
 ) -> Result<(), Error> {
-    let job = serde_json::to_string(&Cleanup::Alias {
+    let job = serde_json::to_value(&Cleanup::Alias {
         alias: Serde::new(alias),
         token: Serde::new(token),
     })
     .map_err(UploadError::PushJob)?;
-    repo.push(CLEANUP_QUEUE, job.into()).await?;
+    repo.push(CLEANUP_QUEUE, job).await?;
     Ok(())
 }
 
 pub(crate) async fn cleanup_hash(repo: &Arc<dyn FullRepo>, hash: Hash) -> Result<(), Error> {
-    let job = serde_json::to_string(&Cleanup::Hash { hash }).map_err(UploadError::PushJob)?;
-    repo.push(CLEANUP_QUEUE, job.into()).await?;
+    let job = serde_json::to_value(&Cleanup::Hash { hash }).map_err(UploadError::PushJob)?;
+    repo.push(CLEANUP_QUEUE, job).await?;
     Ok(())
 }

@@ -110,11 +83,11 @@ pub(crate) async fn cleanup_identifier(
     repo: &Arc<dyn FullRepo>,
     identifier: &Arc<str>,
 ) -> Result<(), Error> {
-    let job = serde_json::to_string(&Cleanup::Identifier {
+    let job = serde_json::to_value(&Cleanup::Identifier {
         identifier: identifier.to_string(),
     })
     .map_err(UploadError::PushJob)?;
-    repo.push(CLEANUP_QUEUE, job.into()).await?;
+    repo.push(CLEANUP_QUEUE, job).await?;
     Ok(())
 }

@@ -124,26 +97,26 @@ async fn cleanup_variants(
     variant: Option<String>,
 ) -> Result<(), Error> {
     let job =
-        serde_json::to_string(&Cleanup::Variant { hash, variant }).map_err(UploadError::PushJob)?;
-    repo.push(CLEANUP_QUEUE, job.into()).await?;
+        serde_json::to_value(&Cleanup::Variant { hash, variant }).map_err(UploadError::PushJob)?;
+    repo.push(CLEANUP_QUEUE, job).await?;
     Ok(())
 }
 
 pub(crate) async fn cleanup_outdated_proxies(repo: &Arc<dyn FullRepo>) -> Result<(), Error> {
-    let job = serde_json::to_string(&Cleanup::OutdatedProxies).map_err(UploadError::PushJob)?;
-    repo.push(CLEANUP_QUEUE, job.into()).await?;
+    let job = serde_json::to_value(&Cleanup::OutdatedProxies).map_err(UploadError::PushJob)?;
+    repo.push(CLEANUP_QUEUE, job).await?;
     Ok(())
 }
 
 pub(crate) async fn cleanup_outdated_variants(repo: &Arc<dyn FullRepo>) -> Result<(), Error> {
-    let job = serde_json::to_string(&Cleanup::OutdatedVariants).map_err(UploadError::PushJob)?;
-    repo.push(CLEANUP_QUEUE, job.into()).await?;
+    let job = serde_json::to_value(&Cleanup::OutdatedVariants).map_err(UploadError::PushJob)?;
+    repo.push(CLEANUP_QUEUE, job).await?;
     Ok(())
 }
 
 pub(crate) async fn cleanup_all_variants(repo: &Arc<dyn FullRepo>) -> Result<(), Error> {
-    let job = serde_json::to_string(&Cleanup::AllVariants).map_err(UploadError::PushJob)?;
-    repo.push(CLEANUP_QUEUE, job.into()).await?;
+    let job = serde_json::to_value(&Cleanup::AllVariants).map_err(UploadError::PushJob)?;
+    repo.push(CLEANUP_QUEUE, job).await?;
     Ok(())
 }

@@ -153,13 +126,13 @@ pub(crate) async fn queue_ingest(
     upload_id: UploadId,
     declared_alias: Option<Alias>,
 ) -> Result<(), Error> {
-    let job = serde_json::to_string(&Process::Ingest {
+    let job = serde_json::to_value(&Process::Ingest {
         identifier: identifier.to_string(),
         declared_alias: declared_alias.map(Serde::new),
         upload_id: Serde::new(upload_id),
     })
     .map_err(UploadError::PushJob)?;
-    repo.push(PROCESS_QUEUE, job.into()).await?;
+    repo.push(PROCESS_QUEUE, job).await?;
     Ok(())
 }

@@ -170,14 +143,14 @@ pub(crate) async fn queue_generate(
     process_path: PathBuf,
     process_args: Vec<String>,
 ) -> Result<(), Error> {
-    let job = serde_json::to_string(&Process::Generate {
+    let job = serde_json::to_value(&Process::Generate {
         target_format,
         source: Serde::new(source),
         process_path,
         process_args,
     })
     .map_err(UploadError::PushJob)?;
-    repo.push(PROCESS_QUEUE, job.into()).await?;
+    repo.push(PROCESS_QUEUE, job).await?;
     Ok(())
 }

@@ -220,7 +193,7 @@ async fn process_jobs<S, F>(
         &'a Arc<dyn FullRepo>,
         &'a S,
         &'a Configuration,
-        &'a str,
+        serde_json::Value,
     ) -> LocalBoxFuture<'a, Result<(), Error>>
     + Copy,
 {

@@ -284,13 +257,13 @@ where
         &'a Arc<dyn FullRepo>,
         &'a S,
         &'a Configuration,
-        &'a str,
+        serde_json::Value,
     ) -> LocalBoxFuture<'a, Result<(), Error>>
     + Copy,
 {
     loop {
         let fut = async {
-            let (job_id, string) = repo.pop(queue, worker_id).await?;
+            let (job_id, job) = repo.pop(queue, worker_id).await?;
 
             let span = tracing::info_span!("Running Job");

@@ -303,7 +276,7 @@ where
                     queue,
                     worker_id,
                     job_id,
-                    (callback)(repo, store, config, string.as_ref()),
+                    (callback)(repo, store, config, job),
                 )
             })
             .instrument(span)

@@ -337,7 +310,7 @@ async fn process_image_jobs<S, F>(
         &'a S,
         &'a ProcessMap,
         &'a Configuration,
-        &'a str,
+        serde_json::Value,
     ) -> LocalBoxFuture<'a, Result<(), Error>>
     + Copy,
 {

@@ -373,13 +346,13 @@ where
         &'a S,
         &'a ProcessMap,
         &'a Configuration,
-        &'a str,
+        serde_json::Value,
     ) -> LocalBoxFuture<'a, Result<(), Error>>
     + Copy,
 {
     loop {
         let fut = async {
-            let (job_id, string) = repo.pop(queue, worker_id).await?;
+            let (job_id, job) = repo.pop(queue, worker_id).await?;
 
             let span = tracing::info_span!("Running Job");

@@ -392,7 +365,7 @@ where
                     queue,
                     worker_id,
                     job_id,
-                    (callback)(repo, store, process_map, config, string.as_ref()),
+                    (callback)(repo, store, process_map, config, job),
                 )
             })
             .instrument(span)

src/queue/cleanup.rs

@@ -14,13 +14,13 @@ pub(super) fn perform<'a, S>(
     repo: &'a ArcRepo,
     store: &'a S,
     configuration: &'a Configuration,
-    job: &'a str,
+    job: serde_json::Value,
 ) -> LocalBoxFuture<'a, Result<(), Error>>
 where
     S: Store,
 {
     Box::pin(async move {
-        match serde_json::from_str(job) {
+        match serde_json::from_value(job) {
             Ok(job) => match job {
                 Cleanup::Hash { hash: in_hash } => hash(repo, in_hash).await?,
                 Cleanup::Identifier {

src/queue/process.rs

@@ -17,13 +17,13 @@ pub(super) fn perform<'a, S>(
     store: &'a S,
     process_map: &'a ProcessMap,
     config: &'a Configuration,
-    job: &'a str,
+    job: serde_json::Value,
 ) -> LocalBoxFuture<'a, Result<(), Error>>
 where
     S: Store + 'static,
 {
     Box::pin(async move {
-        match serde_json::from_str(job) {
+        match serde_json::from_value(job) {
             Ok(job) => match job {
                 Process::Ingest {
                     identifier,

src/repo.rs

@@ -9,11 +9,15 @@ use std::{fmt::Debug, sync::Arc};
 use url::Url;
 use uuid::Uuid;
 
+mod alias;
+mod delete_token;
 mod hash;
 mod migrate;
 
 pub(crate) mod postgres;
 pub(crate) mod sled;
 
+pub(crate) use alias::Alias;
+pub(crate) use delete_token::DeleteToken;
 pub(crate) use hash::Hash;
 pub(crate) use migrate::{migrate_04, migrate_repo};

@@ -31,17 +35,6 @@ enum MaybeUuid {
     Name(String),
 }
 
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub(crate) struct Alias {
-    id: MaybeUuid,
-    extension: Option<String>,
-}
-
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub(crate) struct DeleteToken {
-    id: MaybeUuid,
-}
-
 #[derive(Debug)]
 pub(crate) struct HashAlreadyExists;
 #[derive(Debug)]

@@ -372,13 +365,13 @@ impl JobId {
 
 #[async_trait::async_trait(?Send)]
 pub(crate) trait QueueRepo: BaseRepo {
-    async fn push(&self, queue: &'static str, job: Arc<str>) -> Result<JobId, RepoError>;
+    async fn push(&self, queue: &'static str, job: serde_json::Value) -> Result<JobId, RepoError>;
 
     async fn pop(
         &self,
         queue: &'static str,
         worker_id: Uuid,
-    ) -> Result<(JobId, Arc<str>), RepoError>;
+    ) -> Result<(JobId, serde_json::Value), RepoError>;
 
     async fn heartbeat(
         &self,

@@ -400,7 +393,7 @@ impl<T> QueueRepo for Arc<T>
 where
     T: QueueRepo,
 {
-    async fn push(&self, queue: &'static str, job: Arc<str>) -> Result<JobId, RepoError> {
+    async fn push(&self, queue: &'static str, job: serde_json::Value) -> Result<JobId, RepoError> {
         T::push(self, queue, job).await
     }

@@ -408,7 +401,7 @@ where
         &self,
         queue: &'static str,
         worker_id: Uuid,
-    ) -> Result<(JobId, Arc<str>), RepoError> {
+    ) -> Result<(JobId, serde_json::Value), RepoError> {
         T::pop(self, queue, worker_id).await
     }

@@ -903,106 +896,6 @@ impl MaybeUuid {
     }
 }
 
-fn split_at_dot(s: &str) -> Option<(&str, &str)> {
-    let index = s.find('.')?;
-
-    Some(s.split_at(index))
-}
-
-impl Alias {
-    pub(crate) fn generate(extension: String) -> Self {
-        Alias {
-            id: MaybeUuid::Uuid(Uuid::new_v4()),
-            extension: Some(extension),
-        }
-    }
-
-    pub(crate) fn from_existing(alias: &str) -> Self {
-        if let Some((start, end)) = split_at_dot(alias) {
-            Alias {
-                id: MaybeUuid::from_str(start),
-                extension: Some(end.into()),
-            }
-        } else {
-            Alias {
-                id: MaybeUuid::from_str(alias),
-                extension: None,
-            }
-        }
-    }
-
-    pub(crate) fn extension(&self) -> Option<&str> {
-        self.extension.as_deref()
-    }
-
-    pub(crate) fn to_bytes(&self) -> Vec<u8> {
-        let mut v = self.id.as_bytes().to_vec();
-
-        if let Some(ext) = self.extension() {
-            v.extend_from_slice(ext.as_bytes());
-        }
-
-        v
-    }
-
-    pub(crate) fn from_slice(bytes: &[u8]) -> Option<Self> {
-        if let Ok(s) = std::str::from_utf8(bytes) {
-            Some(Self::from_existing(s))
-        } else if bytes.len() >= 16 {
-            let id = Uuid::from_slice(&bytes[0..16]).expect("Already checked length");
-
-            let extension = if bytes.len() > 16 {
-                Some(String::from_utf8_lossy(&bytes[16..]).to_string())
-            } else {
-                None
-            };
-
-            Some(Self {
-                id: MaybeUuid::Uuid(id),
-                extension,
-            })
-        } else {
-            None
-        }
-    }
-}
-
-impl DeleteToken {
-    pub(crate) fn from_existing(existing: &str) -> Self {
-        if let Ok(uuid) = Uuid::parse_str(existing) {
-            DeleteToken {
-                id: MaybeUuid::Uuid(uuid),
-            }
-        } else {
-            DeleteToken {
-                id: MaybeUuid::Name(existing.into()),
-            }
-        }
-    }
-
-    pub(crate) fn generate() -> Self {
-        Self {
-            id: MaybeUuid::Uuid(Uuid::new_v4()),
-        }
-    }
-
-    pub(crate) fn to_bytes(&self) -> Vec<u8> {
-        self.id.as_bytes().to_vec()
-    }
-
-    pub(crate) fn from_slice(bytes: &[u8]) -> Option<Self> {
-        if let Ok(s) = std::str::from_utf8(bytes) {
-            Some(DeleteToken::from_existing(s))
-        } else if bytes.len() == 16 {
-            Some(DeleteToken {
-                id: MaybeUuid::Uuid(Uuid::from_slice(bytes).ok()?),
-            })
-        } else {
-            None
-        }
-    }
-}
-
 impl UploadId {
     pub(crate) fn generate() -> Self {
         Self { id: Uuid::new_v4() }

@@ -1036,38 +929,6 @@ impl std::fmt::Display for MaybeUuid {
     }
 }
 
-impl std::str::FromStr for DeleteToken {
-    type Err = std::convert::Infallible;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(DeleteToken::from_existing(s))
-    }
-}
-
-impl std::fmt::Display for DeleteToken {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.id)
-    }
-}
-
-impl std::str::FromStr for Alias {
-    type Err = std::convert::Infallible;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(Alias::from_existing(s))
-    }
-}
-
-impl std::fmt::Display for Alias {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        if let Some(ext) = self.extension() {
-            write!(f, "{}{ext}", self.id)
-        } else {
-            write!(f, "{}", self.id)
-        }
-    }
-}
-
 #[cfg(test)]
 mod tests {
     use super::{Alias, DeleteToken, MaybeUuid, Uuid};

src/repo/alias.rs (new file, 121 lines)

@@ -0,0 +1,121 @@
use diesel::{backend::Backend, sql_types::VarChar, AsExpression, FromSqlRow};
use uuid::Uuid;
use super::MaybeUuid;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, AsExpression, FromSqlRow)]
#[diesel(sql_type = VarChar)]
pub(crate) struct Alias {
id: MaybeUuid,
extension: Option<String>,
}
impl diesel::serialize::ToSql<VarChar, diesel::pg::Pg> for Alias {
fn to_sql<'b>(
&'b self,
out: &mut diesel::serialize::Output<'b, '_, diesel::pg::Pg>,
) -> diesel::serialize::Result {
let s = self.to_string();
<String as diesel::serialize::ToSql<VarChar, diesel::pg::Pg>>::to_sql(
&s,
&mut out.reborrow(),
)
}
}
impl<B> diesel::deserialize::FromSql<VarChar, B> for Alias
where
B: Backend,
String: diesel::deserialize::FromSql<VarChar, B>,
{
fn from_sql(
bytes: <B as diesel::backend::Backend>::RawValue<'_>,
) -> diesel::deserialize::Result<Self> {
let s = String::from_sql(bytes)?;
s.parse().map_err(From::from)
}
}
impl Alias {
pub(crate) fn generate(extension: String) -> Self {
Alias {
id: MaybeUuid::Uuid(Uuid::new_v4()),
extension: Some(extension),
}
}
pub(crate) fn from_existing(alias: &str) -> Self {
if let Some((start, end)) = split_at_dot(alias) {
Alias {
id: MaybeUuid::from_str(start),
extension: Some(end.into()),
}
} else {
Alias {
id: MaybeUuid::from_str(alias),
extension: None,
}
}
}
pub(crate) fn extension(&self) -> Option<&str> {
self.extension.as_deref()
}
pub(crate) fn to_bytes(&self) -> Vec<u8> {
let mut v = self.id.as_bytes().to_vec();
if let Some(ext) = self.extension() {
v.extend_from_slice(ext.as_bytes());
}
v
}
pub(crate) fn from_slice(bytes: &[u8]) -> Option<Self> {
if let Ok(s) = std::str::from_utf8(bytes) {
Some(Self::from_existing(s))
} else if bytes.len() >= 16 {
let id = Uuid::from_slice(&bytes[0..16]).expect("Already checked length");
let extension = if bytes.len() > 16 {
Some(String::from_utf8_lossy(&bytes[16..]).to_string())
} else {
None
};
Some(Self {
id: MaybeUuid::Uuid(id),
extension,
})
} else {
None
}
}
}
fn split_at_dot(s: &str) -> Option<(&str, &str)> {
let index = s.find('.')?;
Some(s.split_at(index))
}
impl std::str::FromStr for Alias {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Alias::from_existing(s))
}
}
impl std::fmt::Display for Alias {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
if let Some(ext) = self.extension() {
write!(f, "{}{ext}", self.id)
} else {
write!(f, "{}", self.id)
}
}
}

src/repo/delete_token.rs (new file, 88 lines)

@@ -0,0 +1,88 @@
use diesel::{backend::Backend, sql_types::VarChar, AsExpression, FromSqlRow};
use uuid::Uuid;
use super::MaybeUuid;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, AsExpression, FromSqlRow)]
#[diesel(sql_type = VarChar)]
pub(crate) struct DeleteToken {
id: MaybeUuid,
}
impl diesel::serialize::ToSql<VarChar, diesel::pg::Pg> for DeleteToken {
fn to_sql<'b>(
&'b self,
out: &mut diesel::serialize::Output<'b, '_, diesel::pg::Pg>,
) -> diesel::serialize::Result {
let s = self.to_string();
<String as diesel::serialize::ToSql<VarChar, diesel::pg::Pg>>::to_sql(
&s,
&mut out.reborrow(),
)
}
}
impl<B> diesel::deserialize::FromSql<VarChar, B> for DeleteToken
where
B: Backend,
String: diesel::deserialize::FromSql<VarChar, B>,
{
fn from_sql(
bytes: <B as diesel::backend::Backend>::RawValue<'_>,
) -> diesel::deserialize::Result<Self> {
let s = String::from_sql(bytes)?;
s.parse().map_err(From::from)
}
}
impl DeleteToken {
pub(crate) fn from_existing(existing: &str) -> Self {
if let Ok(uuid) = Uuid::parse_str(existing) {
DeleteToken {
id: MaybeUuid::Uuid(uuid),
}
} else {
DeleteToken {
id: MaybeUuid::Name(existing.into()),
}
}
}
pub(crate) fn generate() -> Self {
Self {
id: MaybeUuid::Uuid(Uuid::new_v4()),
}
}
pub(crate) fn to_bytes(&self) -> Vec<u8> {
self.id.as_bytes().to_vec()
}
pub(crate) fn from_slice(bytes: &[u8]) -> Option<Self> {
if let Ok(s) = std::str::from_utf8(bytes) {
Some(DeleteToken::from_existing(s))
} else if bytes.len() == 16 {
Some(DeleteToken {
id: MaybeUuid::Uuid(Uuid::from_slice(bytes).ok()?),
})
} else {
None
}
}
}
impl std::str::FromStr for DeleteToken {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(DeleteToken::from_existing(s))
}
}
impl std::fmt::Display for DeleteToken {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.id)
}
}

src/repo/postgres.rs

@@ -1,8 +1,10 @@
 mod embedded;
+mod job_status;
 mod schema;
 
 use std::sync::Arc;
 
+use dashmap::{DashMap, DashSet};
 use diesel::prelude::*;
 use diesel_async::{
     pooled_connection::{

@@ -11,20 +13,58 @@ use diesel_async::{
     },
     AsyncConnection, AsyncPgConnection, RunQueryDsl,
 };
+use tokio::sync::Notify;
 use tokio_postgres::{AsyncMessage, Notification};
 use url::Url;
+use uuid::Uuid;
 
-use crate::error_code::ErrorCode;
+use crate::{details::Details, error_code::ErrorCode};
+
+use self::job_status::JobStatus;
 
 use super::{
-    BaseRepo, Hash, HashAlreadyExists, HashPage, HashRepo, OrderedHash, RepoError,
-    VariantAlreadyExists,
+    Alias, AliasAlreadyExists, AliasRepo, BaseRepo, DeleteToken, DetailsRepo, Hash,
+    HashAlreadyExists, HashPage, HashRepo, JobId, OrderedHash, QueueRepo, RepoError, SettingsRepo,
+    StoreMigrationRepo, UploadId, VariantAlreadyExists,
 };
 
 #[derive(Clone)]
 pub(crate) struct PostgresRepo {
+    inner: Arc<Inner>,
+    notifications: Arc<actix_rt::task::JoinHandle<()>>,
+}
+
+struct Inner {
     pool: Pool<AsyncPgConnection>,
-    notifications: flume::Receiver<Notification>,
+    queue_notifications: DashMap<String, Arc<Notify>>,
+    completed_uploads: DashSet<UploadId>,
+    upload_notifier: Notify,
+}
+
+async fn delegate_notifications(receiver: flume::Receiver<Notification>, inner: Arc<Inner>) {
+    while let Ok(notification) = receiver.recv_async().await {
+        match notification.channel() {
+            "queue_status_channel" => {
+                // new job inserted for queue
+                let queue_name = notification.payload().to_string();
+
+                inner
+                    .queue_notifications
+                    .entry(queue_name)
+                    .or_insert_with(|| Arc::new(Notify::new()))
+                    .notify_waiters();
+            }
+            channel => {
+                tracing::info!(
+                    "Unhandled postgres notification: {channel}: {}",
+                    notification.payload()
+                );
+            }
+        }
+
+        todo!()
+    }
+
+    tracing::warn!("Notification delegator shutting down");
 }
 
 #[derive(Debug, thiserror::Error)]
@@ -46,6 +86,15 @@ pub(crate) enum PostgresError {
     #[error("Error in database")]
     Diesel(#[source] diesel::result::Error),
+
+    #[error("Error deserializing hex value")]
+    Hex(#[source] hex::FromHexError),
+
+    #[error("Error serializing details")]
+    SerializeDetails(#[source] serde_json::Error),
+
+    #[error("Error deserializing details")]
+    DeserializeDetails(#[source] serde_json::Error),
 }
 
 impl PostgresError {
@@ -71,7 +120,7 @@ impl PostgresRepo {
         handle.abort();
         let _ = handle.await;
 
-        let (tx, notifications) = flume::bounded(10);
+        let (tx, rx) = flume::bounded(10);
 
         let mut config = ManagerConfig::default();
         config.custom_setup = build_handler(tx);

@@ -84,8 +133,17 @@ impl PostgresRepo {
             .build()
             .map_err(ConnectPostgresError::BuildPool)?;
 
-        Ok(PostgresRepo {
+        let inner = Arc::new(Inner {
             pool,
+            queue_notifications: DashMap::new(),
+            completed_uploads: DashSet::new(),
+            upload_notifier: Notify::new(),
+        });
+
+        let notifications = Arc::new(actix_rt::spawn(delegate_notifications(rx, inner.clone())));
+
+        Ok(PostgresRepo {
+            inner,
             notifications,
         })
     }
@@ -147,7 +205,7 @@ impl HashRepo for PostgresRepo {
     async fn size(&self) -> Result<u64, RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let count = hashes
             .count()

@@ -161,7 +219,7 @@ impl HashRepo for PostgresRepo {
     async fn bound(&self, input_hash: Hash) -> Result<Option<OrderedHash>, RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let timestamp = hashes
             .select(created_at)

@@ -185,7 +243,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<HashPage, RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let timestamp = to_primitive(date);

@@ -212,7 +270,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<HashPage, RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let (mut page, prev) = if let Some(OrderedHash {
             timestamp,

@@ -276,7 +334,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<Result<(), HashAlreadyExists>, RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let timestamp = to_primitive(timestamp);

@@ -306,7 +364,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<(), RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         diesel::update(hashes)
             .filter(hash.eq(&input_hash))

@@ -321,7 +379,7 @@ impl HashRepo for PostgresRepo {
     async fn identifier(&self, input_hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let opt = hashes
             .select(identifier)

@@ -342,7 +400,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<Result<(), VariantAlreadyExists>, RepoError> {
         use schema::variants::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let res = diesel::insert_into(variants)
             .values((

@@ -370,7 +428,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<Option<Arc<str>>, RepoError> {
         use schema::variants::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let opt = variants
             .select(identifier)

@@ -388,7 +446,7 @@ impl HashRepo for PostgresRepo {
     async fn variants(&self, input_hash: Hash) -> Result<Vec<(String, Arc<str>)>, RepoError> {
         use schema::variants::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let vec = variants
             .select((variant, identifier))

@@ -410,7 +468,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<(), RepoError> {
         use schema::variants::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         diesel::delete(variants)
             .filter(hash.eq(&input_hash))

@@ -429,7 +487,7 @@ impl HashRepo for PostgresRepo {
     ) -> Result<(), RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         diesel::update(hashes)
             .filter(hash.eq(&input_hash))

@@ -444,7 +502,7 @@ impl HashRepo for PostgresRepo {
     async fn motion_identifier(&self, input_hash: Hash) -> Result<Option<Arc<str>>, RepoError> {
         use schema::hashes::dsl::*;
 
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         let opt = hashes
             .select(motion_identifier)

@@ -460,7 +518,7 @@ impl HashRepo for PostgresRepo {
     }
 
     async fn cleanup_hash(&self, input_hash: Hash) -> Result<(), RepoError> {
-        let mut conn = self.pool.get().await.map_err(PostgresError::Pool)?;
+        let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
 
         conn.transaction(|conn| {
             Box::pin(async move {

@@ -482,6 +540,425 @@ impl HashRepo for PostgresRepo {
    }
}
#[async_trait::async_trait(?Send)]
impl AliasRepo for PostgresRepo {
async fn create_alias(
&self,
input_alias: &Alias,
delete_token: &DeleteToken,
input_hash: Hash,
) -> Result<Result<(), AliasAlreadyExists>, RepoError> {
use schema::aliases::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let res = diesel::insert_into(aliases)
.values((
alias.eq(input_alias),
hash.eq(&input_hash),
token.eq(delete_token),
))
.execute(&mut conn)
.await;
match res {
Ok(_) => Ok(Ok(())),
Err(diesel::result::Error::DatabaseError(
diesel::result::DatabaseErrorKind::UniqueViolation,
_,
)) => Ok(Err(AliasAlreadyExists)),
Err(e) => Err(PostgresError::Diesel(e).into()),
}
}
async fn delete_token(&self, input_alias: &Alias) -> Result<Option<DeleteToken>, RepoError> {
use schema::aliases::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let opt = aliases
.select(token)
.filter(alias.eq(input_alias))
.get_result(&mut conn)
.await
.optional()
.map_err(PostgresError::Diesel)?;
Ok(opt)
}
async fn hash(&self, input_alias: &Alias) -> Result<Option<Hash>, RepoError> {
use schema::aliases::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let opt = aliases
.select(hash)
.filter(alias.eq(input_alias))
.get_result(&mut conn)
.await
.optional()
.map_err(PostgresError::Diesel)?;
Ok(opt)
}
async fn aliases_for_hash(&self, input_hash: Hash) -> Result<Vec<Alias>, RepoError> {
use schema::aliases::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let vec = aliases
.select(alias)
.filter(hash.eq(&input_hash))
.get_results(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(vec)
}
async fn cleanup_alias(&self, input_alias: &Alias) -> Result<(), RepoError> {
use schema::aliases::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
diesel::delete(aliases)
.filter(alias.eq(input_alias))
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
}
#[async_trait::async_trait(?Send)]
impl SettingsRepo for PostgresRepo {
async fn set(&self, input_key: &'static str, input_value: Arc<[u8]>) -> Result<(), RepoError> {
use schema::settings::dsl::*;
let input_value = hex::encode(input_value);
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
diesel::insert_into(settings)
.values((key.eq(input_key), value.eq(input_value)))
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
async fn get(&self, input_key: &'static str) -> Result<Option<Arc<[u8]>>, RepoError> {
use schema::settings::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let opt = settings
.select(value)
.filter(key.eq(input_key))
.get_result::<String>(&mut conn)
.await
.optional()
.map_err(PostgresError::Diesel)?
.map(hex::decode)
.transpose()
.map_err(PostgresError::Hex)?
.map(Arc::from);
Ok(opt)
}
async fn remove(&self, input_key: &'static str) -> Result<(), RepoError> {
use schema::settings::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
diesel::delete(settings)
.filter(key.eq(input_key))
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
}
#[async_trait::async_trait(?Send)]
impl DetailsRepo for PostgresRepo {
async fn relate_details(
&self,
input_identifier: &Arc<str>,
input_details: &Details,
) -> Result<(), RepoError> {
use schema::details::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let value =
serde_json::to_value(&input_details.inner).map_err(PostgresError::SerializeDetails)?;
diesel::insert_into(details)
.values((identifier.eq(input_identifier.as_ref()), json.eq(&value)))
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
async fn details(&self, input_identifier: &Arc<str>) -> Result<Option<Details>, RepoError> {
use schema::details::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let opt = details
.select(json)
.filter(identifier.eq(input_identifier.as_ref()))
.get_result::<serde_json::Value>(&mut conn)
.await
.optional()
.map_err(PostgresError::Diesel)?
.map(serde_json::from_value)
.transpose()
.map_err(PostgresError::DeserializeDetails)?
.map(|inner| Details { inner });
Ok(opt)
}
async fn cleanup_details(&self, input_identifier: &Arc<str>) -> Result<(), RepoError> {
use schema::details::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
diesel::delete(details)
.filter(identifier.eq(input_identifier.as_ref()))
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
}
#[async_trait::async_trait(?Send)]
impl QueueRepo for PostgresRepo {
async fn push(
&self,
queue_name: &'static str,
job_json: serde_json::Value,
) -> Result<JobId, RepoError> {
use schema::job_queue::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let job_id = diesel::insert_into(job_queue)
.values((queue.eq(queue_name), job.eq(job_json)))
.returning(id)
.get_result::<Uuid>(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(JobId(job_id))
}
async fn pop(
&self,
queue_name: &'static str,
worker_id: Uuid,
) -> Result<(JobId, serde_json::Value), RepoError> {
use schema::job_queue::dsl::*;
loop {
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let notifier: Arc<Notify> = self
.inner
.queue_notifications
.entry(String::from(queue_name))
.or_insert_with(|| Arc::new(Notify::new()))
.clone();
diesel::sql_query("LISTEN queue_status_channel;")
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
let timestamp = to_primitive(time::OffsetDateTime::now_utc());
diesel::update(job_queue)
.filter(heartbeat.le(timestamp.saturating_sub(time::Duration::minutes(2))))
.set((
heartbeat.eq(Option::<time::PrimitiveDateTime>::None),
status.eq(JobStatus::New),
))
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
// TODO: for_update().skip_locked()
let id_query = job_queue
.select(id)
.filter(status.eq(JobStatus::New).and(queue.eq(queue_name)))
.order(queue_time)
.into_boxed()
.single_value();
let opt = diesel::update(job_queue)
.filter(id.nullable().eq(id_query))
.set((
heartbeat.eq(timestamp),
status.eq(JobStatus::Running),
worker.eq(worker_id),
))
.returning((id, job))
.get_result(&mut conn)
.await
.optional()
.map_err(PostgresError::Diesel)?;
if let Some((job_id, job_json)) = opt {
diesel::sql_query("UNLISTEN queue_status_channel;")
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
return Ok((JobId(job_id), job_json));
}
let _ = actix_rt::time::timeout(std::time::Duration::from_secs(5), notifier.notified())
.await;
diesel::sql_query("UNLISTEN queue_status_channel;")
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
drop(conn);
}
}
async fn heartbeat(
&self,
queue_name: &'static str,
worker_id: Uuid,
job_id: JobId,
) -> Result<(), RepoError> {
use schema::job_queue::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let timestamp = to_primitive(time::OffsetDateTime::now_utc());
diesel::update(job_queue)
.filter(
id.eq(job_id.0)
.and(queue.eq(queue_name))
.and(worker.eq(worker_id)),
)
.set(heartbeat.eq(timestamp))
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
async fn complete_job(
&self,
queue_name: &'static str,
worker_id: Uuid,
job_id: JobId,
) -> Result<(), RepoError> {
use schema::job_queue::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
diesel::delete(job_queue)
.filter(
id.eq(job_id.0)
.and(queue.eq(queue_name))
.and(worker.eq(worker_id)),
)
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
}
#[async_trait::async_trait(?Send)]
impl StoreMigrationRepo for PostgresRepo {
async fn is_continuing_migration(&self) -> Result<bool, RepoError> {
use schema::store_migrations::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let count = store_migrations
.count()
.get_result::<i64>(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(count > 0)
}
async fn mark_migrated(
&self,
input_old_identifier: &Arc<str>,
input_new_identifier: &Arc<str>,
) -> Result<(), RepoError> {
use schema::store_migrations::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
diesel::insert_into(store_migrations)
.values((
old_identifier.eq(input_old_identifier.as_ref()),
new_identifier.eq(input_new_identifier.as_ref()),
))
.on_conflict((old_identifier, new_identifier))
.do_nothing()
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
async fn is_migrated(&self, input_old_identifier: &Arc<str>) -> Result<bool, RepoError> {
use schema::store_migrations::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
let b = diesel::select(diesel::dsl::exists(
store_migrations.filter(old_identifier.eq(input_old_identifier.as_ref())),
))
.get_result(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(b)
}
async fn clear(&self) -> Result<(), RepoError> {
use schema::store_migrations::dsl::*;
let mut conn = self.inner.pool.get().await.map_err(PostgresError::Pool)?;
diesel::delete(store_migrations)
.execute(&mut conn)
.await
.map_err(PostgresError::Diesel)?;
Ok(())
}
}
impl std::fmt::Debug for PostgresRepo {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("PostgresRepo")

src/repo/postgres/job_status.rs (new file)

@@ -0,0 +1,6 @@
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, diesel_derive_enum::DbEnum)]
#[ExistingTypePath = "crate::repo::postgres::schema::sql_types::JobStatus"]
pub(super) enum JobStatus {
New,
Running,
}
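`ExistingTypePath` points diesel-derive-enum at an enum type the migrations create, rather than having the derive generate its own. Judging by the planning notes above, the matching DDL is the `job_status` type (a sketch; the derive's default mapping snake-cases the variant names, so `New`/`Running` line up with these labels):

```sql
-- Postgres enum backing the JobStatus Rust type above.
CREATE TYPE job_status AS ENUM ('new', 'running');
```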

(embedded Postgres migration for the job queue table)

@@ -11,6 +11,7 @@ pub(crate) fn migration() -> String {
         t.inject_custom(r#""id" UUID PRIMARY KEY DEFAULT gen_random_uuid() NOT NULL UNIQUE"#);
         t.add_column("queue", types::text().size(50).nullable(false));
         t.add_column("job", types::custom("jsonb").nullable(false));
+        t.add_column("worker", types::uuid().nullable(true));
         t.add_column("status", types::custom("job_status").nullable(false));
         t.add_column(
             "queue_time",

@@ -18,7 +19,7 @@ pub(crate) fn migration() -> String {
                 .nullable(false)
                 .default(AutogenFunction::CurrentTimestamp),
         );
-        t.add_column("heartbeat", types::datetime());
+        t.add_column("heartbeat", types::datetime().nullable(true));
 
         t.add_index("queue_status_index", types::index(["queue", "status"]));
         t.add_index("heartbeat_index", types::index(["heartbeat"]));

@@ -30,7 +31,7 @@ CREATE OR REPLACE FUNCTION queue_status_notify()
 RETURNS trigger AS
 $$
 BEGIN
-    PERFORM pg_notify('queue_status_channel', NEW.id::text);
+    PERFORM pg_notify('queue_status_channel', NEW.queue::text);
     RETURN NEW;
 END;
 $$ LANGUAGE plpgsql;
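Only the notify function appears in this hunk; the trigger that invokes it sits outside the context window. Wiring it up would presumably look something like this sketch (the trigger name is an assumption):

```sql
-- Hedged sketch: fire queue_status_notify() on every insert into job_queue,
-- waking any LISTEN queue_status_channel subscriber with the queue name.
CREATE TRIGGER queue_status_trigger
    AFTER INSERT ON job_queue
    FOR EACH ROW
    EXECUTE PROCEDURE queue_status_notify();
```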

(embedded Postgres migration for the store_migrations table)

@@ -7,9 +7,10 @@ pub(crate) fn migration() -> String {
     m.create_table("store_migrations", |t| {
         t.add_column(
-            "identifier",
+            "old_identifier",
             types::text().primary(true).nullable(false).unique(true),
         );
+        t.add_column("new_identifier", types::text().nullable(false).unique(true));
     });
 
     m.make::<Pg>().to_string()

src/repo/postgres/schema.rs

@@ -38,9 +38,10 @@ diesel::table! {
         id -> Uuid,
         queue -> Text,
         job -> Jsonb,
+        worker -> Nullable<Uuid>,
         status -> JobStatus,
         queue_time -> Timestamp,
-        heartbeat -> Timestamp,
+        heartbeat -> Nullable<Timestamp>,
     }
 }

@@ -72,8 +73,9 @@ diesel::table! {
 }
 
 diesel::table! {
-    store_migrations (identifier) {
-        identifier -> Text,
+    store_migrations (old_identifier) {
+        old_identifier -> Text,
+        new_identifier -> Text,
     }
 }
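Combining the migration changes with this diesel schema, the job queue table implied by the diff looks roughly like the following (a reconstruction, not copied from the migrations):

```sql
-- Approximate DDL implied by the updated job_queue schema.
CREATE TABLE job_queue (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    queue TEXT NOT NULL,        -- the migration sizes this at 50 chars
    job JSONB NOT NULL,
    worker UUID,                -- NULL until a worker claims the job
    status job_status NOT NULL,
    queue_time TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
    heartbeat TIMESTAMP         -- NULL whenever the job is not running
);
```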

src/repo/sled.rs

@@ -46,10 +46,10 @@ pub(crate) enum SledError {
     Sled(#[from] sled::Error),
 
     #[error("Invalid details json")]
-    Details(serde_json::Error),
+    Details(#[source] serde_json::Error),
 
     #[error("Invalid upload result json")]
-    UploadResult(serde_json::Error),
+    UploadResult(#[source] serde_json::Error),
 
     #[error("Error parsing variant key")]
     VariantKey(#[from] VariantKeyError),

@@ -57,6 +57,9 @@ pub(crate) enum SledError {
     #[error("Invalid string data in db")]
     Utf8(#[source] std::str::Utf8Error),
 
+    #[error("Invalid job json")]
+    Job(#[source] serde_json::Error),
+
     #[error("Operation panicked")]
     Panic,

@@ -70,6 +73,7 @@ impl SledError {
             Self::Sled(_) | Self::VariantKey(_) | Self::Utf8(_) => ErrorCode::SLED_ERROR,
             Self::Details(_) => ErrorCode::EXTRACT_DETAILS,
             Self::UploadResult(_) => ErrorCode::EXTRACT_UPLOAD_RESULT,
+            Self::Job(_) => ErrorCode::EXTRACT_JOB,
             Self::Panic => ErrorCode::PANIC,
             Self::Conflict => ErrorCode::CONFLICTED_RECORD,
         }

@@ -660,11 +664,16 @@ fn try_into_arc_str(ivec: IVec) -> Result<Arc<str>, SledError> {
 #[async_trait::async_trait(?Send)]
 impl QueueRepo for SledRepo {
     #[tracing::instrument(skip(self))]
-    async fn push(&self, queue_name: &'static str, job: Arc<str>) -> Result<JobId, RepoError> {
+    async fn push(
+        &self,
+        queue_name: &'static str,
+        job: serde_json::Value,
+    ) -> Result<JobId, RepoError> {
         let metrics_guard = PushMetricsGuard::guard(queue_name);
 
         let id = JobId::gen();
         let key = job_key(queue_name, id);
+        let job = serde_json::to_vec(&job).map_err(SledError::Job)?;
 
         let queue = self.queue.clone();
         let job_state = self.job_state.clone();

@@ -709,7 +718,7 @@ impl QueueRepo for SledRepo {
         &self,
         queue_name: &'static str,
         worker_id: Uuid,
-    ) -> Result<(JobId, Arc<str>), RepoError> {
+    ) -> Result<(JobId, serde_json::Value), RepoError> {
         let metrics_guard = PopMetricsGuard::guard(queue_name);
 
         let now = time::OffsetDateTime::now_utc();

@@ -762,10 +771,14 @@ impl QueueRepo for SledRepo {
                 tracing::Span::current().record("job_id", &format!("{job_id:?}"));
 
-                let opt = queue.get(&key)?.map(try_into_arc_str).transpose()?;
+                let opt = queue
+                    .get(&key)?
+                    .map(|ivec| serde_json::from_slice(&ivec[..]))
+                    .transpose()
+                    .map_err(SledError::Job)?;
 
                 return Ok(opt.map(|job| (job_id, job)))
-                    as Result<Option<(JobId, Arc<str>)>, SledError>;
+                    as Result<Option<(JobId, serde_json::Value)>, SledError>;
             }
 
             Ok(None)