Unit tests and cleanup for outgoing federation code (#4733)

* test setup

* code cleanup

* cleanup

* move stats to own file

* basic test working

* cleanup

* processes test

* more test cases

* fmt

* add file

* add assert

* error handling

* fmt

* use instance id instead of domain for stats channel
Author: Nutomic (committed by GitHub), 2024-05-27 15:34:58 +02:00
parent 0d5db29bc9
commit af034f3b5e
10 changed files with 449 additions and 236 deletions
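The diffs below replace the free-standing start_stop_federation_workers loop with a SendManager that owns one cancellable worker per instance and reports queue state over a channel keyed by InstanceId rather than by domain string. Below is a minimal, self-contained sketch of that shape; it is illustrative only and not Lemmy code. It assumes only tokio (with the "full" feature) and tokio-util as dependencies, and the local InstanceId newtype and u64 stats payload are stand-ins for the real types in the diff.

// Standalone sketch: a manager owning per-instance cancellable workers,
// with progress reported over a channel keyed by an instance id newtype.
use std::collections::HashMap;
use tokio::{
  sync::mpsc::{unbounded_channel, UnboundedSender},
  task::JoinHandle,
  time::{sleep, Duration},
};
use tokio_util::sync::CancellationToken;

#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq)]
struct InstanceId(i32); // stand-in for lemmy_db_schema::newtypes::InstanceId

struct Manager {
  workers: HashMap<InstanceId, (CancellationToken, JoinHandle<()>)>,
  stats_sender: UnboundedSender<(InstanceId, u64)>, // payload type is illustrative
}

impl Manager {
  fn start_worker(&mut self, id: InstanceId) {
    let cancel = CancellationToken::new();
    let (child, stats) = (cancel.clone(), self.stats_sender.clone());
    let handle = tokio::spawn(async move {
      let mut sent: u64 = 0;
      loop {
        tokio::select! {
          // pretend work: periodically "send" an activity and report progress
          () = sleep(Duration::from_millis(50)) => {
            sent += 1;
            // stats are keyed by instance id, not by domain string
            let _ = stats.send((id, sent));
          }
          () = child.cancelled() => return,
        }
      }
    });
    self.workers.insert(id, (cancel, handle));
  }

  async fn cancel_all(self) {
    // cancel each worker and wait for it to exit
    for (_, (cancel, handle)) in self.workers {
      cancel.cancel();
      let _ = handle.await;
    }
  }
}

#[tokio::main]
async fn main() {
  let (stats_sender, mut stats_receiver) = unbounded_channel();
  let mut manager = Manager {
    workers: HashMap::new(),
    stats_sender,
  };
  manager.start_worker(InstanceId(1));
  manager.start_worker(InstanceId(2));
  sleep(Duration::from_millis(120)).await;
  manager.cancel_all().await;
  // all senders are dropped now, so this drains buffered stats and exits
  while let Some((id, sent)) = stats_receiver.recv().await {
    println!("{id:?}: {sent} activities sent");
  }
}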

Cargo.lock (generated)

@@ -2986,10 +2986,12 @@ dependencies = [
  "lemmy_apub",
  "lemmy_db_schema",
  "lemmy_db_views_actor",
+ "lemmy_utils",
  "moka",
  "once_cell",
  "reqwest 0.11.27",
  "serde_json",
+ "serial_test",
  "tokio",
  "tokio-util",
  "tracing",


@@ -94,11 +94,15 @@ impl Instance {
       .await
   }
 
-  #[cfg(test)]
+  /// Only for use in tests
   pub async fn delete_all(pool: &mut DbPool<'_>) -> Result<usize, Error> {
     let conn = &mut get_conn(pool).await?;
+    diesel::delete(federation_queue_state::table)
+      .execute(conn)
+      .await?;
     diesel::delete(instance::table).execute(conn).await
   }
 
   pub async fn allowlist(pool: &mut DbPool<'_>) -> Result<Vec<Self>, Error> {
     let conn = &mut get_conn(pool).await?;
     instance::table


@@ -127,11 +127,13 @@ pub struct LanguageId(pub i32);
 /// The comment reply id.
 pub struct CommentReplyId(i32);
 
-#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, Default)]
+#[derive(
+  Debug, Copy, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, Default, Ord, PartialOrd,
+)]
 #[cfg_attr(feature = "full", derive(DieselNewType, TS))]
 #[cfg_attr(feature = "full", ts(export))]
 /// The instance id.
-pub struct InstanceId(i32);
+pub struct InstanceId(pub i32);
 
 #[derive(
   Debug, Copy, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, Default, PartialOrd, Ord,


@@ -19,6 +19,7 @@ lemmy_api_common.workspace = true
 lemmy_apub.workspace = true
 lemmy_db_schema = { workspace = true, features = ["full"] }
 lemmy_db_views_actor.workspace = true
+lemmy_utils.workspace = true
 
 activitypub_federation.workspace = true
 anyhow.workspace = true
@@ -33,3 +34,6 @@ tokio = { workspace = true, features = ["full"] }
 tracing.workspace = true
 moka.workspace = true
 tokio-util = "0.7.11"
+
+[dev-dependencies]
+serial_test = { workspace = true }


@@ -1,20 +1,22 @@
 use crate::{util::CancellableTask, worker::InstanceWorker};
 use activitypub_federation::config::FederationConfig;
-use chrono::{Local, Timelike};
-use lemmy_api_common::{context::LemmyContext, federate_retry_sleep_duration};
+use lemmy_api_common::context::LemmyContext;
 use lemmy_db_schema::{
   newtypes::InstanceId,
   source::{federation_queue_state::FederationQueueState, instance::Instance},
-  utils::{ActualDbPool, DbPool},
 };
+use lemmy_utils::error::LemmyResult;
+use stats::receive_print_stats;
 use std::{collections::HashMap, time::Duration};
 use tokio::{
-  sync::mpsc::{unbounded_channel, UnboundedReceiver},
+  sync::mpsc::{unbounded_channel, UnboundedSender},
+  task::JoinHandle,
   time::sleep,
 };
 use tokio_util::sync::CancellationToken;
 use tracing::info;
 
+mod stats;
 mod util;
 mod worker;
@@ -32,34 +34,60 @@ pub struct Opts {
   pub process_index: i32,
 }
 
-async fn start_stop_federation_workers(
-  opts: Opts,
-  pool: ActualDbPool,
-  federation_config: FederationConfig<LemmyContext>,
-  cancel: CancellationToken,
-) -> anyhow::Result<()> {
-  let mut workers = HashMap::<InstanceId, CancellableTask>::new();
-
-  let (stats_sender, stats_receiver) = unbounded_channel();
-  let exit_print = tokio::spawn(receive_print_stats(pool.clone(), stats_receiver));
-  let pool2 = &mut DbPool::Pool(&pool);
-  let process_index = opts.process_index - 1;
-  let local_domain = federation_config.settings().get_hostname_without_port()?;
-  info!(
-    "Starting federation workers for process count {} and index {}",
-    opts.process_count, process_index
-  );
-  loop {
-    let mut total_count = 0;
-    let mut dead_count = 0;
-    let mut disallowed_count = 0;
-    for (instance, allowed, is_dead) in
-      Instance::read_federated_with_blocked_and_dead(pool2).await?
-    {
-      if instance.domain == local_domain {
-        continue;
-      }
-      if instance.id.inner() % opts.process_count != process_index {
-        continue;
-      }
-      total_count += 1;
+pub struct SendManager {
+  opts: Opts,
+  workers: HashMap<InstanceId, CancellableTask>,
+  context: FederationConfig<LemmyContext>,
+  stats_sender: UnboundedSender<(InstanceId, FederationQueueState)>,
+  exit_print: JoinHandle<()>,
+}
+
+impl SendManager {
+  pub fn new(opts: Opts, context: FederationConfig<LemmyContext>) -> Self {
+    assert!(opts.process_count > 0);
+    assert!(opts.process_index > 0);
+    assert!(opts.process_index <= opts.process_count);
+
+    let (stats_sender, stats_receiver) = unbounded_channel();
+    Self {
+      opts,
+      workers: HashMap::new(),
+      stats_sender,
+      exit_print: tokio::spawn(receive_print_stats(
+        context.inner_pool().clone(),
+        stats_receiver,
+      )),
+      context,
+    }
+  }
+
+  pub fn run(mut self) -> CancellableTask {
+    CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |cancel| async move {
+      self.do_loop(cancel).await?;
+      self.cancel().await?;
+      Ok(())
+    })
+  }
+
+  async fn do_loop(&mut self, cancel: CancellationToken) -> LemmyResult<()> {
+    let process_index = self.opts.process_index - 1;
+    info!(
+      "Starting federation workers for process count {} and index {}",
+      self.opts.process_count, process_index
+    );
+    let local_domain = self.context.settings().get_hostname_without_port()?;
+    let mut pool = self.context.pool();
+
+    loop {
+      let mut total_count = 0;
+      let mut dead_count = 0;
+      let mut disallowed_count = 0;
+
+      for (instance, allowed, is_dead) in
+        Instance::read_federated_with_blocked_and_dead(&mut pool).await?
+      {
+        if instance.domain == local_domain {
+          continue;
+        }
+        if instance.id.inner() % self.opts.process_count != process_index {
+          continue;
+        }
+        total_count += 1;
@@ -71,136 +99,228 @@ async fn start_stop_federation_workers(
         }
         let should_federate = allowed && !is_dead;
         if should_federate {
-          if workers.contains_key(&instance.id) {
+          if self.workers.contains_key(&instance.id) {
             // worker already running
             continue;
           }
           // create new worker
-          let config = federation_config.clone();
-          let stats_sender = stats_sender.clone();
-          let pool = pool.clone();
-          workers.insert(
-            instance.id,
-            CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |stop| {
-              let instance = instance.clone();
-              let req_data = config.clone().to_request_data();
-              let stats_sender = stats_sender.clone();
-              let pool = pool.clone();
-              async move {
-                InstanceWorker::init_and_loop(
-                  instance,
-                  req_data,
-                  &mut DbPool::Pool(&pool),
-                  stop,
-                  stats_sender,
-                )
-                .await
-              }
-            }),
-          );
+          let instance = instance.clone();
+          let req_data = self.context.to_request_data();
+          let stats_sender = self.stats_sender.clone();
+          self.workers.insert(
+            instance.id,
+            CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |stop| async move {
+              InstanceWorker::init_and_loop(instance, req_data, stop, stats_sender).await?;
+              Ok(())
+            }),
+          );
         } else if !should_federate {
-          if let Some(worker) = workers.remove(&instance.id) {
+          if let Some(worker) = self.workers.remove(&instance.id) {
             if let Err(e) = worker.cancel().await {
               tracing::error!("error stopping worker: {e}");
             }
           }
         }
       }
-      let worker_count = workers.len();
+      let worker_count = self.workers.len();
       tracing::info!("Federating to {worker_count}/{total_count} instances ({dead_count} dead, {disallowed_count} disallowed)");
       tokio::select! {
        () = sleep(INSTANCES_RECHECK_DELAY) => {},
-        _ = cancel.cancelled() => { break; }
+        _ = cancel.cancelled() => { return Ok(()) }
      }
    }
-
-  drop(stats_sender);
-  tracing::warn!(
-    "Waiting for {} workers ({:.2?} max)",
-    workers.len(),
-    WORKER_EXIT_TIMEOUT
-  );
-  // the cancel futures need to be awaited concurrently for the shutdown processes to be triggered
-  // concurrently
-  futures::future::join_all(workers.into_values().map(util::CancellableTask::cancel)).await;
-  exit_print.await?;
-  Ok(())
-}
+  }
+
+  pub async fn cancel(self) -> LemmyResult<()> {
+    drop(self.stats_sender);
+    tracing::warn!(
+      "Waiting for {} workers ({:.2?} max)",
+      self.workers.len(),
+      WORKER_EXIT_TIMEOUT
+    );
+    // the cancel futures need to be awaited concurrently for the shutdown processes to be triggered
+    // concurrently
+    futures::future::join_all(
+      self
+        .workers
+        .into_values()
+        .map(util::CancellableTask::cancel),
+    )
+    .await;
+    self.exit_print.await?;
+    Ok(())
+  }
+}
 
-/// starts and stops federation workers depending on which instances are on db
-/// await the returned future to stop/cancel all workers gracefully
-pub fn start_stop_federation_workers_cancellable(
-  opts: Opts,
-  pool: ActualDbPool,
-  config: FederationConfig<LemmyContext>,
-) -> CancellableTask {
-  CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |stop| {
-    let opts = opts.clone();
-    let pool = pool.clone();
-    let config = config.clone();
-    async move { start_stop_federation_workers(opts, pool, config, stop).await }
-  })
-}
-
-/// every 60s, print the state for every instance. exits if the receiver is done (all senders
-/// dropped)
-async fn receive_print_stats(
-  pool: ActualDbPool,
-  mut receiver: UnboundedReceiver<(String, FederationQueueState)>,
-) {
-  let pool = &mut DbPool::Pool(&pool);
-  let mut printerval = tokio::time::interval(Duration::from_secs(60));
-  printerval.tick().await; // skip first
-  let mut stats = HashMap::new();
-  loop {
-    tokio::select! {
-      ele = receiver.recv() => {
-        let Some((domain, ele)) = ele else {
-          print_stats(pool, &stats).await;
-          return;
-        };
-        stats.insert(domain, ele);
-      },
-      _ = printerval.tick() => {
-        print_stats(pool, &stats).await;
-      }
-    }
-  }
-}
-
-async fn print_stats(pool: &mut DbPool<'_>, stats: &HashMap<String, FederationQueueState>) {
-  let last_id = crate::util::get_latest_activity_id(pool).await;
-  let Ok(last_id) = last_id else {
-    tracing::error!("could not get last id");
-    return;
-  };
-  // it's expected that the values are a bit out of date, everything < SAVE_STATE_EVERY should be
-  // considered up to date
-  tracing::info!(
-    "Federation state as of {}:",
-    Local::now()
-      .with_nanosecond(0)
-      .expect("0 is valid nanos")
-      .to_rfc3339()
-  );
-  // todo: more stats (act/sec, avg http req duration)
-  let mut ok_count = 0;
-  let mut behind_count = 0;
-  for (domain, stat) in stats {
-    let behind = last_id.0 - stat.last_successful_id.map(|e| e.0).unwrap_or(0);
-    if stat.fail_count > 0 {
-      tracing::info!(
-        "{}: Warning. {} behind, {} consecutive fails, current retry delay {:.2?}",
-        domain,
-        behind,
-        stat.fail_count,
-        federate_retry_sleep_duration(stat.fail_count)
-      );
-    } else if behind > 0 {
-      tracing::debug!("{}: Ok. {} activities behind", domain, behind);
-      behind_count += 1;
-    } else {
-      ok_count += 1;
-    }
-  }
-  tracing::info!("{ok_count} others up to date. {behind_count} instances behind.");
-}
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+#[allow(clippy::indexing_slicing)]
+mod test {
+  use super::*;
+  use activitypub_federation::config::Data;
+  use chrono::DateTime;
+  use lemmy_db_schema::source::{
+    federation_allowlist::FederationAllowList,
+    federation_blocklist::FederationBlockList,
+    instance::InstanceForm,
+  };
+  use lemmy_utils::error::LemmyError;
+  use serial_test::serial;
+  use std::{
+    collections::HashSet,
+    sync::{Arc, Mutex},
+  };
+  use tokio::{spawn, time::sleep};
+
+  struct TestData {
+    send_manager: SendManager,
+    context: Data<LemmyContext>,
+    instances: Vec<Instance>,
+  }
+
+  impl TestData {
+    async fn init(process_count: i32, process_index: i32) -> LemmyResult<Self> {
+      let context = LemmyContext::init_test_context().await;
+      let opts = Opts {
+        process_count,
+        process_index,
+      };
+      let federation_config = FederationConfig::builder()
+        .domain("local.com")
+        .app_data(context.clone())
+        .build()
+        .await?;
+
+      let pool = &mut context.pool();
+      let instances = vec![
+        Instance::read_or_create(pool, "alpha.com".to_string()).await?,
+        Instance::read_or_create(pool, "beta.com".to_string()).await?,
+        Instance::read_or_create(pool, "gamma.com".to_string()).await?,
+      ];
+
+      let send_manager = SendManager::new(opts, federation_config);
+      Ok(Self {
+        send_manager,
+        context,
+        instances,
+      })
+    }
+
+    async fn run(&mut self) -> LemmyResult<()> {
+      // start it and cancel after workers are running
+      let cancel = CancellationToken::new();
+      let cancel_ = cancel.clone();
+      spawn(async move {
+        sleep(Duration::from_millis(100)).await;
+        cancel_.cancel();
+      });
+      self.send_manager.do_loop(cancel.clone()).await?;
+      Ok(())
+    }
+
+    async fn cleanup(self) -> LemmyResult<()> {
+      self.send_manager.cancel().await?;
+      Instance::delete_all(&mut self.context.pool()).await?;
+      Ok(())
+    }
+  }
+
+  /// Basic test with default params and only active/allowed instances
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    data.run().await?;
+    assert_eq!(3, data.send_manager.workers.len());
+    let workers: HashSet<_> = data.send_manager.workers.keys().cloned().collect();
+    let instances: HashSet<_> = data.instances.iter().map(|i| i.id).collect();
+    assert_eq!(instances, workers);
+
+    data.cleanup().await?;
+    Ok(())
+  }
+
+  /// Running with multiple processes should start correct workers
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_processes() -> LemmyResult<()> {
+    let active = Arc::new(Mutex::new(vec![]));
+    let execute = |count, index, active: Arc<Mutex<Vec<InstanceId>>>| async move {
+      let mut data = TestData::init(count, index).await?;
+      data.run().await?;
+      assert_eq!(1, data.send_manager.workers.len());
+      for k in data.send_manager.workers.keys() {
+        active.lock().unwrap().push(*k);
+      }
+      data.cleanup().await?;
+      Ok::<(), LemmyError>(())
+    };
+    execute(3, 1, active.clone()).await?;
+    execute(3, 2, active.clone()).await?;
+    execute(3, 3, active.clone()).await?;
+
+    // Should run exactly three workers
+    assert_eq!(3, active.lock().unwrap().len());
+
+    Ok(())
+  }
+
+  /// Use blocklist, should not send to blocked instances
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_blocked() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    let domain = data.instances[0].domain.clone();
+    FederationBlockList::replace(&mut data.context.pool(), Some(vec![domain])).await?;
+    data.run().await?;
+    let workers = &data.send_manager.workers;
+    assert_eq!(2, workers.len());
+    assert!(workers.contains_key(&data.instances[1].id));
+    assert!(workers.contains_key(&data.instances[2].id));
+
+    data.cleanup().await?;
+    Ok(())
+  }
+
+  /// Use allowlist, should only send to allowed instance
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_allowed() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    let domain = data.instances[0].domain.clone();
+    FederationAllowList::replace(&mut data.context.pool(), Some(vec![domain])).await?;
+    data.run().await?;
+    let workers = &data.send_manager.workers;
+    assert_eq!(1, workers.len());
+    assert!(workers.contains_key(&data.instances[0].id));
+
+    data.cleanup().await?;
+    Ok(())
+  }
+
+  /// Mark instance as dead, there should be no worker created for it
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_dead() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    let instance = &data.instances[0];
+    let form = InstanceForm::builder()
+      .domain(instance.domain.clone())
+      .updated(DateTime::from_timestamp(0, 0))
+      .build();
+    Instance::update(&mut data.context.pool(), instance.id, form).await?;
+
+    data.run().await?;
+    let workers = &data.send_manager.workers;
+    assert_eq!(2, workers.len());
+    assert!(workers.contains_key(&data.instances[1].id));
+    assert!(workers.contains_key(&data.instances[2].id));
+
+    data.cleanup().await?;
+    Ok(())
+  }
+}


@@ -0,0 +1,97 @@
use crate::util::get_latest_activity_id;
use chrono::Local;
use diesel::result::Error::NotFound;
use lemmy_api_common::federate_retry_sleep_duration;
use lemmy_db_schema::{
  newtypes::InstanceId,
  source::{federation_queue_state::FederationQueueState, instance::Instance},
  utils::{ActualDbPool, DbPool},
};
use lemmy_utils::{error::LemmyResult, CACHE_DURATION_FEDERATION};
use moka::future::Cache;
use once_cell::sync::Lazy;
use std::{collections::HashMap, time::Duration};
use tokio::{sync::mpsc::UnboundedReceiver, time::interval};
use tracing::{debug, info, warn};

/// every 60s, print the state for every instance. exits if the receiver is done (all senders
/// dropped)
pub(crate) async fn receive_print_stats(
  pool: ActualDbPool,
  mut receiver: UnboundedReceiver<(InstanceId, FederationQueueState)>,
) {
  let pool = &mut DbPool::Pool(&pool);
  let mut printerval = interval(Duration::from_secs(60));
  let mut stats = HashMap::new();
  loop {
    tokio::select! {
      ele = receiver.recv() => {
        match ele {
          // update stats for instance
          Some((instance_id, ele)) => {stats.insert(instance_id, ele);},
          // receiver closed, print stats and exit
          None => {
            print_stats(pool, &stats).await;
            return;
          }
        }
      },
      _ = printerval.tick() => {
        print_stats(pool, &stats).await;
      }
    }
  }
}

async fn print_stats(pool: &mut DbPool<'_>, stats: &HashMap<InstanceId, FederationQueueState>) {
  let res = print_stats_with_error(pool, stats).await;
  if let Err(e) = res {
    warn!("Failed to print stats: {e}");
  }
}

async fn print_stats_with_error(
  pool: &mut DbPool<'_>,
  stats: &HashMap<InstanceId, FederationQueueState>,
) -> LemmyResult<()> {
  static INSTANCE_CACHE: Lazy<Cache<(), Vec<Instance>>> = Lazy::new(|| {
    Cache::builder()
      .max_capacity(1)
      .time_to_live(CACHE_DURATION_FEDERATION)
      .build()
  });
  let instances = INSTANCE_CACHE
    .try_get_with((), async { Instance::read_all(pool).await })
    .await?;

  let last_id = get_latest_activity_id(pool).await?;

  // it's expected that the values are a bit out of date, everything < SAVE_STATE_EVERY should be
  // considered up to date
  info!("Federation state as of {}:", Local::now().to_rfc3339());
  // todo: more stats (act/sec, avg http req duration)
  let mut ok_count = 0;
  let mut behind_count = 0;
  for (instance_id, stat) in stats {
    let domain = &instances
      .iter()
      .find(|i| &i.id == instance_id)
      .ok_or(NotFound)?
      .domain;
    let behind = last_id.0 - stat.last_successful_id.map(|e| e.0).unwrap_or(0);
    if stat.fail_count > 0 {
      info!(
        "{domain}: Warning. {behind} behind, {} consecutive fails, current retry delay {:.2?}",
        stat.fail_count,
        federate_retry_sleep_duration(stat.fail_count)
      );
    } else if behind > 0 {
      debug!("{}: Ok. {} activities behind", domain, behind);
      behind_count += 1;
    } else {
      ok_count += 1;
    }
  }
  info!("{ok_count} others up to date. {behind_count} instances behind.");
  Ok(())
}
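The print_stats_with_error function above keeps the instance list in a single-slot moka cache so the database is only queried once per CACHE_DURATION_FEDERATION rather than on every stats print. Here is a standalone sketch of that pattern; it is illustrative and not Lemmy code: fetch_instances is a hypothetical stand-in for Instance::read_all, and it assumes moka (with the "future" feature), once_cell and tokio as dependencies.

// Standalone sketch: a one-entry moka cache keyed by () acting as a TTL-cached value.
use moka::future::Cache;
use once_cell::sync::Lazy;
use std::{sync::Arc, time::Duration};

async fn fetch_instances() -> Result<Vec<String>, std::io::Error> {
  // pretend this is an expensive database query
  Ok(vec!["alpha.com".into(), "beta.com".into()])
}

async fn cached_instances() -> Result<Vec<String>, Arc<std::io::Error>> {
  // a cache with a single slot, keyed by the unit type
  static CACHE: Lazy<Cache<(), Vec<String>>> = Lazy::new(|| {
    Cache::builder()
      .max_capacity(1)
      .time_to_live(Duration::from_secs(60))
      .build()
  });
  // only the first caller per TTL window runs the init future; others get the cached value
  CACHE
    .try_get_with((), async { fetch_instances().await })
    .await
}

#[tokio::main]
async fn main() {
  println!("{:?}", cached_instances().await);
  println!("{:?}", cached_instances().await); // served from cache
}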


@@ -17,6 +17,7 @@ use lemmy_db_schema::{
   traits::ApubActor,
   utils::{get_conn, DbPool},
 };
+use lemmy_utils::error::LemmyResult;
 use moka::future::Cache;
 use once_cell::sync::Lazy;
 use reqwest::Url;
@@ -24,6 +25,7 @@ use serde_json::Value;
 use std::{fmt::Debug, future::Future, pin::Pin, sync::Arc, time::Duration};
 use tokio::{task::JoinHandle, time::sleep};
 use tokio_util::sync::CancellationToken;
+use tracing::error;
 
 /// Decrease the delays of the federation queue.
 /// Should only be used for federation tests since it significantly increases CPU and DB load of the
@@ -59,36 +61,29 @@ impl CancellableTask {
   /// spawn a task but with graceful shutdown
   pub fn spawn<F, R: Debug>(
     timeout: Duration,
-    task: impl Fn(CancellationToken) -> F + Send + 'static,
+    task: impl FnOnce(CancellationToken) -> F + Send + 'static,
   ) -> CancellableTask
   where
-    F: Future<Output = R> + Send + 'static,
-    R: Send + 'static,
+    F: Future<Output = LemmyResult<R>> + Send + 'static,
   {
     let stop = CancellationToken::new();
     let stop2 = stop.clone();
-    let task: JoinHandle<()> = tokio::spawn(async move {
-      loop {
-        let res = task(stop2.clone()).await;
-        if stop2.is_cancelled() {
-          return;
-        } else {
-          tracing::warn!("task exited, restarting: {res:?}");
-        }
-      }
-    });
+    let task: JoinHandle<LemmyResult<R>> = tokio::spawn(task(stop2));
     let abort = task.abort_handle();
     CancellableTask {
       f: Box::pin(async move {
         stop.cancel();
         tokio::select! {
          r = task => {
-            r.context("could not join")?;
+            if let Err(ref e) = r? {
+              error!("CancellableTask threw error: {e}");
+            }
             Ok(())
          },
          _ = sleep(timeout) => {
            abort.abort();
-            tracing::warn!("Graceful shutdown timed out, aborting task");
-            Err(anyhow!("task aborted due to timeout"))
+            Err(anyhow!("CancellableTask aborted due to shutdown timeout"))
          }
        }
      }),
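For context on the util.rs change above: CancellableTask::spawn now takes an FnOnce closure, runs it exactly once (the old restart loop is gone), and expects the future to resolve to a LemmyResult so any error surfaces when the task is cancelled or joined. A rough standalone sketch of that calling convention follows; it is not the Lemmy API, spawn_cancellable and BoxError are made-up names, and it assumes tokio (with the "full" feature) plus tokio-util.

// Standalone sketch: spawn a run-once task with a cancellation token and a bounded join.
use std::time::Duration;
use tokio::{task::JoinHandle, time::timeout};
use tokio_util::sync::CancellationToken;

type BoxError = Box<dyn std::error::Error + Send + Sync>;

fn spawn_cancellable<F, Fut, R>(task: F) -> (CancellationToken, JoinHandle<Result<R, BoxError>>)
where
  F: FnOnce(CancellationToken) -> Fut + Send + 'static,
  Fut: std::future::Future<Output = Result<R, BoxError>> + Send + 'static,
  R: Send + 'static,
{
  let stop = CancellationToken::new();
  // the closure is consumed here, so it only ever runs once
  let handle = tokio::spawn(task(stop.clone()));
  (stop, handle)
}

#[tokio::main]
async fn main() {
  let (stop, handle) = spawn_cancellable(|cancel| async move {
    // a well-behaved worker exits when its token is cancelled
    cancel.cancelled().await;
    Ok::<_, BoxError>("worker exited cleanly")
  });

  stop.cancel();
  // graceful shutdown with an upper bound, mirroring the WORKER_EXIT_TIMEOUT idea
  match timeout(Duration::from_secs(1), handle).await {
    Ok(joined) => println!("{:?}", joined),
    Err(_) => eprintln!("shutdown timed out, task would be aborted"),
  }
}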


@@ -22,7 +22,7 @@ use lemmy_db_schema::{
     instance::{Instance, InstanceForm},
     site::Site,
   },
-  utils::{naive_now, DbPool},
+  utils::naive_now,
 };
 use lemmy_db_views_actor::structs::CommunityFollowerView;
 use once_cell::sync::Lazy;
@@ -75,7 +75,7 @@ pub(crate) struct InstanceWorker {
   followed_communities: HashMap<CommunityId, HashSet<Url>>,
   stop: CancellationToken,
   context: Data<LemmyContext>,
-  stats_sender: UnboundedSender<(String, FederationQueueState)>,
+  stats_sender: UnboundedSender<(InstanceId, FederationQueueState)>,
   last_full_communities_fetch: DateTime<Utc>,
   last_incremental_communities_fetch: DateTime<Utc>,
   state: FederationQueueState,
@@ -86,12 +86,11 @@ impl InstanceWorker {
   pub(crate) async fn init_and_loop(
     instance: Instance,
     context: Data<LemmyContext>,
-    pool: &mut DbPool<'_>, /* in theory there's a ref to the pool in context, but i couldn't get
-                            * that to work wrt lifetimes */
     stop: CancellationToken,
-    stats_sender: UnboundedSender<(String, FederationQueueState)>,
+    stats_sender: UnboundedSender<(InstanceId, FederationQueueState)>,
   ) -> Result<(), anyhow::Error> {
-    let state = FederationQueueState::load(pool, instance.id).await?;
+    let mut pool = context.pool();
+    let state = FederationQueueState::load(&mut pool, instance.id).await?;
     let mut worker = InstanceWorker {
       instance,
       site_loaded: false,
@@ -105,32 +104,29 @@ impl InstanceWorker {
       state,
       last_state_insert: Utc.timestamp_nanos(0),
     };
-    worker.loop_until_stopped(pool).await
+    worker.loop_until_stopped().await
   }
 
   /// loop fetch new activities from db and send them to the inboxes of the given instances
   /// this worker only returns if (a) there is an internal error or (b) the cancellation token is
   /// cancelled (graceful exit)
-  pub(crate) async fn loop_until_stopped(
-    &mut self,
-    pool: &mut DbPool<'_>,
-  ) -> Result<(), anyhow::Error> {
+  pub(crate) async fn loop_until_stopped(&mut self) -> Result<(), anyhow::Error> {
     debug!("Starting federation worker for {}", self.instance.domain);
     let save_state_every = chrono::Duration::from_std(SAVE_STATE_EVERY_TIME).expect("not negative");
-    self.update_communities(pool).await?;
+    self.update_communities().await?;
     self.initial_fail_sleep().await?;
     while !self.stop.is_cancelled() {
-      self.loop_batch(pool).await?;
+      self.loop_batch().await?;
       if self.stop.is_cancelled() {
         break;
      }
      if (Utc::now() - self.last_state_insert) > save_state_every {
-        self.save_and_send_state(pool).await?;
+        self.save_and_send_state().await?;
      }
-      self.update_communities(pool).await?;
+      self.update_communities().await?;
    }
    // final update of state in db
-    self.save_and_send_state(pool).await?;
+    self.save_and_send_state().await?;
    Ok(())
  }
@@ -155,8 +151,8 @@ impl InstanceWorker {
     Ok(())
   }
   /// send out a batch of CHECK_SAVE_STATE_EVERY_IT activities
-  async fn loop_batch(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
-    let latest_id = get_latest_activity_id(pool).await?;
+  async fn loop_batch(&mut self) -> Result<()> {
+    let latest_id = get_latest_activity_id(&mut self.context.pool()).await?;
     let mut id = if let Some(id) = self.state.last_successful_id {
       id
     } else {
@@ -166,7 +162,7 @@ impl InstanceWorker {
       // skip all past activities:
       self.state.last_successful_id = Some(latest_id);
       // save here to ensure it's not read as 0 again later if no activities have happened
-      self.save_and_send_state(pool).await?;
+      self.save_and_send_state().await?;
       latest_id
     };
     if id >= latest_id {
@@ -184,7 +180,7 @@ impl InstanceWorker {
     {
       id = ActivityId(id.0 + 1);
       processed_activities += 1;
-      let Some(ele) = get_activity_cached(pool, id)
+      let Some(ele) = get_activity_cached(&mut self.context.pool(), id)
         .await
         .context("failed reading activity from db")?
       else {
@@ -192,7 +188,7 @@ impl InstanceWorker {
         self.state.last_successful_id = Some(id);
         continue;
       };
-      if let Err(e) = self.send_retry_loop(pool, &ele.0, &ele.1).await {
+      if let Err(e) = self.send_retry_loop(&ele.0, &ele.1).await {
         warn!(
           "sending {} errored internally, skipping activity: {:?}",
           ele.0.ap_id, e
@@ -213,12 +209,11 @@ impl InstanceWorker {
   // and will return an error if an internal error occurred (send errors cause an infinite loop)
   async fn send_retry_loop(
     &mut self,
-    pool: &mut DbPool<'_>,
     activity: &SentActivity,
     object: &SharedInboxActivities,
   ) -> Result<()> {
     let inbox_urls = self
-      .get_inbox_urls(pool, activity)
+      .get_inbox_urls(activity)
       .await
       .context("failed figuring out inbox urls")?;
     if inbox_urls.is_empty() {
@@ -230,7 +225,7 @@ impl InstanceWorker {
     let Some(actor_apub_id) = &activity.actor_apub_id else {
       return Ok(()); // activity was inserted before persistent queue was activated
     };
-    let actor = get_actor_cached(pool, activity.actor_type, actor_apub_id)
+    let actor = get_actor_cached(&mut self.context.pool(), activity.actor_type, actor_apub_id)
       .await
       .context("failed getting actor instance (was it marked deleted / removed?)")?;
@@ -249,7 +244,7 @@ impl InstanceWorker {
           "{}: retrying {:?} attempt {} with delay {retry_delay:.2?}. ({e})",
           self.instance.domain, activity.id, self.state.fail_count
         );
-        self.save_and_send_state(pool).await?;
+        self.save_and_send_state().await?;
         tokio::select! {
           () = sleep(retry_delay) => {},
           () = self.stop.cancelled() => {
@@ -268,7 +263,7 @@ impl InstanceWorker {
           .domain(self.instance.domain.clone())
           .updated(Some(naive_now()))
           .build();
-        Instance::update(pool, self.instance.id, form).await?;
+        Instance::update(&mut self.context.pool(), self.instance.id, form).await?;
       }
     }
     Ok(())
@@ -278,16 +273,12 @@ impl InstanceWorker {
   /// most often this will return 0 values (if instance doesn't care about the activity)
   /// or 1 value (the shared inbox)
   /// > 1 values only happens for non-lemmy software
-  async fn get_inbox_urls(
-    &mut self,
-    pool: &mut DbPool<'_>,
-    activity: &SentActivity,
-  ) -> Result<HashSet<Url>> {
+  async fn get_inbox_urls(&mut self, activity: &SentActivity) -> Result<HashSet<Url>> {
     let mut inbox_urls: HashSet<Url> = HashSet::new();
 
     if activity.send_all_instances {
       if !self.site_loaded {
-        self.site = Site::read_from_instance_id(pool, self.instance.id).await?;
+        self.site = Site::read_from_instance_id(&mut self.context.pool(), self.instance.id).await?;
         self.site_loaded = true;
       }
       if let Some(site) = &self.site {
@@ -312,22 +303,18 @@ impl InstanceWorker {
     Ok(inbox_urls)
   }
 
-  async fn update_communities(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
+  async fn update_communities(&mut self) -> Result<()> {
     if (Utc::now() - self.last_full_communities_fetch) > *FOLLOW_REMOVALS_RECHECK_DELAY {
       // process removals every hour
       (self.followed_communities, self.last_full_communities_fetch) = self
-        .get_communities(pool, self.instance.id, Utc.timestamp_nanos(0))
+        .get_communities(self.instance.id, Utc.timestamp_nanos(0))
        .await?;
      self.last_incremental_communities_fetch = self.last_full_communities_fetch;
    }
    if (Utc::now() - self.last_incremental_communities_fetch) > *FOLLOW_ADDITIONS_RECHECK_DELAY {
      // process additions every minute
      let (news, time) = self
-        .get_communities(
-          pool,
-          self.instance.id,
-          self.last_incremental_communities_fetch,
-        )
+        .get_communities(self.instance.id, self.last_incremental_communities_fetch)
        .await?;
      self.followed_communities.extend(news);
      self.last_incremental_communities_fetch = time;
@@ -339,7 +326,6 @@ impl InstanceWorker {
   /// them
   async fn get_communities(
     &mut self,
-    pool: &mut DbPool<'_>,
     instance_id: InstanceId,
     last_fetch: DateTime<Utc>,
   ) -> Result<(HashMap<CommunityId, HashSet<Url>>, DateTime<Utc>)> {
@@ -347,7 +333,11 @@ impl InstanceWorker {
       Utc::now() - chrono::TimeDelta::try_seconds(10).expect("TimeDelta out of bounds"); // update to time before fetch to ensure overlap. subtract 10s to ensure overlap even if
     // published date is not exact
     Ok((
-      CommunityFollowerView::get_instance_followed_community_inboxes(pool, instance_id, last_fetch)
+      CommunityFollowerView::get_instance_followed_community_inboxes(
+        &mut self.context.pool(),
+        instance_id,
+        last_fetch,
+      )
        .await?
        .into_iter()
        .fold(HashMap::new(), |mut map, (c, u)| {
@@ -357,12 +347,12 @@ impl InstanceWorker {
       new_last_fetch,
     ))
   }
 
-  async fn save_and_send_state(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
+  async fn save_and_send_state(&mut self) -> Result<()> {
     self.last_state_insert = Utc::now();
-    FederationQueueState::upsert(pool, &self.state).await?;
+    FederationQueueState::upsert(&mut self.context.pool(), &self.state).await?;
     self
       .stats_sender
-      .send((self.instance.domain.clone(), self.state.clone()))?;
+      .send((self.instance.id, self.state.clone()))?;
     Ok(())
   }
 }


@@ -20,11 +20,10 @@ then
   cargo test -p $PACKAGE --all-features --no-fail-fast $TEST
 else
   cargo test --workspace --no-fail-fast
+  # Testing lemmy utils all features in particular (for ts-rs bindings)
+  cargo test -p lemmy_utils --all-features --no-fail-fast
 fi
 
-# Testing lemmy utils all features in particular (for ts-rs bindings)
-cargo test -p lemmy_utils --all-features --no-fail-fast
-
 # Add this to do printlns: -- --nocapture
 
 pg_ctl stop --silent


@@ -41,7 +41,7 @@ use lemmy_apub::{
   FEDERATION_HTTP_FETCH_LIMIT,
 };
 use lemmy_db_schema::{source::secret::Secret, utils::build_db_pool};
-use lemmy_federate::{start_stop_federation_workers_cancellable, Opts};
+use lemmy_federate::{Opts, SendManager};
 use lemmy_routes::{feeds, images, nodeinfo, webfinger};
 use lemmy_utils::{
   error::LemmyResult,
@@ -210,14 +210,14 @@ pub async fn start_lemmy_server(args: CmdArgs) -> LemmyResult<()> {
     None
   };
   let federate = (!args.disable_activity_sending).then(|| {
-    start_stop_federation_workers_cancellable(
+    let task = SendManager::new(
       Opts {
         process_index: args.federate_process_index,
         process_count: args.federate_process_count,
       },
-      pool.clone(),
-      federation_config.clone(),
-    )
+      federation_config,
+    );
+    task.run()
   });
   let mut interrupt = tokio::signal::unix::signal(SignalKind::interrupt())?;
   let mut terminate = tokio::signal::unix::signal(SignalKind::terminate())?;