Merge branch 'main' into migration-runner
commit 0b8790ce1d
18 changed files with 533 additions and 295 deletions
Cargo.lock (generated): 28 changes

@@ -2737,7 +2737,7 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55"
 
 [[package]]
 name = "lemmy_api"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "activitypub_federation",
  "actix-web",
@@ -2766,7 +2766,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_api_common"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "activitypub_federation",
  "actix-web",
@@ -2804,7 +2804,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_api_crud"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "accept-language",
  "activitypub_federation",
@@ -2827,7 +2827,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_apub"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "activitypub_federation",
  "actix-web",
@@ -2865,7 +2865,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_db_perf"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "anyhow",
  "clap",
@@ -2880,7 +2880,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_db_schema"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "activitypub_federation",
  "anyhow",
@@ -2921,7 +2921,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_db_views"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "actix-web",
  "chrono",
@@ -2943,7 +2943,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_db_views_actor"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "chrono",
  "diesel",
@@ -2963,7 +2963,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_db_views_moderator"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "diesel",
  "diesel-async",
@@ -2975,7 +2975,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_federate"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "activitypub_federation",
  "anyhow",
@@ -2987,10 +2987,12 @@ dependencies = [
  "lemmy_apub",
  "lemmy_db_schema",
  "lemmy_db_views_actor",
+ "lemmy_utils",
  "moka",
  "once_cell",
  "reqwest 0.11.27",
  "serde_json",
+ "serial_test",
  "tokio",
  "tokio-util",
  "tracing",
@@ -2998,7 +3000,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_routes"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "activitypub_federation",
  "actix-web",
@@ -3023,7 +3025,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_server"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "activitypub_federation",
  "actix-cors",
@@ -3066,7 +3068,7 @@ dependencies = [
 
 [[package]]
 name = "lemmy_utils"
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 dependencies = [
  "actix-web",
  "anyhow",

Cargo.toml: 24 changes

@@ -1,5 +1,5 @@
 [workspace.package]
-version = "0.19.4-rc.2"
+version = "0.19.4-rc.3"
 edition = "2021"
 description = "A link aggregator for the fediverse"
 license = "AGPL-3.0"
@@ -88,17 +88,17 @@ unused_self = "deny"
 unwrap_used = "deny"
 
 [workspace.dependencies]
-lemmy_api = { version = "=0.19.4-rc.2", path = "./crates/api" }
-lemmy_api_crud = { version = "=0.19.4-rc.2", path = "./crates/api_crud" }
-lemmy_apub = { version = "=0.19.4-rc.2", path = "./crates/apub" }
-lemmy_utils = { version = "=0.19.4-rc.2", path = "./crates/utils", default-features = false }
-lemmy_db_schema = { version = "=0.19.4-rc.2", path = "./crates/db_schema" }
-lemmy_api_common = { version = "=0.19.4-rc.2", path = "./crates/api_common" }
-lemmy_routes = { version = "=0.19.4-rc.2", path = "./crates/routes" }
-lemmy_db_views = { version = "=0.19.4-rc.2", path = "./crates/db_views" }
-lemmy_db_views_actor = { version = "=0.19.4-rc.2", path = "./crates/db_views_actor" }
-lemmy_db_views_moderator = { version = "=0.19.4-rc.2", path = "./crates/db_views_moderator" }
-lemmy_federate = { version = "=0.19.4-rc.2", path = "./crates/federate" }
+lemmy_api = { version = "=0.19.4-rc.3", path = "./crates/api" }
+lemmy_api_crud = { version = "=0.19.4-rc.3", path = "./crates/api_crud" }
+lemmy_apub = { version = "=0.19.4-rc.3", path = "./crates/apub" }
+lemmy_utils = { version = "=0.19.4-rc.3", path = "./crates/utils", default-features = false }
+lemmy_db_schema = { version = "=0.19.4-rc.3", path = "./crates/db_schema" }
+lemmy_api_common = { version = "=0.19.4-rc.3", path = "./crates/api_common" }
+lemmy_routes = { version = "=0.19.4-rc.3", path = "./crates/routes" }
+lemmy_db_views = { version = "=0.19.4-rc.3", path = "./crates/db_views" }
+lemmy_db_views_actor = { version = "=0.19.4-rc.3", path = "./crates/db_views_actor" }
+lemmy_db_views_moderator = { version = "=0.19.4-rc.3", path = "./crates/db_views_moderator" }
+lemmy_federate = { version = "=0.19.4-rc.3", path = "./crates/federate" }
 activitypub_federation = { version = "0.5.6", default-features = false, features = [
  "actix-web",
 ] }

@@ -68,7 +68,6 @@ pub async fn like_post(
     .with_lemmy_type(LemmyErrorType::CouldntLikePost)?;
   }
 
-  // Mark the post as read
   mark_post_as_read(person_id, post_id, &mut context.pool()).await?;
 
   let community = Community::read(&mut context.pool(), post.community_id)

@@ -38,7 +38,6 @@ pub async fn save_post(
     .await?
     .ok_or(LemmyErrorType::CouldntFindPost)?;
 
-  // Mark the post as read
   mark_post_as_read(person_id, post_id, &mut context.pool()).await?;
 
   Ok(Json(PostResponse { post_view }))

@@ -6,6 +6,7 @@ use crate::{
 use chrono::{DateTime, Days, Local, TimeZone, Utc};
 use enum_map::{enum_map, EnumMap};
 use lemmy_db_schema::{
+  aggregates::structs::{PersonPostAggregates, PersonPostAggregatesForm},
   newtypes::{CommunityId, DbUrl, InstanceId, PersonId, PostId},
   source::{
     comment::{Comment, CommentUpdateForm},
@@ -139,13 +140,7 @@ pub fn is_top_mod(
   }
 }
 
-#[tracing::instrument(skip_all)]
-pub async fn get_post(post_id: PostId, pool: &mut DbPool<'_>) -> LemmyResult<Post> {
-  Post::read(pool, post_id)
-    .await?
-    .ok_or(LemmyErrorType::CouldntFindPost.into())
-}
-
+/// Marks a post as read for a given person.
 #[tracing::instrument(skip_all)]
 pub async fn mark_post_as_read(
   person_id: PersonId,
@@ -158,6 +153,28 @@ pub async fn mark_post_as_read(
   Ok(())
 }
 
+/// Updates the read comment count for a post. Usually done when reading or creating a new comment.
+#[tracing::instrument(skip_all)]
+pub async fn update_read_comments(
+  person_id: PersonId,
+  post_id: PostId,
+  read_comments: i64,
+  pool: &mut DbPool<'_>,
+) -> LemmyResult<()> {
+  let person_post_agg_form = PersonPostAggregatesForm {
+    person_id,
+    post_id,
+    read_comments,
+    ..PersonPostAggregatesForm::default()
+  };
+
+  PersonPostAggregates::upsert(pool, &person_post_agg_form)
+    .await
+    .with_lemmy_type(LemmyErrorType::CouldntFindPost)?;
+
+  Ok(())
+}
+
 pub fn check_user_valid(person: &Person) -> LemmyResult<()> {
   // Check for a site ban
   if person.banned {

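The new update_read_comments helper pairs with the existing mark_post_as_read: callers pass the comment count they have just observed so unread counters stay accurate. A minimal sketch of how an API handler might combine the two, assuming the signatures in the hunk above; person_id, post_id, post_view and context are stand-ins for the surrounding handler state:

// Sketch only: record both "post read" and "comments read" for the viewer.
mark_post_as_read(person_id, post_id, &mut context.pool()).await?;
update_read_comments(
  person_id,
  post_id,
  post_view.counts.comments, // callers that just created a comment pass `+ 1`
  &mut context.pool(),
)
.await?;
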
@@ -9,11 +9,11 @@ use lemmy_api_common::{
     check_community_user_action,
     check_post_deleted_or_removed,
     generate_local_apub_endpoint,
-    get_post,
     get_url_blocklist,
     is_mod_or_admin,
     local_site_to_slur_regex,
     process_markdown,
+    update_read_comments,
     EndpointType,
   },
 };
@@ -28,7 +28,7 @@ use lemmy_db_schema::{
   },
   traits::{Crud, Likeable},
 };
-use lemmy_db_views::structs::LocalUserView;
+use lemmy_db_views::structs::{LocalUserView, PostView};
 use lemmy_utils::{
   error::{LemmyErrorExt, LemmyErrorType, LemmyResult},
   utils::{mention::scrape_text_for_mentions, validation::is_valid_body_field},
@@ -51,8 +51,19 @@ pub async fn create_comment(
 
   // Check for a community ban
   let post_id = data.post_id;
-  let post = get_post(post_id, &mut context.pool()).await?;
-  let community_id = post.community_id;
+  // Read the full post view in order to get the comments count.
+  let post_view = PostView::read(
+    &mut context.pool(),
+    post_id,
+    Some(local_user_view.person.id),
+    true,
+  )
+  .await?
+  .ok_or(LemmyErrorType::CouldntFindPost)?;
+
+  let post = post_view.post;
+  let community_id = post_view.community.id;
 
   check_community_user_action(&local_user_view.person, community_id, &mut context.pool()).await?;
   check_post_deleted_or_removed(&post)?;
@@ -164,6 +175,15 @@ pub async fn create_comment(
   )
   .await?;
 
+  // Update the read comments, so your own new comment doesn't appear as a +1 unread
+  update_read_comments(
+    local_user_view.person.id,
+    post_id,
+    post_view.counts.comments + 1,
+    &mut context.pool(),
+  )
+  .await?;
+
   // If we're responding to a comment where we're the recipient,
   // (ie we're the grandparent, or the recipient of the parent comment_reply),
   // then mark the parent as read.

@@ -176,7 +176,6 @@ pub async fn create_post(
     .await
     .with_lemmy_type(LemmyErrorType::CouldntLikePost)?;
 
-  // Mark the post as read
   mark_post_as_read(person_id, post_id, &mut context.pool()).await?;
 
   if let Some(url) = updated_post.url.clone() {

@@ -2,10 +2,9 @@ use actix_web::web::{Data, Json, Query};
 use lemmy_api_common::{
   context::LemmyContext,
   post::{GetPost, GetPostResponse},
-  utils::{check_private_instance, is_mod_or_admin_opt, mark_post_as_read},
+  utils::{check_private_instance, is_mod_or_admin_opt, mark_post_as_read, update_read_comments},
 };
 use lemmy_db_schema::{
-  aggregates::structs::{PersonPostAggregates, PersonPostAggregatesForm},
   source::{comment::Comment, post::Post},
   traits::Crud,
 };
@@ -14,7 +13,7 @@ use lemmy_db_views::{
   structs::{LocalUserView, PostView, SiteView},
 };
 use lemmy_db_views_actor::structs::{CommunityModeratorView, CommunityView};
-use lemmy_utils::error::{LemmyErrorExt, LemmyErrorType, LemmyResult};
+use lemmy_utils::error::{LemmyErrorType, LemmyResult};
 
 #[tracing::instrument(skip(context))]
 pub async fn get_post(
@@ -60,10 +59,17 @@ pub async fn get_post(
   .await?
   .ok_or(LemmyErrorType::CouldntFindPost)?;
 
-  // Mark the post as read
   let post_id = post_view.post.id;
   if let Some(person_id) = person_id {
     mark_post_as_read(person_id, post_id, &mut context.pool()).await?;
+
+    update_read_comments(
+      person_id,
+      post_id,
+      post_view.counts.comments,
+      &mut context.pool(),
+    )
+    .await?;
   }
 
   // Necessary for the sidebar subscribed
@@ -76,21 +82,6 @@ pub async fn get_post(
   .await?
   .ok_or(LemmyErrorType::CouldntFindCommunity)?;
 
-  // Insert into PersonPostAggregates
-  // to update the read_comments count
-  if let Some(person_id) = person_id {
-    let read_comments = post_view.counts.comments;
-    let person_post_agg_form = PersonPostAggregatesForm {
-      person_id,
-      post_id,
-      read_comments,
-      ..PersonPostAggregatesForm::default()
-    };
-    PersonPostAggregates::upsert(&mut context.pool(), &person_post_agg_form)
-      .await
-      .with_lemmy_type(LemmyErrorType::CouldntFindPost)?;
-  }
-
   let moderators = CommunityModeratorView::for_community(&mut context.pool(), community_id).await?;
 
   // Fetch the cross_posts

@@ -94,11 +94,15 @@ impl Instance {
       .await
   }
 
-  #[cfg(test)]
+  /// Only for use in tests
   pub async fn delete_all(pool: &mut DbPool<'_>) -> Result<usize, Error> {
     let conn = &mut get_conn(pool).await?;
+    diesel::delete(federation_queue_state::table)
+      .execute(conn)
+      .await?;
     diesel::delete(instance::table).execute(conn).await
   }
 
   pub async fn allowlist(pool: &mut DbPool<'_>) -> Result<Vec<Self>, Error> {
     let conn = &mut get_conn(pool).await?;
     instance::table

@@ -127,11 +127,13 @@ pub struct LanguageId(pub i32);
 /// The comment reply id.
 pub struct CommentReplyId(i32);
 
-#[derive(Debug, Copy, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, Default)]
+#[derive(
+  Debug, Copy, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, Default, Ord, PartialOrd,
+)]
 #[cfg_attr(feature = "full", derive(DieselNewType, TS))]
 #[cfg_attr(feature = "full", ts(export))]
 /// The instance id.
-pub struct InstanceId(i32);
+pub struct InstanceId(pub i32);
 
 #[derive(
   Debug, Copy, Clone, Hash, Eq, PartialEq, Serialize, Deserialize, Default, PartialOrd, Ord,

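Making the inner field pub and deriving Ord/PartialOrd lets test code build ids directly and keep them in ordered containers. A small illustrative sketch, not taken from the diff, of what the extra derives permit (the id values are hypothetical):

use std::collections::BTreeSet;

// InstanceId(i32) can now be constructed directly from outside the crate.
let a = InstanceId(1);
let b = InstanceId(2);

// Ord/PartialOrd allow sorting and ordered sets; Hash/Eq already covered HashSet keys.
let ordered: BTreeSet<InstanceId> = [b, a].into_iter().collect();
assert_eq!(ordered.iter().next(), Some(&InstanceId(1)));
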
@@ -19,6 +19,7 @@ lemmy_api_common.workspace = true
 lemmy_apub.workspace = true
 lemmy_db_schema = { workspace = true, features = ["full"] }
 lemmy_db_views_actor.workspace = true
+lemmy_utils.workspace = true
 
 activitypub_federation.workspace = true
 anyhow.workspace = true
@@ -33,3 +34,6 @@ tokio = { workspace = true, features = ["full"] }
 tracing.workspace = true
 moka.workspace = true
 tokio-util = "0.7.11"
+
+[dev-dependencies]
+serial_test = { workspace = true }

@@ -1,20 +1,22 @@
 use crate::{util::CancellableTask, worker::InstanceWorker};
 use activitypub_federation::config::FederationConfig;
-use chrono::{Local, Timelike};
-use lemmy_api_common::{context::LemmyContext, federate_retry_sleep_duration};
+use lemmy_api_common::context::LemmyContext;
 use lemmy_db_schema::{
   newtypes::InstanceId,
   source::{federation_queue_state::FederationQueueState, instance::Instance},
-  utils::{ActualDbPool, DbPool},
 };
+use lemmy_utils::error::LemmyResult;
+use stats::receive_print_stats;
 use std::{collections::HashMap, time::Duration};
 use tokio::{
-  sync::mpsc::{unbounded_channel, UnboundedReceiver},
+  sync::mpsc::{unbounded_channel, UnboundedSender},
+  task::JoinHandle,
   time::sleep,
 };
 use tokio_util::sync::CancellationToken;
 use tracing::info;
 
+mod stats;
 mod util;
 mod worker;
 
@@ -32,175 +34,293 @@ pub struct Opts {
   pub process_index: i32,
 }
 
-async fn start_stop_federation_workers(
-  opts: Opts,
-  pool: ActualDbPool,
-  federation_config: FederationConfig<LemmyContext>,
-  cancel: CancellationToken,
-) -> anyhow::Result<()> {
-  let mut workers = HashMap::<InstanceId, CancellableTask>::new();
-
-  let (stats_sender, stats_receiver) = unbounded_channel();
-  let exit_print = tokio::spawn(receive_print_stats(pool.clone(), stats_receiver));
-  let pool2 = &mut DbPool::Pool(&pool);
-  let process_index = opts.process_index - 1;
-  let local_domain = federation_config.settings().get_hostname_without_port()?;
-  info!(
-    "Starting federation workers for process count {} and index {}",
-    opts.process_count, process_index
-  );
-  loop {
-    let mut total_count = 0;
-    let mut dead_count = 0;
-    let mut disallowed_count = 0;
-    for (instance, allowed, is_dead) in
-      Instance::read_federated_with_blocked_and_dead(pool2).await?
-    {
-      if instance.domain == local_domain {
-        continue;
-      }
-      if instance.id.inner() % opts.process_count != process_index {
-        continue;
-      }
-      total_count += 1;
-      if !allowed {
-        disallowed_count += 1;
-      }
-      if is_dead {
-        dead_count += 1;
-      }
-      let should_federate = allowed && !is_dead;
-      if should_federate {
-        if workers.contains_key(&instance.id) {
-          // worker already running
-          continue;
-        }
-        // create new worker
-        let config = federation_config.clone();
-        let stats_sender = stats_sender.clone();
-        let pool = pool.clone();
-        workers.insert(
-          instance.id,
-          CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |stop| {
-            let instance = instance.clone();
-            let req_data = config.clone().to_request_data();
-            let stats_sender = stats_sender.clone();
-            let pool = pool.clone();
-            async move {
-              InstanceWorker::init_and_loop(
-                instance,
-                req_data,
-                &mut DbPool::Pool(&pool),
-                stop,
-                stats_sender,
-              )
-              .await
-            }
-          }),
-        );
-      } else if !should_federate {
-        if let Some(worker) = workers.remove(&instance.id) {
-          if let Err(e) = worker.cancel().await {
-            tracing::error!("error stopping worker: {e}");
-          }
-        }
-      }
-    }
-    let worker_count = workers.len();
-    tracing::info!("Federating to {worker_count}/{total_count} instances ({dead_count} dead, {disallowed_count} disallowed)");
-    tokio::select! {
-      () = sleep(INSTANCES_RECHECK_DELAY) => {},
-      _ = cancel.cancelled() => { break; }
-    }
-  }
-  drop(stats_sender);
-  tracing::warn!(
-    "Waiting for {} workers ({:.2?} max)",
-    workers.len(),
-    WORKER_EXIT_TIMEOUT
-  );
-  // the cancel futures need to be awaited concurrently for the shutdown processes to be triggered
-  // concurrently
-  futures::future::join_all(workers.into_values().map(util::CancellableTask::cancel)).await;
-  exit_print.await?;
-  Ok(())
-}
-
-/// starts and stops federation workers depending on which instances are on db
-/// await the returned future to stop/cancel all workers gracefully
-pub fn start_stop_federation_workers_cancellable(
-  opts: Opts,
-  pool: ActualDbPool,
-  config: FederationConfig<LemmyContext>,
-) -> CancellableTask {
-  CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |stop| {
-    let opts = opts.clone();
-    let pool = pool.clone();
-    let config = config.clone();
-    async move { start_stop_federation_workers(opts, pool, config, stop).await }
-  })
-}
-
-/// every 60s, print the state for every instance. exits if the receiver is done (all senders
-/// dropped)
-async fn receive_print_stats(
-  pool: ActualDbPool,
-  mut receiver: UnboundedReceiver<(String, FederationQueueState)>,
-) {
-  let pool = &mut DbPool::Pool(&pool);
-  let mut printerval = tokio::time::interval(Duration::from_secs(60));
-  printerval.tick().await; // skip first
-  let mut stats = HashMap::new();
-  loop {
-    tokio::select! {
-      ele = receiver.recv() => {
-        let Some((domain, ele)) = ele else {
-          print_stats(pool, &stats).await;
-          return;
-        };
-        stats.insert(domain, ele);
-      },
-      _ = printerval.tick() => {
-        print_stats(pool, &stats).await;
-      }
-    }
-  }
-}
-
-async fn print_stats(pool: &mut DbPool<'_>, stats: &HashMap<String, FederationQueueState>) {
-  let last_id = crate::util::get_latest_activity_id(pool).await;
-  let Ok(last_id) = last_id else {
-    tracing::error!("could not get last id");
-    return;
-  };
-  // it's expected that the values are a bit out of date, everything < SAVE_STATE_EVERY should be
-  // considered up to date
-  tracing::info!(
-    "Federation state as of {}:",
-    Local::now()
-      .with_nanosecond(0)
-      .expect("0 is valid nanos")
-      .to_rfc3339()
-  );
-  // todo: more stats (act/sec, avg http req duration)
-  let mut ok_count = 0;
-  let mut behind_count = 0;
-  for (domain, stat) in stats {
-    let behind = last_id.0 - stat.last_successful_id.map(|e| e.0).unwrap_or(0);
-    if stat.fail_count > 0 {
-      tracing::info!(
-        "{}: Warning. {} behind, {} consecutive fails, current retry delay {:.2?}",
-        domain,
-        behind,
-        stat.fail_count,
-        federate_retry_sleep_duration(stat.fail_count)
-      );
-    } else if behind > 0 {
-      tracing::debug!("{}: Ok. {} activities behind", domain, behind);
-      behind_count += 1;
-    } else {
-      ok_count += 1;
-    }
-  }
-  tracing::info!("{ok_count} others up to date. {behind_count} instances behind.");
-}
+pub struct SendManager {
+  opts: Opts,
+  workers: HashMap<InstanceId, CancellableTask>,
+  context: FederationConfig<LemmyContext>,
+  stats_sender: UnboundedSender<(InstanceId, FederationQueueState)>,
+  exit_print: JoinHandle<()>,
+}
+
+impl SendManager {
+  pub fn new(opts: Opts, context: FederationConfig<LemmyContext>) -> Self {
+    assert!(opts.process_count > 0);
+    assert!(opts.process_index > 0);
+    assert!(opts.process_index <= opts.process_count);
+
+    let (stats_sender, stats_receiver) = unbounded_channel();
+    Self {
+      opts,
+      workers: HashMap::new(),
+      stats_sender,
+      exit_print: tokio::spawn(receive_print_stats(
+        context.inner_pool().clone(),
+        stats_receiver,
+      )),
+      context,
+    }
+  }
+
+  pub fn run(mut self) -> CancellableTask {
+    CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |cancel| async move {
+      self.do_loop(cancel).await?;
+      self.cancel().await?;
+      Ok(())
+    })
+  }
+
+  async fn do_loop(&mut self, cancel: CancellationToken) -> LemmyResult<()> {
+    let process_index = self.opts.process_index - 1;
+    info!(
+      "Starting federation workers for process count {} and index {}",
+      self.opts.process_count, process_index
+    );
+    let local_domain = self.context.settings().get_hostname_without_port()?;
+    let mut pool = self.context.pool();
+    loop {
+      let mut total_count = 0;
+      let mut dead_count = 0;
+      let mut disallowed_count = 0;
+      for (instance, allowed, is_dead) in
+        Instance::read_federated_with_blocked_and_dead(&mut pool).await?
+      {
+        if instance.domain == local_domain {
+          continue;
+        }
+        if instance.id.inner() % self.opts.process_count != process_index {
+          continue;
+        }
+        total_count += 1;
+        if !allowed {
+          disallowed_count += 1;
+        }
+        if is_dead {
+          dead_count += 1;
+        }
+        let should_federate = allowed && !is_dead;
+        if should_federate {
+          if self.workers.contains_key(&instance.id) {
+            // worker already running
+            continue;
+          }
+          // create new worker
+          let instance = instance.clone();
+          let req_data = self.context.to_request_data();
+          let stats_sender = self.stats_sender.clone();
+          self.workers.insert(
+            instance.id,
+            CancellableTask::spawn(WORKER_EXIT_TIMEOUT, move |stop| async move {
+              InstanceWorker::init_and_loop(instance, req_data, stop, stats_sender).await?;
+              Ok(())
+            }),
+          );
+        } else if !should_federate {
+          if let Some(worker) = self.workers.remove(&instance.id) {
+            if let Err(e) = worker.cancel().await {
+              tracing::error!("error stopping worker: {e}");
+            }
+          }
+        }
+      }
+      let worker_count = self.workers.len();
+      tracing::info!("Federating to {worker_count}/{total_count} instances ({dead_count} dead, {disallowed_count} disallowed)");
+      tokio::select! {
+        () = sleep(INSTANCES_RECHECK_DELAY) => {},
+        _ = cancel.cancelled() => { return Ok(()) }
+      }
+    }
+  }
+
+  pub async fn cancel(self) -> LemmyResult<()> {
+    drop(self.stats_sender);
+    tracing::warn!(
+      "Waiting for {} workers ({:.2?} max)",
+      self.workers.len(),
+      WORKER_EXIT_TIMEOUT
+    );
+    // the cancel futures need to be awaited concurrently for the shutdown processes to be triggered
+    // concurrently
+    futures::future::join_all(
+      self
+        .workers
+        .into_values()
+        .map(util::CancellableTask::cancel),
+    )
+    .await;
+    self.exit_print.await?;
+    Ok(())
+  }
+}
+
+#[cfg(test)]
+#[allow(clippy::unwrap_used)]
+#[allow(clippy::indexing_slicing)]
+mod test {
+
+  use super::*;
+  use activitypub_federation::config::Data;
+  use chrono::DateTime;
+  use lemmy_db_schema::source::{
+    federation_allowlist::FederationAllowList,
+    federation_blocklist::FederationBlockList,
+    instance::InstanceForm,
+  };
+  use lemmy_utils::error::LemmyError;
+  use serial_test::serial;
+  use std::{
+    collections::HashSet,
+    sync::{Arc, Mutex},
+  };
+  use tokio::{spawn, time::sleep};
+
+  struct TestData {
+    send_manager: SendManager,
+    context: Data<LemmyContext>,
+    instances: Vec<Instance>,
+  }
+  impl TestData {
+    async fn init(process_count: i32, process_index: i32) -> LemmyResult<Self> {
+      let context = LemmyContext::init_test_context().await;
+      let opts = Opts {
+        process_count,
+        process_index,
+      };
+      let federation_config = FederationConfig::builder()
+        .domain("local.com")
+        .app_data(context.clone())
+        .build()
+        .await?;
+
+      let pool = &mut context.pool();
+      let instances = vec![
+        Instance::read_or_create(pool, "alpha.com".to_string()).await?,
+        Instance::read_or_create(pool, "beta.com".to_string()).await?,
+        Instance::read_or_create(pool, "gamma.com".to_string()).await?,
+      ];
+
+      let send_manager = SendManager::new(opts, federation_config);
+      Ok(Self {
+        send_manager,
+        context,
+        instances,
+      })
+    }
+
+    async fn run(&mut self) -> LemmyResult<()> {
+      // start it and cancel after workers are running
+      let cancel = CancellationToken::new();
+      let cancel_ = cancel.clone();
+      spawn(async move {
+        sleep(Duration::from_millis(100)).await;
+        cancel_.cancel();
+      });
+      self.send_manager.do_loop(cancel.clone()).await?;
+      Ok(())
+    }
+
+    async fn cleanup(self) -> LemmyResult<()> {
+      self.send_manager.cancel().await?;
+      Instance::delete_all(&mut self.context.pool()).await?;
+      Ok(())
+    }
+  }
+
+  /// Basic test with default params and only active/allowed instances
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    data.run().await?;
+    assert_eq!(3, data.send_manager.workers.len());
+    let workers: HashSet<_> = data.send_manager.workers.keys().cloned().collect();
+    let instances: HashSet<_> = data.instances.iter().map(|i| i.id).collect();
+    assert_eq!(instances, workers);
+
+    data.cleanup().await?;
+    Ok(())
+  }
+
+  /// Running with multiple processes should start correct workers
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_processes() -> LemmyResult<()> {
+    let active = Arc::new(Mutex::new(vec![]));
+    let execute = |count, index, active: Arc<Mutex<Vec<InstanceId>>>| async move {
+      let mut data = TestData::init(count, index).await?;
+      data.run().await?;
+      assert_eq!(1, data.send_manager.workers.len());
+      for k in data.send_manager.workers.keys() {
+        active.lock().unwrap().push(*k);
+      }
+      data.cleanup().await?;
+      Ok::<(), LemmyError>(())
+    };
+    execute(3, 1, active.clone()).await?;
+    execute(3, 2, active.clone()).await?;
+    execute(3, 3, active.clone()).await?;
+
+    // Should run exactly three workers
+    assert_eq!(3, active.lock().unwrap().len());
+
+    Ok(())
+  }
+
+  /// Use blocklist, should not send to blocked instances
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_blocked() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    let domain = data.instances[0].domain.clone();
+    FederationBlockList::replace(&mut data.context.pool(), Some(vec![domain])).await?;
+    data.run().await?;
+    let workers = &data.send_manager.workers;
+    assert_eq!(2, workers.len());
+    assert!(workers.contains_key(&data.instances[1].id));
+    assert!(workers.contains_key(&data.instances[2].id));
+
+    data.cleanup().await?;
+    Ok(())
+  }
+
+  /// Use allowlist, should only send to allowed instance
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_allowed() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    let domain = data.instances[0].domain.clone();
+    FederationAllowList::replace(&mut data.context.pool(), Some(vec![domain])).await?;
+    data.run().await?;
+    let workers = &data.send_manager.workers;
+    assert_eq!(1, workers.len());
+    assert!(workers.contains_key(&data.instances[0].id));
+
+    data.cleanup().await?;
+    Ok(())
+  }
+
+  /// Mark instance as dead, there should be no worker created for it
+  #[tokio::test]
+  #[serial]
+  async fn test_send_manager_dead() -> LemmyResult<()> {
+    let mut data = TestData::init(1, 1).await?;
+
+    let instance = &data.instances[0];
+    let form = InstanceForm::builder()
+      .domain(instance.domain.clone())
+      .updated(DateTime::from_timestamp(0, 0))
+      .build();
+    Instance::update(&mut data.context.pool(), instance.id, form).await?;
+
+    data.run().await?;
+    let workers = &data.send_manager.workers;
+    assert_eq!(2, workers.len());
+    assert!(workers.contains_key(&data.instances[1].id));
+    assert!(workers.contains_key(&data.instances[2].id));
+
+    data.cleanup().await?;
+    Ok(())
+  }
+}

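Under the new API, callers no longer invoke a free function; they construct a SendManager and turn it into a cancellable background task. A condensed sketch of the intended call site, mirroring the src/lib.rs hunk further down; the Opts values here are placeholders and federation_config is assumed to be a FederationConfig<LemmyContext> built elsewhere:

// Sketch only: start the federation send loop and stop it later on shutdown.
let manager = SendManager::new(
  Opts {
    process_index: 1,
    process_count: 1,
  },
  federation_config,
);
let task = manager.run(); // spawns do_loop() inside a CancellableTask
// ... later, during graceful shutdown:
task.cancel().await?; // cancels the workers and awaits the stats printer
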
crates/federate/src/stats.rs (new file): 97 additions

@@ -0,0 +1,97 @@
+use crate::util::get_latest_activity_id;
+use chrono::Local;
+use diesel::result::Error::NotFound;
+use lemmy_api_common::federate_retry_sleep_duration;
+use lemmy_db_schema::{
+  newtypes::InstanceId,
+  source::{federation_queue_state::FederationQueueState, instance::Instance},
+  utils::{ActualDbPool, DbPool},
+};
+use lemmy_utils::{error::LemmyResult, CACHE_DURATION_FEDERATION};
+use moka::future::Cache;
+use once_cell::sync::Lazy;
+use std::{collections::HashMap, time::Duration};
+use tokio::{sync::mpsc::UnboundedReceiver, time::interval};
+use tracing::{debug, info, warn};
+
+/// every 60s, print the state for every instance. exits if the receiver is done (all senders
+/// dropped)
+pub(crate) async fn receive_print_stats(
+  pool: ActualDbPool,
+  mut receiver: UnboundedReceiver<(InstanceId, FederationQueueState)>,
+) {
+  let pool = &mut DbPool::Pool(&pool);
+  let mut printerval = interval(Duration::from_secs(60));
+  let mut stats = HashMap::new();
+  loop {
+    tokio::select! {
+      ele = receiver.recv() => {
+        match ele {
+          // update stats for instance
+          Some((instance_id, ele)) => {stats.insert(instance_id, ele);},
+          // receiver closed, print stats and exit
+          None => {
+            print_stats(pool, &stats).await;
+            return;
+          }
+        }
+      },
+      _ = printerval.tick() => {
+        print_stats(pool, &stats).await;
+      }
+    }
+  }
+}
+
+async fn print_stats(pool: &mut DbPool<'_>, stats: &HashMap<InstanceId, FederationQueueState>) {
+  let res = print_stats_with_error(pool, stats).await;
+  if let Err(e) = res {
+    warn!("Failed to print stats: {e}");
+  }
+}
+
+async fn print_stats_with_error(
+  pool: &mut DbPool<'_>,
+  stats: &HashMap<InstanceId, FederationQueueState>,
+) -> LemmyResult<()> {
+  static INSTANCE_CACHE: Lazy<Cache<(), Vec<Instance>>> = Lazy::new(|| {
+    Cache::builder()
+      .max_capacity(1)
+      .time_to_live(CACHE_DURATION_FEDERATION)
+      .build()
+  });
+  let instances = INSTANCE_CACHE
+    .try_get_with((), async { Instance::read_all(pool).await })
+    .await?;
+
+  let last_id = get_latest_activity_id(pool).await?;
+
+  // it's expected that the values are a bit out of date, everything < SAVE_STATE_EVERY should be
+  // considered up to date
+  info!("Federation state as of {}:", Local::now().to_rfc3339());
+  // todo: more stats (act/sec, avg http req duration)
+  let mut ok_count = 0;
+  let mut behind_count = 0;
+  for (instance_id, stat) in stats {
+    let domain = &instances
+      .iter()
+      .find(|i| &i.id == instance_id)
+      .ok_or(NotFound)?
+      .domain;
+    let behind = last_id.0 - stat.last_successful_id.map(|e| e.0).unwrap_or(0);
+    if stat.fail_count > 0 {
+      info!(
+        "{domain}: Warning. {behind} behind, {} consecutive fails, current retry delay {:.2?}",
+        stat.fail_count,
+        federate_retry_sleep_duration(stat.fail_count)
+      );
+    } else if behind > 0 {
+      debug!("{}: Ok. {} activities behind", domain, behind);
+      behind_count += 1;
+    } else {
+      ok_count += 1;
+    }
+  }
+  info!("{ok_count} others up to date. {behind_count} instances behind.");
+  Ok(())
+}

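The stats task is wired to the workers through an unbounded mpsc channel keyed by InstanceId; SendManager::new above does exactly this. A stripped-down sketch of the wiring, assuming the types from the new file (pool, instance and state are stand-ins for values owned by the manager and its workers):

use tokio::sync::mpsc::unbounded_channel;

// The printer exits once every sender has been dropped.
let (stats_sender, stats_receiver) = unbounded_channel();
let exit_print = tokio::spawn(receive_print_stats(pool.clone(), stats_receiver));

// Each InstanceWorker periodically reports its queue state:
stats_sender.send((instance.id, state.clone()))?;

// Dropping the last sender lets receive_print_stats print a final report and return.
drop(stats_sender);
exit_print.await?;
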
@@ -17,6 +17,7 @@ use lemmy_db_schema::{
   traits::ApubActor,
   utils::{get_conn, DbPool},
 };
+use lemmy_utils::error::LemmyResult;
 use moka::future::Cache;
 use once_cell::sync::Lazy;
 use reqwest::Url;
@@ -24,6 +25,7 @@ use serde_json::Value;
 use std::{fmt::Debug, future::Future, pin::Pin, sync::Arc, time::Duration};
 use tokio::{task::JoinHandle, time::sleep};
 use tokio_util::sync::CancellationToken;
+use tracing::error;
 
 /// Decrease the delays of the federation queue.
 /// Should only be used for federation tests since it significantly increases CPU and DB load of the
@@ -59,36 +61,29 @@ impl CancellableTask {
   /// spawn a task but with graceful shutdown
   pub fn spawn<F, R: Debug>(
     timeout: Duration,
-    task: impl Fn(CancellationToken) -> F + Send + 'static,
+    task: impl FnOnce(CancellationToken) -> F + Send + 'static,
   ) -> CancellableTask
   where
-    F: Future<Output = R> + Send + 'static,
+    F: Future<Output = LemmyResult<R>> + Send + 'static,
+    R: Send + 'static,
   {
     let stop = CancellationToken::new();
     let stop2 = stop.clone();
-    let task: JoinHandle<()> = tokio::spawn(async move {
-      loop {
-        let res = task(stop2.clone()).await;
-        if stop2.is_cancelled() {
-          return;
-        } else {
-          tracing::warn!("task exited, restarting: {res:?}");
-        }
-      }
-    });
+    let task: JoinHandle<LemmyResult<R>> = tokio::spawn(task(stop2));
     let abort = task.abort_handle();
     CancellableTask {
       f: Box::pin(async move {
        stop.cancel();
        tokio::select! {
          r = task => {
-            r.context("could not join")?;
-            Ok(())
+            if let Err(ref e) = r? {
+              error!("CancellableTask threw error: {e}");
+            }
+            Ok(())
          },
          _ = sleep(timeout) => {
            abort.abort();
-            tracing::warn!("Graceful shutdown timed out, aborting task");
-            Err(anyhow!("task aborted due to timeout"))
+            Err(anyhow!("CancellableTask aborted due to shutdown timeout"))
          }
        }
      }),

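With the new signature, the closure is called exactly once and must resolve to a LemmyResult, so the automatic restart loop is gone and failures surface through the join handle. A hedged usage sketch; the timeout and worker body are placeholders rather than values from the diff:

use std::time::Duration;

// Sketch: spawn a one-shot cancellable task and shut it down gracefully.
let task = CancellableTask::spawn(Duration::from_secs(10), move |cancel_token| async move {
  // ... do work until the token fires ...
  cancel_token.cancelled().await;
  Ok(()) // the future must now resolve to a LemmyResult
});

// Requesting shutdown cancels the token, then waits up to the timeout before aborting.
task.cancel().await?;
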
@@ -22,7 +22,7 @@ use lemmy_db_schema::{
     instance::{Instance, InstanceForm},
     site::Site,
   },
-  utils::{naive_now, DbPool},
+  utils::naive_now,
 };
 use lemmy_db_views_actor::structs::CommunityFollowerView;
 use once_cell::sync::Lazy;
@@ -75,7 +75,7 @@ pub(crate) struct InstanceWorker {
   followed_communities: HashMap<CommunityId, HashSet<Url>>,
   stop: CancellationToken,
   context: Data<LemmyContext>,
-  stats_sender: UnboundedSender<(String, FederationQueueState)>,
+  stats_sender: UnboundedSender<(InstanceId, FederationQueueState)>,
   last_full_communities_fetch: DateTime<Utc>,
   last_incremental_communities_fetch: DateTime<Utc>,
   state: FederationQueueState,
@@ -86,12 +86,11 @@ impl InstanceWorker {
   pub(crate) async fn init_and_loop(
     instance: Instance,
     context: Data<LemmyContext>,
-    pool: &mut DbPool<'_>, /* in theory there's a ref to the pool in context, but i couldn't get
-                            * that to work wrt lifetimes */
     stop: CancellationToken,
-    stats_sender: UnboundedSender<(String, FederationQueueState)>,
+    stats_sender: UnboundedSender<(InstanceId, FederationQueueState)>,
   ) -> Result<(), anyhow::Error> {
-    let state = FederationQueueState::load(pool, instance.id).await?;
+    let mut pool = context.pool();
+    let state = FederationQueueState::load(&mut pool, instance.id).await?;
     let mut worker = InstanceWorker {
       instance,
       site_loaded: false,
@@ -105,32 +104,29 @@ impl InstanceWorker {
       state,
       last_state_insert: Utc.timestamp_nanos(0),
     };
-    worker.loop_until_stopped(pool).await
+    worker.loop_until_stopped().await
   }
   /// loop fetch new activities from db and send them to the inboxes of the given instances
   /// this worker only returns if (a) there is an internal error or (b) the cancellation token is
   /// cancelled (graceful exit)
-  pub(crate) async fn loop_until_stopped(
-    &mut self,
-    pool: &mut DbPool<'_>,
-  ) -> Result<(), anyhow::Error> {
+  pub(crate) async fn loop_until_stopped(&mut self) -> Result<(), anyhow::Error> {
     debug!("Starting federation worker for {}", self.instance.domain);
     let save_state_every = chrono::Duration::from_std(SAVE_STATE_EVERY_TIME).expect("not negative");
 
-    self.update_communities(pool).await?;
+    self.update_communities().await?;
     self.initial_fail_sleep().await?;
     while !self.stop.is_cancelled() {
-      self.loop_batch(pool).await?;
+      self.loop_batch().await?;
       if self.stop.is_cancelled() {
         break;
       }
       if (Utc::now() - self.last_state_insert) > save_state_every {
-        self.save_and_send_state(pool).await?;
+        self.save_and_send_state().await?;
       }
-      self.update_communities(pool).await?;
+      self.update_communities().await?;
     }
     // final update of state in db
-    self.save_and_send_state(pool).await?;
+    self.save_and_send_state().await?;
     Ok(())
   }
 
@@ -155,8 +151,8 @@ impl InstanceWorker {
     Ok(())
   }
   /// send out a batch of CHECK_SAVE_STATE_EVERY_IT activities
-  async fn loop_batch(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
-    let latest_id = get_latest_activity_id(pool).await?;
+  async fn loop_batch(&mut self) -> Result<()> {
+    let latest_id = get_latest_activity_id(&mut self.context.pool()).await?;
     let mut id = if let Some(id) = self.state.last_successful_id {
       id
     } else {
@@ -166,7 +162,7 @@ impl InstanceWorker {
       // skip all past activities:
       self.state.last_successful_id = Some(latest_id);
       // save here to ensure it's not read as 0 again later if no activities have happened
-      self.save_and_send_state(pool).await?;
+      self.save_and_send_state().await?;
       latest_id
     };
     if id >= latest_id {
@@ -184,7 +180,7 @@ impl InstanceWorker {
     {
       id = ActivityId(id.0 + 1);
       processed_activities += 1;
-      let Some(ele) = get_activity_cached(pool, id)
+      let Some(ele) = get_activity_cached(&mut self.context.pool(), id)
         .await
         .context("failed reading activity from db")?
       else {
@@ -192,7 +188,7 @@ impl InstanceWorker {
         self.state.last_successful_id = Some(id);
         continue;
       };
-      if let Err(e) = self.send_retry_loop(pool, &ele.0, &ele.1).await {
+      if let Err(e) = self.send_retry_loop(&ele.0, &ele.1).await {
         warn!(
           "sending {} errored internally, skipping activity: {:?}",
           ele.0.ap_id, e
@@ -213,12 +209,11 @@ impl InstanceWorker {
   // and will return an error if an internal error occurred (send errors cause an infinite loop)
   async fn send_retry_loop(
     &mut self,
-    pool: &mut DbPool<'_>,
     activity: &SentActivity,
     object: &SharedInboxActivities,
   ) -> Result<()> {
     let inbox_urls = self
-      .get_inbox_urls(pool, activity)
+      .get_inbox_urls(activity)
       .await
       .context("failed figuring out inbox urls")?;
     if inbox_urls.is_empty() {
@@ -230,7 +225,7 @@ impl InstanceWorker {
     let Some(actor_apub_id) = &activity.actor_apub_id else {
       return Ok(()); // activity was inserted before persistent queue was activated
     };
-    let actor = get_actor_cached(pool, activity.actor_type, actor_apub_id)
+    let actor = get_actor_cached(&mut self.context.pool(), activity.actor_type, actor_apub_id)
       .await
       .context("failed getting actor instance (was it marked deleted / removed?)")?;
 
@@ -249,7 +244,7 @@ impl InstanceWorker {
         "{}: retrying {:?} attempt {} with delay {retry_delay:.2?}. ({e})",
         self.instance.domain, activity.id, self.state.fail_count
       );
-      self.save_and_send_state(pool).await?;
+      self.save_and_send_state().await?;
       tokio::select! {
         () = sleep(retry_delay) => {},
         () = self.stop.cancelled() => {
@@ -268,7 +263,7 @@ impl InstanceWorker {
         .domain(self.instance.domain.clone())
         .updated(Some(naive_now()))
         .build();
-      Instance::update(pool, self.instance.id, form).await?;
+      Instance::update(&mut self.context.pool(), self.instance.id, form).await?;
     }
   }
   Ok(())
@@ -278,16 +273,12 @@ impl InstanceWorker {
   /// most often this will return 0 values (if instance doesn't care about the activity)
   /// or 1 value (the shared inbox)
   /// > 1 values only happens for non-lemmy software
-  async fn get_inbox_urls(
-    &mut self,
-    pool: &mut DbPool<'_>,
-    activity: &SentActivity,
-  ) -> Result<HashSet<Url>> {
+  async fn get_inbox_urls(&mut self, activity: &SentActivity) -> Result<HashSet<Url>> {
     let mut inbox_urls: HashSet<Url> = HashSet::new();
 
     if activity.send_all_instances {
       if !self.site_loaded {
-        self.site = Site::read_from_instance_id(pool, self.instance.id).await?;
+        self.site = Site::read_from_instance_id(&mut self.context.pool(), self.instance.id).await?;
         self.site_loaded = true;
       }
       if let Some(site) = &self.site {
@@ -312,22 +303,18 @@ impl InstanceWorker {
     Ok(inbox_urls)
   }
 
-  async fn update_communities(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
+  async fn update_communities(&mut self) -> Result<()> {
     if (Utc::now() - self.last_full_communities_fetch) > *FOLLOW_REMOVALS_RECHECK_DELAY {
       // process removals every hour
       (self.followed_communities, self.last_full_communities_fetch) = self
-        .get_communities(pool, self.instance.id, Utc.timestamp_nanos(0))
+        .get_communities(self.instance.id, Utc.timestamp_nanos(0))
        .await?;
       self.last_incremental_communities_fetch = self.last_full_communities_fetch;
     }
     if (Utc::now() - self.last_incremental_communities_fetch) > *FOLLOW_ADDITIONS_RECHECK_DELAY {
       // process additions every minute
       let (news, time) = self
-        .get_communities(
-          pool,
-          self.instance.id,
-          self.last_incremental_communities_fetch,
-        )
+        .get_communities(self.instance.id, self.last_incremental_communities_fetch)
        .await?;
       self.followed_communities.extend(news);
       self.last_incremental_communities_fetch = time;
@@ -339,7 +326,6 @@ impl InstanceWorker {
   /// them
   async fn get_communities(
     &mut self,
-    pool: &mut DbPool<'_>,
     instance_id: InstanceId,
     last_fetch: DateTime<Utc>,
   ) -> Result<(HashMap<CommunityId, HashSet<Url>>, DateTime<Utc>)> {
@@ -347,22 +333,26 @@ impl InstanceWorker {
       Utc::now() - chrono::TimeDelta::try_seconds(10).expect("TimeDelta out of bounds"); // update to time before fetch to ensure overlap. subtract 10s to ensure overlap even if
                                                                                          // published date is not exact
     Ok((
-      CommunityFollowerView::get_instance_followed_community_inboxes(pool, instance_id, last_fetch)
-        .await?
-        .into_iter()
-        .fold(HashMap::new(), |mut map, (c, u)| {
-          map.entry(c).or_default().insert(u.into());
-          map
-        }),
+      CommunityFollowerView::get_instance_followed_community_inboxes(
+        &mut self.context.pool(),
+        instance_id,
+        last_fetch,
+      )
+      .await?
+      .into_iter()
+      .fold(HashMap::new(), |mut map, (c, u)| {
+        map.entry(c).or_default().insert(u.into());
+        map
+      }),
       new_last_fetch,
     ))
   }
-  async fn save_and_send_state(&mut self, pool: &mut DbPool<'_>) -> Result<()> {
+  async fn save_and_send_state(&mut self) -> Result<()> {
     self.last_state_insert = Utc::now();
-    FederationQueueState::upsert(pool, &self.state).await?;
+    FederationQueueState::upsert(&mut self.context.pool(), &self.state).await?;
     self
       .stats_sender
-      .send((self.instance.domain.clone(), self.state.clone()))?;
+      .send((self.instance.id, self.state.clone()))?;
     Ok(())
   }
 }

@@ -1 +1 @@
-Subproject commit 4e7c7ad4fcb4d8618d93ec17d72379367aa085b1
+Subproject commit e9b3b25fa1af7e06c4ffab86624d95da0836ef36

@ -20,11 +20,10 @@ then
|
||||||
cargo test -p $PACKAGE --all-features --no-fail-fast $TEST
|
cargo test -p $PACKAGE --all-features --no-fail-fast $TEST
|
||||||
else
|
else
|
||||||
cargo test --workspace --no-fail-fast
|
cargo test --workspace --no-fail-fast
|
||||||
|
# Testing lemmy utils all features in particular (for ts-rs bindings)
|
||||||
|
cargo test -p lemmy_utils --all-features --no-fail-fast
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Testing lemmy utils all features in particular (for ts-rs bindings)
|
|
||||||
cargo test -p lemmy_utils --all-features --no-fail-fast
|
|
||||||
|
|
||||||
# Add this to do printlns: -- --nocapture
|
# Add this to do printlns: -- --nocapture
|
||||||
|
|
||||||
pg_ctl stop --silent
|
pg_ctl stop --silent
|
||||||
|
|
src/lib.rs: 10 changes

@@ -41,7 +41,7 @@ use lemmy_apub::{
   FEDERATION_HTTP_FETCH_LIMIT,
 };
 use lemmy_db_schema::{schema_setup, source::secret::Secret, utils::build_db_pool};
-use lemmy_federate::{start_stop_federation_workers_cancellable, Opts};
+use lemmy_federate::{Opts, SendManager};
 use lemmy_routes::{feeds, images, nodeinfo, webfinger};
 use lemmy_utils::{
   error::LemmyResult,
@@ -253,14 +253,14 @@ pub async fn start_lemmy_server(args: CmdArgs) -> LemmyResult<()> {
     None
   };
   let federate = (!args.disable_activity_sending).then(|| {
-    start_stop_federation_workers_cancellable(
+    let task = SendManager::new(
       Opts {
         process_index: args.federate_process_index,
         process_count: args.federate_process_count,
       },
-      pool.clone(),
-      federation_config.clone(),
-    )
+      federation_config,
+    );
+    task.run()
   });
   let mut interrupt = tokio::signal::unix::signal(SignalKind::interrupt())?;
   let mut terminate = tokio::signal::unix::signal(SignalKind::terminate())?;