Compare commits


2 commits

SHA1 Message Date
8224b70405 some progress 2022-04-05 14:31:12 +02:00
1bd510fbb8 wip: try to implement parallel crawl 2022-04-05 13:54:39 +02:00
7 changed files with 582 additions and 2999 deletions

Cargo.lock (generated, 2975 changed lines): file diff suppressed because it is too large.

Cargo.toml

@@ -5,18 +5,10 @@ authors = ["Felix Ableitner"]
 edition = "2018"
 
 [dependencies]
-reqwest = { version = "0.11.13", default-features = false, features = ["json", "rustls-tls"] }
-serde = { version = "1.0.149", features = ["derive"] }
-anyhow = "1.0.66"
-tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] }
-futures = "0.3.25"
-serde_json = "1.0.89"
-semver = "1.0.14"
-once_cell = "1.16.0"
-lemmy_api_common = "=0.16.0"
-lemmy_db_schema = "=0.16.0"
-async-recursion = "1.0.0"
-log = "0.4.17"
-derive-new = "0.5.9"
-stderrlog = "0.5.4"
-structopt = "0.3.26"
+reqwest = { version = "0.10.10", default-features = false, features = ["json", "rustls-tls"] }
+serde = { version = "1.0.123", features = ["derive"] }
+anyhow = "1.0.38"
+tokio = { version = "0.2.25", features = ["rt-threaded", "macros"] }
+futures = "0.3.13"
+serde_json = "1.0.64"
+clap = "2.33.3"

src/crawl.rs

@@ -1,135 +1,178 @@
use crate::node_info::{NodeInfo, NodeInfoWellKnown};
use crate::CLIENT;
use anyhow::{anyhow, Error};
use async_recursion::async_recursion;
use crate::federated_instances::GetSiteResponse;
use crate::node_info::NodeInfo;
use crate::REQUEST_TIMEOUT;
use anyhow::anyhow;
use anyhow::Error;
use futures::executor::block_on_stream;
use futures::future::join_all;
use lemmy_api_common::site::GetSiteResponse;
use log::debug;
use reqwest::Url;
use semver::Version;
use std::collections::HashSet;
use std::ops::Deref;
use std::sync::Arc;
use tokio::sync::Mutex;
use futures::stream::FuturesUnordered;
use futures::{future, stream, try_join, StreamExt, TryStreamExt};
use reqwest::Client;
use serde::Serialize;
use std::cmp::max;
use std::collections::VecDeque;
use std::future::Future;
use std::sync::{Arc, Mutex};
#[derive(new)]
pub struct CrawlJob {
domain: String,
current_distance: i32,
params: Arc<CrawlParams>,
}
#[derive(new)]
pub struct CrawlParams {
min_lemmy_version: Version,
exclude_domains: Vec<String>,
pub async fn crawl(
start_instances: Vec<String>,
exclude: Vec<String>,
max_depth: i32,
crawled_instances: Arc<Mutex<HashSet<String>>>,
) -> Result<(Vec<InstanceDetails>, i32), Error> {
let exclude = Arc::new(exclude);
let mut pending_instances: VecDeque<CrawlInstanceTask> = start_instances
.iter()
.map(|s| CrawlInstanceTask::new(s.to_string(), 0, exclude.clone()))
.collect();
let mut crawled_instances = Mutex::new(vec![]);
//let mut instance_details = vec![];
//let mut failed_instances = 0;
let stream = Box::pin(
stream::iter(pending_instances)
.then(|task: CrawlInstanceTask| async {
crawled_instances.lock().unwrap().push(task.domain.clone());
crawl_instance(task, max_depth).await.unwrap()
})
.flat_map(|(instance_details, task)| {
let futures = instance_details.linked_instances.iter().map(|i| {
crawled_instances.lock().unwrap().push(i.clone());
crawl_instance(
CrawlInstanceTask::new(i.clone(), task.depth + 1, task.exclude.clone()),
max_depth,
)
});
stream::iter(futures)
}),
);
let crawl_result: Vec<Result<InstanceDetails, Error>> = stream
.buffer_unordered(10)
.map_ok(|(details, _)| details)
.collect()
.await;
todo!()
/*
// Sort by active monthly users descending
crawl_result.sort_by_key(|i| i.users_active_month);
crawl_result.reverse();
Ok((crawl_result, failed_instances))
*/
}
#[derive(Debug)]
pub struct CrawlResult {
async fn crawl_instance(
task: CrawlInstanceTask,
max_depth: i32,
) -> Result<(InstanceDetails, CrawlInstanceTask), Error> {
if task.depth > max_depth || task.exclude.contains(&task.domain) {
return Err(anyhow!("max depth reached"));
}
Ok((fetch_instance_details(&task.domain).await?, task))
}
#[derive(Serialize, Clone)]
pub struct InstanceDetails {
pub domain: String,
pub node_info: NodeInfo,
pub site_info: Option<GetSiteResponse>,
pub name: String,
pub description: Option<String>,
pub version: String,
pub icon: Option<String>,
pub online_users: i32,
pub total_users: i64,
pub users_active_halfyear: i64,
pub users_active_month: i64,
pub open_registrations: bool,
pub linked_instances_count: i32,
pub require_application: bool,
// The following fields are only used for aggregation, but not shown in output
#[serde(skip)]
pub linked_instances: Vec<String>,
}
impl CrawlJob {
#[async_recursion]
pub async fn crawl(self) -> Vec<Result<CrawlResult, Error>> {
// need to acquire and release mutex before recursing, otherwise it will deadlock
{
let mut crawled_instances = self.params.crawled_instances.deref().lock().await;
if crawled_instances.contains(&self.domain) {
return vec![];
} else {
crawled_instances.insert(self.domain.clone());
}
struct CrawlInstanceTask {
domain: String,
depth: i32,
exclude: Arc<Vec<String>>,
}
impl CrawlInstanceTask {
pub fn new(domain: String, depth: i32, exclude: Arc<Vec<String>>) -> CrawlInstanceTask {
CrawlInstanceTask {
domain,
depth,
exclude,
}
}
}
if self.current_distance > self.params.max_depth
|| self.params.exclude_domains.contains(&self.domain)
{
return vec![];
}
async fn fetch_instance_details(domain: &str) -> Result<InstanceDetails, Error> {
let client = Client::default();
debug!(
"Starting crawl for {}, distance {}",
&self.domain, &self.current_distance
);
let (node_info, site_info) = match self.fetch_instance_details().await {
Ok(o) => o,
Err(e) => return vec![Err(e)],
};
let mut crawl_result = CrawlResult {
domain: self.domain.clone(),
node_info,
site_info: None,
};
let node_info_url = format!("https://{}/nodeinfo/2.0.json", domain);
let node_info_request = client.get(&node_info_url).timeout(REQUEST_TIMEOUT).send();
if let Some(site_info) = site_info {
match Version::parse(&site_info.version) {
Ok(version) => {
if version < self.params.min_lemmy_version {
return vec![Ok(crawl_result)];
}
}
Err(e) => return vec![Err(e.into())],
}
let site_info_url_v2 = format!("https://{}/api/v2/site", domain);
let site_info_request_v2 = client
.get(&site_info_url_v2)
.timeout(REQUEST_TIMEOUT)
.send();
let site_info_url_v3 = format!("https://{}/api/v3/site", domain);
let site_info_request_v3 = client
.get(&site_info_url_v3)
.timeout(REQUEST_TIMEOUT)
.send();
let mut result = vec![];
if let Some(federated) = &site_info.federated_instances {
for domain in federated.linked.iter() {
let crawl_job = CrawlJob::new(
domain.clone(),
self.current_distance + 1,
self.params.clone(),
);
result.push(crawl_job.crawl());
}
}
let (node_info, site_info_v2, site_info_v3) = try_join!(
node_info_request,
site_info_request_v2,
site_info_request_v3
)?;
let node_info: NodeInfo = node_info.json().await?;
let site_info_v2 = site_info_v2.json::<GetSiteResponse>().await.ok();
let site_info_v3 = site_info_v3.json::<GetSiteResponse>().await.ok();
let mut site_info: GetSiteResponse = if let Some(site_info_v2) = site_info_v2 {
site_info_v2
} else if let Some(site_info_v3) = site_info_v3 {
site_info_v3
} else {
return Err(anyhow!("Failed to read site_info"));
};
let mut result2: Vec<Result<CrawlResult, Error>> =
join_all(result).await.into_iter().flatten().collect();
debug!("Successfully finished crawl for {}", &self.domain);
crawl_result.site_info = Some(site_info);
result2.push(Ok(crawl_result));
result2
} else {
vec![Ok(crawl_result)]
if let Some(description) = &site_info.site_view.site.description {
if description.len() > 150 {
site_info.site_view.site.description = None;
}
}
async fn fetch_instance_details(&self) -> Result<(NodeInfo, Option<GetSiteResponse>), Error> {
let rel_node_info: Url = Url::parse("http://nodeinfo.diaspora.software/ns/schema/2.0")
.expect("parse nodeinfo relation url");
let node_info_well_known = CLIENT
.get(&format!("https://{}/.well-known/nodeinfo", &self.domain))
.send()
.await?
.json::<NodeInfoWellKnown>()
.await?;
let node_info_url = node_info_well_known
.links
.into_iter()
.find(|l| l.rel == rel_node_info)
.ok_or_else(|| anyhow!("failed to find nodeinfo link for {}", &self.domain))?
.href;
let node_info = CLIENT
.get(node_info_url)
.send()
.await?
.json::<NodeInfo>()
.await?;
let site_info = CLIENT
.get(&format!("https://{}/api/v3/site", &self.domain))
.send()
.await?
.json::<GetSiteResponse>()
.await
.ok();
Ok((node_info, site_info))
}
let require_application = site_info
.site_view
.site
.require_application
.unwrap_or(false);
let linked_instances: Vec<String> = site_info
.federated_instances
.map(|f| f.linked)
.unwrap_or_default()
.iter()
.map(|l| l.to_lowercase())
.collect();
Ok(InstanceDetails {
domain: domain.to_owned(),
name: site_info.site_view.site.name,
description: site_info.site_view.site.description,
version: node_info.software.version,
icon: site_info.site_view.site.icon,
online_users: site_info.online as i32,
total_users: node_info.usage.users.total,
users_active_halfyear: node_info.usage.users.active_halfyear,
users_active_month: node_info.usage.users.active_month,
open_registrations: node_info.open_registrations,
linked_instances_count: linked_instances.len() as i32,
require_application,
linked_instances,
})
}
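
The rewritten crawl() above is left unfinished: the combinator chain ends in todo!(), the final sort is commented out, and the Mutex-wrapped crawled_instances list has to be touched from inside the stream adapters. As a rough sketch only (not code from either commit), this is one way a breadth-first crawl with a visited set, a depth cutoff and a concurrency cap comparable to buffer_unordered(10) can be driven from a single loop; crawl_one and MAX_CONCURRENT are hypothetical stand-ins, not names from this repository.

use futures::stream::{FuturesUnordered, StreamExt};
use std::collections::{HashSet, VecDeque};

const MAX_CONCURRENT: usize = 10;

// Hypothetical stand-in for fetch_instance_details(): returns the crawled
// domain, the instances it links to, and the depth it was reached at.
async fn crawl_one(domain: String, depth: i32) -> (String, Vec<String>, i32) {
    (domain, vec![], depth)
}

async fn crawl(start: Vec<String>, exclude: Vec<String>, max_depth: i32) -> Vec<String> {
    // Excluded domains are seeded into the visited set so they are never fetched.
    let mut visited: HashSet<String> = exclude.into_iter().collect();
    let mut pending: VecDeque<(String, i32)> = start.into_iter().map(|d| (d, 0)).collect();
    let mut in_flight = FuturesUnordered::new();
    let mut crawled = Vec::new();

    loop {
        // Top up the in-flight set from the queue, skipping anything already seen.
        while in_flight.len() < MAX_CONCURRENT {
            match pending.pop_front() {
                Some((domain, depth)) => {
                    if visited.insert(domain.clone()) {
                        in_flight.push(crawl_one(domain, depth));
                    }
                }
                None => break,
            }
        }
        // Each finished fetch may discover new instances for the queue.
        match in_flight.next().await {
            Some((domain, linked, depth)) => {
                crawled.push(domain);
                if depth < max_depth {
                    for next in linked {
                        pending.push_back((next, depth + 1));
                    }
                }
            }
            // Nothing in flight and nothing left to start: the crawl is done.
            None => break,
        }
    }
    crawled
}

#[tokio::main]
async fn main() {
    let found = crawl(vec!["lemmy.ml".into()], vec![], 2).await;
    println!("crawled {} instances", found.len());
}

Keeping the bookkeeping in one task avoids the shared Mutex<Vec<_>> that the commit moves in and out of the stream combinators.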

src/federated_instances.rs

@@ -0,0 +1,28 @@
use serde::Deserialize;
#[derive(Deserialize, Debug, Clone)]
pub struct GetSiteResponse {
pub site_view: SiteView,
pub online: usize,
pub federated_instances: Option<FederatedInstances>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct FederatedInstances {
pub linked: Vec<String>,
pub allowed: Option<Vec<String>>,
pub blocked: Option<Vec<String>>,
}
#[derive(Deserialize, Debug, Clone)]
pub struct SiteView {
pub site: Site,
}
#[derive(Deserialize, Debug, Clone)]
pub struct Site {
pub name: String,
pub icon: Option<String>,
pub description: Option<String>,
pub require_application: Option<bool>,
}
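
These hand-rolled response types stand in for the lemmy_api_common / lemmy_db_schema dependencies used on the other side of the diff. A minimal, self-contained sketch (not part of the diff) of what the Deserialize derives buy: decoding a trimmed /api/v3/site payload with serde_json. The structs below mirror the definitions above but keep only the fields this example reads, and the JSON sample is invented for illustration.

use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct GetSiteResponse {
    site_view: SiteView,
    online: usize,
    federated_instances: Option<FederatedInstances>,
}

#[derive(Deserialize, Debug)]
struct SiteView {
    site: Site,
}

#[derive(Deserialize, Debug)]
struct Site {
    name: String,
    description: Option<String>,
}

#[derive(Deserialize, Debug)]
struct FederatedInstances {
    linked: Vec<String>,
}

fn main() -> Result<(), serde_json::Error> {
    let raw = r#"{
        "site_view": { "site": { "name": "Example Instance", "description": null } },
        "online": 3,
        "federated_instances": { "linked": ["other.example", "another.example"] }
    }"#;
    let site: GetSiteResponse = serde_json::from_str(raw)?;
    let linked = site.federated_instances.map(|f| f.linked.len()).unwrap_or(0);
    println!("{}: {} online, {} linked", site.site_view.site.name, site.online, linked);
    Ok(())
}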

src/lib.rs

@@ -1,138 +1,11 @@
#[macro_use]
extern crate derive_new;
use crate::crawl::{CrawlJob, CrawlParams, CrawlResult};
use crate::node_info::{NodeInfo, NodeInfoUsage, NodeInfoUsers};
use anyhow::Error;
use futures::future::join_all;
use lemmy_api_common::site::GetSiteResponse;
use log::warn;
use once_cell::sync::Lazy;
use reqwest::{Client, ClientBuilder};
use semver::Version;
use serde::Serialize;
use std::collections::HashSet;
use std::sync::Arc;
use std::time::Duration;
use tokio::sync::Mutex;
pub mod crawl;
mod node_info;
pub mod federated_instances;
pub mod node_info;
const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
static CLIENT: Lazy<Client> = Lazy::new(|| {
ClientBuilder::new()
.timeout(REQUEST_TIMEOUT)
.user_agent("lemmy-stats-crawler")
.build()
.expect("build reqwest client")
});
#[derive(Serialize, Debug)]
pub struct CrawlResult2 {
pub domain: String,
pub site_info: GetSiteResponse,
pub federated_counts: Option<NodeInfoUsage>,
}
pub async fn start_crawl(
start_instances: Vec<String>,
exclude_domains: Vec<String>,
max_distance: i32,
) -> Result<Vec<CrawlResult2>, Error> {
let params = Arc::new(CrawlParams::new(
min_lemmy_version().await?,
exclude_domains,
max_distance,
Arc::new(Mutex::new(HashSet::new())),
));
let mut jobs = vec![];
for domain in start_instances.into_iter() {
let job = CrawlJob::new(domain, 0, params.clone());
jobs.push(job.crawl());
}
let crawl_results: Vec<CrawlResult> = join_all(jobs)
.await
.into_iter()
.flatten()
.inspect(|r| {
if let Err(e) = r {
warn!("{}", e)
}
})
.filter_map(Result::ok)
.collect();
let mut crawl_results = calculate_federated_site_aggregates(crawl_results)?;
// Sort by active monthly users descending
crawl_results.sort_unstable_by_key(|i| {
i.site_info
.site_view
.as_ref()
.map(|s| s.counts.users_active_month)
.unwrap_or(0)
});
crawl_results.reverse();
Ok(crawl_results)
}
/// calculate minimum allowed lemmy version based on current version. in case of current version
/// 0.16.3, the minimum from this function is 0.15.3. this is to avoid rejecting all instances on
/// the previous version when a major lemmy release is published.
async fn min_lemmy_version() -> Result<Version, Error> {
let lemmy_version_url = "https://raw.githubusercontent.com/LemmyNet/lemmy-ansible/main/VERSION";
let req = CLIENT
.get(lemmy_version_url)
.timeout(REQUEST_TIMEOUT)
.send()
.await?;
let mut version = Version::parse(req.text().await?.trim())?;
version.minor -= 1;
Ok(version)
}
fn calculate_federated_site_aggregates(
crawl_results: Vec<CrawlResult>,
) -> Result<Vec<CrawlResult2>, Error> {
let node_info: Vec<(String, NodeInfo)> = crawl_results
.iter()
.map(|c| (c.domain.clone(), c.node_info.clone()))
.collect();
let lemmy_instances: Vec<(String, GetSiteResponse)> = crawl_results
.into_iter()
.filter_map(|c| {
let domain = c.domain;
c.site_info.map(|c2| (domain, c2))
})
.collect();
let mut ret = vec![];
for instance in &lemmy_instances {
let federated_counts = if let Some(federated_instances) = &instance.1.federated_instances {
node_info
.iter()
.filter(|i| federated_instances.linked.contains(&i.0) || i.0 == instance.0)
.map(|i| i.1.usage.clone())
.reduce(|a, b| NodeInfoUsage {
users: NodeInfoUsers {
total: a.users.total + b.users.total,
active_halfyear: a.users.active_halfyear + b.users.active_halfyear,
active_month: a.users.active_month + b.users.active_month,
},
posts: a.posts + b.posts,
comments: a.comments + b.comments,
})
} else {
None
};
// TODO: workaround because GetSiteResponse doesnt implement clone
let site_info = serde_json::from_str(&serde_json::to_string(&instance.1)?)?;
ret.push(CrawlResult2 {
domain: instance.0.clone(),
site_info,
federated_counts,
});
}
Ok(ret)
}
pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
pub const DEFAULT_START_INSTANCES: &str = "lemmy.ml";
pub const DEFAULT_MAX_CRAWL_DEPTH: &str = "20";
pub const EXCLUDE_INSTANCES: &str =
"ds9.lemmy.ml, enterprise.lemmy.ml, voyager.lemmy.ml, test.lemmy.ml";

src/main.rs

@@ -1,91 +1,73 @@
use anyhow::Error;
use lemmy_stats_crawler::{start_crawl, CrawlResult2};
use clap::{App, Arg};
use lemmy_stats_crawler::crawl::{crawl, InstanceDetails};
use lemmy_stats_crawler::{DEFAULT_MAX_CRAWL_DEPTH, DEFAULT_START_INSTANCES, EXCLUDE_INSTANCES};
use serde::Serialize;
use structopt::StructOpt;
#[derive(StructOpt, Debug)]
#[structopt()]
struct Parameters {
#[structopt(short, long, use_delimiter = true, default_value = "lemmy.ml")]
start_instances: Vec<String>,
#[structopt(
short,
long,
use_delimiter = true,
default_value = "ds9.lemmy.ml,enterprise.lemmy.ml,voyager.lemmy.ml,test.lemmy.ml"
)]
exclude_instances: Vec<String>,
#[structopt(short, long, default_value = "20")]
max_crawl_distance: i32,
/// Silence all output
#[structopt(short, long)]
quiet: bool,
}
#[tokio::main]
pub async fn main() -> Result<(), Error> {
let params = Parameters::from_args();
stderrlog::new()
.module(module_path!())
.quiet(params.quiet)
.verbosity(1)
.init()?;
let matches = App::new("Lemmy Stats Crawler")
.arg(
Arg::with_name("start-instances")
.long("start-instances")
.takes_value(true),
)
.arg(Arg::with_name("exclude").long("exclude").takes_value(true))
.arg(
Arg::with_name("max-crawl-depth")
.long("max-crawl-depth")
.takes_value(true),
)
.get_matches();
let start_instances: Vec<String> = matches
.value_of("start-instances")
.unwrap_or(DEFAULT_START_INSTANCES)
.split(',')
.map(|s| s.trim().to_string())
.collect();
let exclude: Vec<String> = matches
.value_of("exclude")
.unwrap_or(EXCLUDE_INSTANCES)
.split(',')
.map(|s| s.trim().to_string())
.collect();
let max_crawl_depth: i32 = matches
.value_of("max-crawl-depth")
.unwrap_or(DEFAULT_MAX_CRAWL_DEPTH)
.parse()?;
eprintln!("Crawling...");
let instance_details = start_crawl(
params.start_instances,
params.exclude_instances,
params.max_crawl_distance,
)
.await?;
let total_stats = aggregate(instance_details);
let (instance_details, failed_instances) =
crawl(start_instances, exclude, max_crawl_depth).await?;
let total_stats = aggregate(instance_details, failed_instances);
println!("{}", serde_json::to_string_pretty(&total_stats)?);
Ok(())
}
// TODO: lemmy stores these numbers in SiteAggregates, would be good to simply use that as a member
// (to avoid many members). but SiteAggregates also has id, site_id fields
#[derive(Serialize)]
struct TotalStats {
crawled_instances: i32,
online_users: usize,
failed_instances: i32,
total_users: i64,
users_active_day: i64,
users_active_week: i64,
users_active_month: i64,
users_active_halfyear: i64,
instance_details: Vec<CrawlResult2>,
total_online_users: i32,
instance_details: Vec<InstanceDetails>,
}
fn aggregate(instance_details: Vec<CrawlResult2>) -> TotalStats {
let mut online_users = 0;
let mut total_users = 0;
let mut users_active_day = 0;
let mut users_active_week = 0;
let mut users_active_month = 0;
let mut users_active_halfyear = 0;
fn aggregate(instance_details: Vec<InstanceDetails>, failed_instances: i32) -> TotalStats {
let mut crawled_instances = 0;
let mut total_users = 0;
let mut total_online_users = 0;
for i in &instance_details {
crawled_instances += 1;
online_users += i.site_info.online;
if let Some(site_view) = &i.site_info.site_view {
total_users += site_view.counts.users;
users_active_day += site_view.counts.users_active_day;
users_active_week += site_view.counts.users_active_week;
users_active_month += site_view.counts.users_active_month;
users_active_halfyear += site_view.counts.users_active_half_year;
}
total_users += i.total_users;
total_online_users += i.online_users;
}
TotalStats {
crawled_instances,
online_users,
failed_instances,
total_users,
users_active_day,
users_active_week,
users_active_halfyear,
users_active_month,
total_online_users,
instance_details,
}
}

src/node_info.rs

@@ -1,18 +1,6 @@
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde::Deserialize;
#[derive(Deserialize, Debug)]
pub struct NodeInfoWellKnown {
pub links: Vec<NodeInfoWellKnownLinks>,
}
#[derive(Deserialize, Debug)]
pub struct NodeInfoWellKnownLinks {
pub rel: Url,
pub href: Url,
}
#[derive(Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct NodeInfo {
pub version: String,
@ -22,24 +10,22 @@ pub struct NodeInfo {
pub open_registrations: bool,
}
#[derive(Deserialize, Debug, Clone)]
#[derive(Deserialize, Debug)]
pub struct NodeInfoSoftware {
pub name: String,
pub version: String,
}
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(rename_all = "camelCase", default)]
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct NodeInfoUsage {
pub users: NodeInfoUsers,
#[serde(rename(deserialize = "localPosts"))]
pub posts: i64,
#[serde(rename(deserialize = "localComments"))]
pub comments: i64,
pub local_posts: i64,
pub local_comments: i64,
}
#[derive(Deserialize, Serialize, Debug, Clone, Default)]
#[serde(rename_all = "camelCase", default)]
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct NodeInfoUsers {
pub total: i64,
pub active_halfyear: i64,
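
The NodeInfoUsage change in this file trades the explicit rename(deserialize = "localPosts") / rename(deserialize = "localComments") attributes on posts/comments for field names (local_posts, local_comments) that rename_all = "camelCase" already maps to the same keys. A small, self-contained illustration (not part of the diff) that both spellings read the same JSON; the struct names here are invented.

use serde::Deserialize;

// Variant with an explicit per-field rename, as on one side of the hunk above.
#[derive(Deserialize, Debug)]
struct UsageExplicit {
    #[serde(rename(deserialize = "localPosts"))]
    posts: i64,
}

// Variant relying on rename_all: local_posts is read from the "localPosts" key.
#[derive(Deserialize, Debug)]
#[serde(rename_all = "camelCase")]
struct UsageCamel {
    local_posts: i64,
}

fn main() -> Result<(), serde_json::Error> {
    let raw = r#"{ "localPosts": 42 }"#;
    let explicit: UsageExplicit = serde_json::from_str(raw)?;
    let camel: UsageCamel = serde_json::from_str(raw)?;
    assert_eq!(explicit.posts, camel.local_posts);
    println!("both read localPosts = {}", camel.local_posts);
    Ok(())
}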