Recursive, parallel crawl #11

Merged
nutomic merged 2 commits from recursive-crawl into main 2022-05-13 11:37:53 +00:00
6 changed files with 185 additions and 116 deletions
Showing only changes of commit c254e50211.

Cargo.lock (generated)

@@ -220,6 +220,17 @@ dependencies = [
  "event-listener",
 ]

+[[package]]
+name = "async-recursion"
+version = "1.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2cda8f4bcc10624c4e85bc66b3f452cca98cfa5ca002dc83a16aad2367641bea"
+dependencies = [
+ "proc-macro2 1.0.37",
+ "quote 1.0.18",
+ "syn 1.0.92",
+]
+
 [[package]]
 name = "async-trait"
 version = "0.1.53"

@@ -560,6 +571,17 @@ dependencies = [
  "syn 1.0.92",
 ]

+[[package]]
+name = "derive-new"
+version = "0.5.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "3418329ca0ad70234b9735dc4ceed10af4df60eff9c8e7b06cb5e520d92c3535"
+dependencies = [
+ "proc-macro2 1.0.37",
+ "quote 1.0.18",
+ "syn 1.0.92",
+]
+
 [[package]]
 name = "derive_more"
 version = "0.99.17"

@@ -1295,9 +1317,12 @@ name = "lemmy-stats-crawler"
 version = "0.1.0"
 dependencies = [
  "anyhow",
+ "async-recursion",
  "clap",
+ "derive-new",
  "futures",
  "lemmy_api_common",
+ "log",
  "once_cell",
  "reqwest",
  "semver",

Cargo.toml

@@ -15,3 +15,6 @@ clap = "3.1.15"
 semver = "1.0.9"
 once_cell = "1.10.0"
 lemmy_api_common = "0.16.0"
+async-recursion = "1.0.0"
+log = "0.4.17"
+derive-new = "0.5.9"
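
Note: the three new dependencies map directly onto the rewrite below. async-recursion makes the recursive async crawl method possible, log replaces ad-hoc eprintln! reporting, and derive-new generates boilerplate `new` constructors for the crawl structs. A minimal sketch of what derive-new provides, using a hypothetical Demo struct rather than anything from this PR:

#[macro_use]
extern crate derive_new;

// derive-new generates `pub fn new(...)` taking each field in declaration order.
#[derive(new, Debug)]
struct Demo {
    domain: String,
    depth: i32,
}

fn main() {
    // Equivalent to `Demo { domain: "lemmy.ml".to_string(), depth: 0 }`.
    let demo = Demo::new("lemmy.ml".to_string(), 0);
    println!("{:?}", demo);
}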

src/crawl.rs

@@ -1,66 +1,16 @@
+use crate::CLIENT;
 use crate::REQUEST_TIMEOUT;
-use anyhow::anyhow;
 use anyhow::Error;
+use async_recursion::async_recursion;
+use futures::future::join_all;
 use lemmy_api_common::site::GetSiteResponse;
-use once_cell::sync::Lazy;
-use reqwest::Client;
+use log::info;
 use semver::Version;
 use serde::Serialize;
-use std::collections::VecDeque;
+use std::collections::HashSet;
+use std::ops::Deref;
+use std::sync::Arc;
+use tokio::sync::Mutex;

-static CLIENT: Lazy<Client> = Lazy::new(Client::default);
-
-pub async fn crawl(
-    start_instances: Vec<String>,
-    exclude: Vec<String>,
-    max_depth: i32,
-) -> Result<(Vec<InstanceDetails>, i32), Error> {
-    let mut pending_instances: VecDeque<CrawlInstance> = start_instances
-        .iter()
-        .map(|s| CrawlInstance::new(s.to_string(), 0))
-        .collect();
-    let min_lemmy_version = min_lemmy_version().await?;
-    let mut crawled_instances = vec![];
-    let mut instance_details = vec![];
-    let mut failed_instances = 0;
-    while let Some(current_instance) = pending_instances.pop_back() {
-        crawled_instances.push(current_instance.domain.clone());
-        if current_instance.depth > max_depth || exclude.contains(&current_instance.domain) {
-            continue;
-        }
-        match fetch_instance_details(&current_instance.domain, &min_lemmy_version).await {
-            Ok(details) => {
-                if let Some(federated) = &details.site_info.federated_instances.as_ref() {
-                    for i in &federated.linked {
-                        let is_in_crawled = crawled_instances.contains(i);
-                        let is_in_pending = pending_instances.iter().any(|p| &p.domain == i);
-                        if !is_in_crawled && !is_in_pending {
-                            let ci = CrawlInstance::new(i.clone(), current_instance.depth + 1);
-                            pending_instances.push_back(ci);
-                        }
-                    }
-                }
-                instance_details.push(details);
-            }
-            Err(e) => {
-                failed_instances += 1;
-                eprintln!("Failed to crawl {}: {}", current_instance.domain, e)
-            }
-        }
-    }
-
-    // Sort by active monthly users descending
-    instance_details.sort_by_key(|i| {
-        i.site_info
-            .site_view
-            .as_ref()
-            .map(|s| s.counts.users_active_month)
-            .unwrap_or(0)
-    });
-    instance_details.reverse();
-    Ok((instance_details, failed_instances))
-}

 #[derive(Serialize, Debug)]
 pub struct InstanceDetails {
@@ -68,54 +18,76 @@ pub struct InstanceDetails {
     pub site_info: GetSiteResponse,
 }

-struct CrawlInstance {
+#[derive(new)]
+pub struct CrawlParams {
+    min_lemmy_version: Version,
+    exclude_domains: Vec<String>,
+    max_depth: i32,
+    crawled_instances: Arc<Mutex<HashSet<String>>>,
+}
+
+#[derive(new)]
+pub struct CrawlJob {
     domain: String,
-    depth: i32,
+    current_depth: i32,
+    params: Arc<CrawlParams>,
 }

-impl CrawlInstance {
-    pub fn new(domain: String, depth: i32) -> CrawlInstance {
-        CrawlInstance { domain, depth }
-    }
-}
-
-async fn fetch_instance_details(
-    domain: &str,
-    min_lemmy_version: &Version,
-) -> Result<InstanceDetails, Error> {
-    let client = Client::default();
-    let site_info_url = format!("https://{}/api/v3/site", domain);
-    let site_info = client
-        .get(&site_info_url)
-        .timeout(REQUEST_TIMEOUT)
-        .send()
-        .await?
-        .json::<GetSiteResponse>()
-        .await?;
-    let version = Version::parse(&site_info.version)?;
-    if &version < min_lemmy_version {
-        return Err(anyhow!("lemmy version is too old ({})", version));
-    }
-    Ok(InstanceDetails {
-        domain: domain.to_owned(),
-        site_info,
-    })
-}
-
-/// calculate minimum allowed lemmy version based on current version. in case of current version
-/// 0.16.3, the minimum from this function is 0.15.3. this is to avoid rejecting all instances on
-/// the previous version when a major lemmy release is published.
-async fn min_lemmy_version() -> Result<Version, Error> {
-    let lemmy_version_url = "https://raw.githubusercontent.com/LemmyNet/lemmy-ansible/main/VERSION";
-    let req = CLIENT
-        .get(lemmy_version_url)
-        .timeout(REQUEST_TIMEOUT)
-        .send()
-        .await?;
-    let mut version = Version::parse(req.text().await?.trim())?;
-    version.minor -= 1;
-    Ok(version)
-}
+impl CrawlJob {
+    #[async_recursion]
+    pub async fn crawl(self) -> Result<Vec<Result<InstanceDetails, Error>>, Error> {
+        // need to acquire and release mutex before recursing, otherwise it will deadlock
+        {
+            let mut crawled_instances = self.params.crawled_instances.deref().lock().await;
+            if crawled_instances.contains(&self.domain) {
+                return Ok(vec![]);
+            } else {
+                crawled_instances.insert(self.domain.clone());
+            }
+        }
+
+        if self.current_depth > self.params.max_depth
+            || self.params.exclude_domains.contains(&self.domain)
+        {
+            return Ok(vec![]);
+        }
+        info!("Starting crawl for {}", &self.domain);
+
+        let site_info_url = format!("https://{}/api/v3/site", &self.domain);
+        let site_info = CLIENT
+            .get(&site_info_url)
+            .timeout(REQUEST_TIMEOUT)
+            .send()
+            .await?
+            .json::<GetSiteResponse>()
+            .await?;
+        let version = Version::parse(&site_info.version)?;
+        if version < self.params.min_lemmy_version {
+            return Ok(vec![]);
+        }
+
+        let mut result = vec![];
+        if let Some(federated) = &site_info.federated_instances {
+            for domain in federated.linked.iter() {
+                let crawl_job =
+                    CrawlJob::new(domain.clone(), self.current_depth + 1, self.params.clone());
+                result.push(crawl_job.crawl());
+            }
+        }
+
+        let mut result2: Vec<Result<InstanceDetails, Error>> = join_all(result)
+            .await
+            .into_iter()
+            .filter_map(|r| r.ok())
+            .flat_map(|r| r.into_iter())
+            .collect();
+        info!("Successfully finished crawl for {}", &self.domain);
+        result2.push(Ok(InstanceDetails {
+            domain: self.domain,
+            site_info,
+        }));
+
+        Ok(result2)
+    }
+}
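
The "acquire and release mutex before recursing" comment is the crux of the parallel design: each job must drop the lock guard before awaiting its children, because every child locks the same shared set and would otherwise wait forever on its parent. A standalone sketch of that scoped-lock pattern, with illustrative names not taken from this PR:

use std::collections::HashSet;
use std::sync::Arc;
use tokio::sync::Mutex;

// Returns true if `domain` had not been seen before. The guard lives only
// inside this function, so it is already released by the time the caller
// awaits recursive child jobs that need the same lock.
async fn try_mark_visited(seen: &Arc<Mutex<HashSet<String>>>, domain: &str) -> bool {
    let mut guard = seen.lock().await;
    guard.insert(domain.to_string())
}

#[tokio::main]
async fn main() {
    let seen = Arc::new(Mutex::new(HashSet::new()));
    assert!(try_mark_visited(&seen, "lemmy.ml").await);
    assert!(!try_mark_visited(&seen, "lemmy.ml").await); // already crawled
}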

src/defaults.rs (new file)

@@ -0,0 +1,4 @@
+pub const DEFAULT_START_INSTANCES: &str = "lemmy.ml";
+pub const DEFAULT_MAX_CRAWL_DEPTH: &str = "20";
+pub const EXCLUDE_INSTANCES: &str =
+    "ds9.lemmy.ml, enterprise.lemmy.ml, voyager.lemmy.ml, test.lemmy.ml";

src/lib.rs

@@ -1,9 +1,74 @@
+#[macro_use]
+extern crate derive_new;
+
+use crate::crawl::{CrawlJob, CrawlParams, InstanceDetails};
+use anyhow::Error;
+use futures::future::join_all;
+use once_cell::sync::Lazy;
+use reqwest::Client;
+use semver::Version;
+use std::collections::HashSet;
+use std::sync::Arc;
 use std::time::Duration;
+use tokio::sync::Mutex;

 pub mod crawl;
+pub mod defaults;

 pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
-pub const DEFAULT_START_INSTANCES: &str = "lemmy.ml";
-pub const DEFAULT_MAX_CRAWL_DEPTH: &str = "20";
-pub const EXCLUDE_INSTANCES: &str =
-    "ds9.lemmy.ml, enterprise.lemmy.ml, voyager.lemmy.ml, test.lemmy.ml";
+
+static CLIENT: Lazy<Client> = Lazy::new(Client::default);
+
+pub async fn start_crawl(
+    start_instances: Vec<String>,
+    exclude_domains: Vec<String>,
+    max_depth: i32,
+) -> Result<Vec<InstanceDetails>, Error> {
+    let params = Arc::new(CrawlParams::new(
+        min_lemmy_version().await?,
+        exclude_domains,
+        max_depth,
+        Arc::new(Mutex::new(HashSet::new())),
+    ));
+    let mut jobs = vec![];
+    for domain in start_instances.into_iter() {
+        let job = CrawlJob::new(domain, 0, params.clone());
+        jobs.push(job.crawl());
+    }
+
+    // TODO: optionally log the errors
+    let mut instance_details: Vec<InstanceDetails> = join_all(jobs)
+        .await
+        .into_iter()
+        .filter_map(|r| r.ok())
+        .flat_map(|r| r.into_iter())
+        .filter_map(|r| r.ok())
+        .collect();
+
+    // Sort by active monthly users descending
+    instance_details.sort_unstable_by_key(|i| {
+        i.site_info
+            .site_view
+            .as_ref()
+            .map(|s| s.counts.users_active_month)
+            .unwrap_or(0)
+    });
+    instance_details.reverse();
+
+    Ok(instance_details)
+}
+
+/// calculate minimum allowed lemmy version based on current version. in case of current version
+/// 0.16.3, the minimum from this function is 0.15.3. this is to avoid rejecting all instances on
+/// the previous version when a major lemmy release is published.
+async fn min_lemmy_version() -> Result<Version, Error> {
+    let lemmy_version_url = "https://raw.githubusercontent.com/LemmyNet/lemmy-ansible/main/VERSION";
+    let req = CLIENT
+        .get(lemmy_version_url)
+        .timeout(REQUEST_TIMEOUT)
+        .send()
+        .await?;
+    let mut version = Version::parse(req.text().await?.trim())?;
+    version.minor -= 1;
+    Ok(version)
+}
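
The doc comment on min_lemmy_version encodes a one-minor-release tolerance. A quick worked check of that arithmetic with the semver crate; version_floor is an illustrative name, not part of the crate:

use semver::Version;

// Current version 0.16.3 -> minimum allowed 0.15.3, as the doc comment says.
fn version_floor(current: &str) -> Version {
    let mut v = Version::parse(current).expect("valid semver");
    v.minor -= 1;
    v
}

fn main() {
    let floor = version_floor("0.16.3");
    assert_eq!(floor, Version::parse("0.15.3").unwrap());
    // An instance still on the previous minor release is accepted...
    assert!(Version::parse("0.15.3").unwrap() >= floor);
    // ...while anything older is skipped by the crawler.
    assert!(Version::parse("0.14.0").unwrap() < floor);
}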

src/main.rs

@@ -1,7 +1,10 @@
 use anyhow::Error;
 use clap::{Arg, Command};
-use lemmy_stats_crawler::crawl::{crawl, InstanceDetails};
-use lemmy_stats_crawler::{DEFAULT_MAX_CRAWL_DEPTH, DEFAULT_START_INSTANCES, EXCLUDE_INSTANCES};
+use lemmy_stats_crawler::crawl::InstanceDetails;
+use lemmy_stats_crawler::defaults::{
+    DEFAULT_MAX_CRAWL_DEPTH, DEFAULT_START_INSTANCES, EXCLUDE_INSTANCES,
+};
+use lemmy_stats_crawler::start_crawl;
 use serde::Serialize;

 #[tokio::main]

@@ -37,9 +40,8 @@ pub async fn main() -> Result<(), Error> {
         .parse()?;

     eprintln!("Crawling...");
-    let (instance_details, failed_instances) =
-        crawl(start_instances, exclude, max_crawl_depth).await?;
-    let total_stats = aggregate(instance_details, failed_instances);
+    let instance_details = start_crawl(start_instances, exclude, max_crawl_depth).await?;
+    let total_stats = aggregate(instance_details);

     println!("{}", serde_json::to_string_pretty(&total_stats)?);
     Ok(())

@@ -48,7 +50,6 @@ pub async fn main() -> Result<(), Error> {
 #[derive(Serialize)]
 struct TotalStats {
     crawled_instances: i32,
-    failed_instances: i32,
     online_users: usize,
     total_users: i64,
     users_active_day: i64,

@@ -58,7 +59,7 @@ struct TotalStats {
     instance_details: Vec<InstanceDetails>,
 }

-fn aggregate(instance_details: Vec<InstanceDetails>, failed_instances: i32) -> TotalStats {
+fn aggregate(instance_details: Vec<InstanceDetails>) -> TotalStats {
     let mut online_users = 0;
     let mut total_users = 0;
     let mut users_active_day = 0;

@@ -79,7 +80,6 @@ fn aggregate(instance_details: Vec<InstanceDetails>, failed_instances: i32) -> TotalStats {
     }
     TotalStats {
         crawled_instances,
-        failed_instances,
         online_users,
         total_users,
         users_active_day,
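
For reference, a hypothetical minimal caller of the new start_crawl entry point, mirroring what main.rs now does (the argument values are examples only):

use lemmy_stats_crawler::start_crawl;

#[tokio::main]
async fn main() -> Result<(), anyhow::Error> {
    // Crawl outward from lemmy.ml with no exclusions, to depth 1.
    let instances = start_crawl(vec!["lemmy.ml".to_string()], vec![], 1).await?;
    for instance in instances {
        println!("{}", instance.domain);
    }
    Ok(())
}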