Compare commits
No commits in common. "main" and "lemmy-structs" have entirely different histories.
main...lemmy-structs
6 changed files with 817 additions and 1135 deletions
1439 Cargo.lock (generated)
File diff suppressed because it is too large
25 Cargo.toml
@@ -5,18 +5,13 @@ authors = ["Felix Ableitner"]
 edition = "2018"
 
 [dependencies]
-reqwest = { version = "0.11.13", default-features = false, features = ["json", "rustls-tls"] }
-serde = { version = "1.0.149", features = ["derive"] }
-anyhow = "1.0.66"
-tokio = { version = "1.22.0", features = ["macros", "rt-multi-thread"] }
-futures = "0.3.25"
-serde_json = "1.0.89"
-semver = "1.0.14"
-once_cell = "1.16.0"
-lemmy_api_common = "=0.16.0"
-lemmy_db_schema = "=0.16.0"
-async-recursion = "1.0.0"
-log = "0.4.17"
-derive-new = "0.5.9"
-stderrlog = "0.5.4"
-structopt = "0.3.26"
+reqwest = { version = "0.11.10", default-features = false, features = ["json", "rustls-tls"] }
+serde = { version = "1.0.137", features = ["derive"] }
+anyhow = "1.0.57"
+tokio = { version = "1.18.1", features = ["macros", "rt-multi-thread"] }
+futures = "0.3.21"
+serde_json = "1.0.81"
+clap = "3.1.15"
+semver = "1.0.9"
+once_cell = "1.10.0"
+lemmy_api_common = "0.16.0"
224 src/crawl.rs
@@ -1,135 +1,121 @@
-use crate::node_info::{NodeInfo, NodeInfoWellKnown};
-use crate::CLIENT;
-use anyhow::{anyhow, Error};
-use async_recursion::async_recursion;
-use futures::future::join_all;
+use crate::REQUEST_TIMEOUT;
+use anyhow::anyhow;
+use anyhow::Error;
 use lemmy_api_common::site::GetSiteResponse;
-use log::debug;
-use reqwest::Url;
+use once_cell::sync::Lazy;
+use reqwest::Client;
 use semver::Version;
-use std::collections::HashSet;
-use std::ops::Deref;
-use std::sync::Arc;
-use tokio::sync::Mutex;
+use serde::Serialize;
+use std::collections::VecDeque;
 
-#[derive(new)]
-pub struct CrawlJob {
-    domain: String,
-    current_distance: i32,
-    params: Arc<CrawlParams>,
-}
+static CLIENT: Lazy<Client> = Lazy::new(Client::default);
 
-#[derive(new)]
-pub struct CrawlParams {
-    min_lemmy_version: Version,
-    exclude_domains: Vec<String>,
+pub async fn crawl(
+    start_instances: Vec<String>,
+    exclude: Vec<String>,
     max_depth: i32,
-    crawled_instances: Arc<Mutex<HashSet<String>>>,
-}
-
-#[derive(Debug)]
-pub struct CrawlResult {
-    pub domain: String,
-    pub node_info: NodeInfo,
-    pub site_info: Option<GetSiteResponse>,
-}
-
-impl CrawlJob {
-    #[async_recursion]
-    pub async fn crawl(self) -> Vec<Result<CrawlResult, Error>> {
-        // need to acquire and release mutex before recursing, otherwise it will deadlock
-        {
-            let mut crawled_instances = self.params.crawled_instances.deref().lock().await;
-            if crawled_instances.contains(&self.domain) {
-                return vec![];
-            } else {
-                crawled_instances.insert(self.domain.clone());
-            }
-        }
-
-        if self.current_distance > self.params.max_depth
-            || self.params.exclude_domains.contains(&self.domain)
-        {
-            return vec![];
-        }
-
-        debug!(
-            "Starting crawl for {}, distance {}",
-            &self.domain, &self.current_distance
-        );
-        let (node_info, site_info) = match self.fetch_instance_details().await {
-            Ok(o) => o,
-            Err(e) => return vec![Err(e)],
-        };
-        let mut crawl_result = CrawlResult {
-            domain: self.domain.clone(),
-            node_info,
-            site_info: None,
-        };
-
-        if let Some(site_info) = site_info {
-            match Version::parse(&site_info.version) {
-                Ok(version) => {
-                    if version < self.params.min_lemmy_version {
-                        return vec![Ok(crawl_result)];
-                    }
-                }
-                Err(e) => return vec![Err(e.into())],
-            }
-
-            let mut result = vec![];
-            if let Some(federated) = &site_info.federated_instances {
-                for domain in federated.linked.iter() {
-                    let crawl_job = CrawlJob::new(
-                        domain.clone(),
-                        self.current_distance + 1,
-                        self.params.clone(),
-                    );
-                    result.push(crawl_job.crawl());
-                }
-            }
-
-            let mut result2: Vec<Result<CrawlResult, Error>> =
-                join_all(result).await.into_iter().flatten().collect();
-            debug!("Successfully finished crawl for {}", &self.domain);
-            crawl_result.site_info = Some(site_info);
-            result2.push(Ok(crawl_result));
-
-            result2
-        } else {
-            vec![Ok(crawl_result)]
-        }
-    }
-
-    async fn fetch_instance_details(&self) -> Result<(NodeInfo, Option<GetSiteResponse>), Error> {
-        let rel_node_info: Url = Url::parse("http://nodeinfo.diaspora.software/ns/schema/2.0")
-            .expect("parse nodeinfo relation url");
-        let node_info_well_known = CLIENT
-            .get(&format!("https://{}/.well-known/nodeinfo", &self.domain))
-            .send()
-            .await?
-            .json::<NodeInfoWellKnown>()
-            .await?;
-        let node_info_url = node_info_well_known
-            .links
-            .into_iter()
-            .find(|l| l.rel == rel_node_info)
-            .ok_or_else(|| anyhow!("failed to find nodeinfo link for {}", &self.domain))?
-            .href;
-        let node_info = CLIENT
-            .get(node_info_url)
-            .send()
-            .await?
-            .json::<NodeInfo>()
-            .await?;
-
-        let site_info = CLIENT
-            .get(&format!("https://{}/api/v3/site", &self.domain))
-            .send()
-            .await?
-            .json::<GetSiteResponse>()
-            .await
-            .ok();
-        Ok((node_info, site_info))
-    }
-}
+) -> Result<(Vec<InstanceDetails>, i32), Error> {
+    let mut pending_instances: VecDeque<CrawlInstance> = start_instances
+        .iter()
+        .map(|s| CrawlInstance::new(s.to_string(), 0))
+        .collect();
+    let min_lemmy_version = min_lemmy_version().await?;
+    let mut crawled_instances = vec![];
+    let mut instance_details = vec![];
+    let mut failed_instances = 0;
+    while let Some(current_instance) = pending_instances.pop_back() {
+        crawled_instances.push(current_instance.domain.clone());
+        if current_instance.depth > max_depth || exclude.contains(&current_instance.domain) {
+            continue;
+        }
+        match fetch_instance_details(&current_instance.domain, &min_lemmy_version).await {
+            Ok(details) => {
+                if let Some(federated) = &details.site_info.federated_instances.as_ref() {
+                    for i in &federated.linked {
+                        let is_in_crawled = crawled_instances.contains(i);
+                        let is_in_pending = pending_instances.iter().any(|p| &p.domain == i);
+                        if !is_in_crawled && !is_in_pending {
+                            let ci = CrawlInstance::new(i.clone(), current_instance.depth + 1);
+                            pending_instances.push_back(ci);
+                        }
+                    }
+                }
+                instance_details.push(details);
+            }
+            Err(e) => {
+                failed_instances += 1;
+                eprintln!("Failed to crawl {}: {}", current_instance.domain, e)
+            }
+        }
+    }
+
+    // Sort by active monthly users descending
+    instance_details.sort_by_key(|i| {
+        i.site_info
+            .site_view
+            .as_ref()
+            .map(|s| s.counts.users_active_month)
+            .unwrap_or(0)
+    });
+    instance_details.reverse();
+
+    Ok((instance_details, failed_instances))
+}
+
+#[derive(Serialize, Debug)]
+pub struct InstanceDetails {
+    pub domain: String,
+    pub site_info: GetSiteResponse,
+}
+
+struct CrawlInstance {
+    domain: String,
+    depth: i32,
+}
+
+impl CrawlInstance {
+    pub fn new(domain: String, depth: i32) -> CrawlInstance {
+        CrawlInstance { domain, depth }
+    }
+}
+
+async fn fetch_instance_details(
+    domain: &str,
+    min_lemmy_version: &Version,
+) -> Result<InstanceDetails, Error> {
+    let client = Client::default();
+
+    let site_info_url = format!("https://{}/api/v3/site", domain);
+    let site_info = client
+        .get(&site_info_url)
+        .timeout(REQUEST_TIMEOUT)
+        .send()
+        .await?
+        .json::<GetSiteResponse>()
+        .await?;
+
+    let version = Version::parse(&site_info.version)?;
+    if &version < min_lemmy_version {
+        return Err(anyhow!("lemmy version is too old ({})", version));
+    }
+
+    Ok(InstanceDetails {
+        domain: domain.to_owned(),
+        site_info,
+    })
+}
+
+/// calculate minimum allowed lemmy version based on current version. in case of current version
+/// 0.16.3, the minimum from this function is 0.15.3. this is to avoid rejecting all instances on
+/// the previous version when a major lemmy release is published.
+async fn min_lemmy_version() -> Result<Version, Error> {
+    let lemmy_version_url = "https://raw.githubusercontent.com/LemmyNet/lemmy-ansible/main/VERSION";
+    let req = CLIENT
+        .get(lemmy_version_url)
+        .timeout(REQUEST_TIMEOUT)
+        .send()
+        .await?;
+    let mut version = Version::parse(req.text().await?.trim())?;
+    version.minor -= 1;
+    Ok(version)
+}
139 src/lib.rs
@@ -1,138 +1,9 @@
-#[macro_use]
-extern crate derive_new;
-
-use crate::crawl::{CrawlJob, CrawlParams, CrawlResult};
-use crate::node_info::{NodeInfo, NodeInfoUsage, NodeInfoUsers};
-use anyhow::Error;
-use futures::future::join_all;
-use lemmy_api_common::site::GetSiteResponse;
-use log::warn;
-use once_cell::sync::Lazy;
-use reqwest::{Client, ClientBuilder};
-use semver::Version;
-use serde::Serialize;
-use std::collections::HashSet;
-use std::sync::Arc;
 use std::time::Duration;
-use tokio::sync::Mutex;
 
 pub mod crawl;
-mod node_info;
 
-const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
-
-static CLIENT: Lazy<Client> = Lazy::new(|| {
-    ClientBuilder::new()
-        .timeout(REQUEST_TIMEOUT)
-        .user_agent("lemmy-stats-crawler")
-        .build()
-        .expect("build reqwest client")
-});
-
-#[derive(Serialize, Debug)]
-pub struct CrawlResult2 {
-    pub domain: String,
-    pub site_info: GetSiteResponse,
-    pub federated_counts: Option<NodeInfoUsage>,
-}
-
-pub async fn start_crawl(
-    start_instances: Vec<String>,
-    exclude_domains: Vec<String>,
-    max_distance: i32,
-) -> Result<Vec<CrawlResult2>, Error> {
-    let params = Arc::new(CrawlParams::new(
-        min_lemmy_version().await?,
-        exclude_domains,
-        max_distance,
-        Arc::new(Mutex::new(HashSet::new())),
-    ));
-    let mut jobs = vec![];
-    for domain in start_instances.into_iter() {
-        let job = CrawlJob::new(domain, 0, params.clone());
-        jobs.push(job.crawl());
-    }
-
-    let crawl_results: Vec<CrawlResult> = join_all(jobs)
-        .await
-        .into_iter()
-        .flatten()
-        .inspect(|r| {
-            if let Err(e) = r {
-                warn!("{}", e)
-            }
-        })
-        .filter_map(Result::ok)
-        .collect();
-    let mut crawl_results = calculate_federated_site_aggregates(crawl_results)?;
-
-    // Sort by active monthly users descending
-    crawl_results.sort_unstable_by_key(|i| {
-        i.site_info
-            .site_view
-            .as_ref()
-            .map(|s| s.counts.users_active_month)
-            .unwrap_or(0)
-    });
-    crawl_results.reverse();
-    Ok(crawl_results)
-}
-
-/// calculate minimum allowed lemmy version based on current version. in case of current version
-/// 0.16.3, the minimum from this function is 0.15.3. this is to avoid rejecting all instances on
-/// the previous version when a major lemmy release is published.
-async fn min_lemmy_version() -> Result<Version, Error> {
-    let lemmy_version_url = "https://raw.githubusercontent.com/LemmyNet/lemmy-ansible/main/VERSION";
-    let req = CLIENT
-        .get(lemmy_version_url)
-        .timeout(REQUEST_TIMEOUT)
-        .send()
-        .await?;
-    let mut version = Version::parse(req.text().await?.trim())?;
-    version.minor -= 1;
-    Ok(version)
-}
-
-fn calculate_federated_site_aggregates(
-    crawl_results: Vec<CrawlResult>,
-) -> Result<Vec<CrawlResult2>, Error> {
-    let node_info: Vec<(String, NodeInfo)> = crawl_results
-        .iter()
-        .map(|c| (c.domain.clone(), c.node_info.clone()))
-        .collect();
-    let lemmy_instances: Vec<(String, GetSiteResponse)> = crawl_results
-        .into_iter()
-        .filter_map(|c| {
-            let domain = c.domain;
-            c.site_info.map(|c2| (domain, c2))
-        })
-        .collect();
-    let mut ret = vec![];
-    for instance in &lemmy_instances {
-        let federated_counts = if let Some(federated_instances) = &instance.1.federated_instances {
-            node_info
-                .iter()
-                .filter(|i| federated_instances.linked.contains(&i.0) || i.0 == instance.0)
-                .map(|i| i.1.usage.clone())
-                .reduce(|a, b| NodeInfoUsage {
-                    users: NodeInfoUsers {
-                        total: a.users.total + b.users.total,
-                        active_halfyear: a.users.active_halfyear + b.users.active_halfyear,
-                        active_month: a.users.active_month + b.users.active_month,
-                    },
-                    posts: a.posts + b.posts,
-                    comments: a.comments + b.comments,
-                })
-        } else {
-            None
-        };
-        // TODO: workaround because GetSiteResponse doesnt implement clone
-        let site_info = serde_json::from_str(&serde_json::to_string(&instance.1)?)?;
-        ret.push(CrawlResult2 {
-            domain: instance.0.clone(),
-            site_info,
-            federated_counts,
-        });
-    }
-    Ok(ret)
-}
+pub const REQUEST_TIMEOUT: Duration = Duration::from_secs(10);
+pub const DEFAULT_START_INSTANCES: &str = "lemmy.ml";
+pub const DEFAULT_MAX_CRAWL_DEPTH: &str = "20";
+pub const EXCLUDE_INSTANCES: &str =
+    "ds9.lemmy.ml, enterprise.lemmy.ml, voyager.lemmy.ml, test.lemmy.ml";
78 src/main.rs
@@ -1,65 +1,64 @@
 use anyhow::Error;
-use lemmy_stats_crawler::{start_crawl, CrawlResult2};
+use clap::{Arg, Command};
+use lemmy_stats_crawler::crawl::{crawl, InstanceDetails};
+use lemmy_stats_crawler::{DEFAULT_MAX_CRAWL_DEPTH, DEFAULT_START_INSTANCES, EXCLUDE_INSTANCES};
 use serde::Serialize;
-use structopt::StructOpt;
-
-#[derive(StructOpt, Debug)]
-#[structopt()]
-struct Parameters {
-    #[structopt(short, long, use_delimiter = true, default_value = "lemmy.ml")]
-    start_instances: Vec<String>,
-    #[structopt(
-        short,
-        long,
-        use_delimiter = true,
-        default_value = "ds9.lemmy.ml,enterprise.lemmy.ml,voyager.lemmy.ml,test.lemmy.ml"
-    )]
-    exclude_instances: Vec<String>,
-    #[structopt(short, long, default_value = "20")]
-    max_crawl_distance: i32,
-    /// Silence all output
-    #[structopt(short, long)]
-    quiet: bool,
-}
 
 #[tokio::main]
 pub async fn main() -> Result<(), Error> {
-    let params = Parameters::from_args();
-
-    stderrlog::new()
-        .module(module_path!())
-        .quiet(params.quiet)
-        .verbosity(1)
-        .init()?;
+    let matches = Command::new("Lemmy Stats Crawler")
+        .arg(
+            Arg::new("start-instances")
+                .long("start-instances")
+                .takes_value(true),
+        )
+        .arg(Arg::new("exclude").long("exclude").takes_value(true))
+        .arg(
+            Arg::new("max-crawl-depth")
+                .long("max-crawl-depth")
+                .takes_value(true),
+        )
+        .get_matches();
+    let start_instances: Vec<String> = matches
+        .value_of("start-instances")
+        .unwrap_or(DEFAULT_START_INSTANCES)
+        .split(',')
+        .map(|s| s.trim().to_string())
+        .collect();
+    let exclude: Vec<String> = matches
+        .value_of("exclude")
+        .unwrap_or(EXCLUDE_INSTANCES)
+        .split(',')
+        .map(|s| s.trim().to_string())
+        .collect();
+    let max_crawl_depth: i32 = matches
+        .value_of("max-crawl-depth")
+        .unwrap_or(DEFAULT_MAX_CRAWL_DEPTH)
+        .parse()?;
 
     eprintln!("Crawling...");
-    let instance_details = start_crawl(
-        params.start_instances,
-        params.exclude_instances,
-        params.max_crawl_distance,
-    )
-    .await?;
-    let total_stats = aggregate(instance_details);
+    let (instance_details, failed_instances) =
+        crawl(start_instances, exclude, max_crawl_depth).await?;
+    let total_stats = aggregate(instance_details, failed_instances);
 
     println!("{}", serde_json::to_string_pretty(&total_stats)?);
     Ok(())
 }
 
 // TODO: lemmy stores these numbers in SiteAggregates, would be good to simply use that as a member
 // (to avoid many members). but SiteAggregates also has id, site_id fields
 #[derive(Serialize)]
 struct TotalStats {
     crawled_instances: i32,
+    failed_instances: i32,
     online_users: usize,
     total_users: i64,
     users_active_day: i64,
     users_active_week: i64,
     users_active_month: i64,
     users_active_halfyear: i64,
-    instance_details: Vec<CrawlResult2>,
+    instance_details: Vec<InstanceDetails>,
 }
 
-fn aggregate(instance_details: Vec<CrawlResult2>) -> TotalStats {
+fn aggregate(instance_details: Vec<InstanceDetails>, failed_instances: i32) -> TotalStats {
     let mut online_users = 0;
     let mut total_users = 0;
    let mut users_active_day = 0;
@@ -80,6 +79,7 @@ fn aggregate(instance_details: Vec<CrawlResult2>) -> TotalStats {
     }
     TotalStats {
         crawled_instances,
+        failed_instances,
         online_users,
         total_users,
         users_active_day,
47 src/node_info.rs (deleted)
@@ -1,47 +0,0 @@
-use reqwest::Url;
-use serde::{Deserialize, Serialize};
-
-#[derive(Deserialize, Debug)]
-pub struct NodeInfoWellKnown {
-    pub links: Vec<NodeInfoWellKnownLinks>,
-}
-
-#[derive(Deserialize, Debug)]
-pub struct NodeInfoWellKnownLinks {
-    pub rel: Url,
-    pub href: Url,
-}
-
-#[derive(Deserialize, Debug, Clone)]
-#[serde(rename_all = "camelCase")]
-pub struct NodeInfo {
-    pub version: String,
-    pub software: NodeInfoSoftware,
-    pub protocols: Vec<String>,
-    pub usage: NodeInfoUsage,
-    pub open_registrations: bool,
-}
-
-#[derive(Deserialize, Debug, Clone)]
-pub struct NodeInfoSoftware {
-    pub name: String,
-    pub version: String,
-}
-
-#[derive(Deserialize, Serialize, Debug, Clone, Default)]
-#[serde(rename_all = "camelCase", default)]
-pub struct NodeInfoUsage {
-    pub users: NodeInfoUsers,
-    #[serde(rename(deserialize = "localPosts"))]
-    pub posts: i64,
-    #[serde(rename(deserialize = "localComments"))]
-    pub comments: i64,
-}
-
-#[derive(Deserialize, Serialize, Debug, Clone, Default)]
-#[serde(rename_all = "camelCase", default)]
-pub struct NodeInfoUsers {
-    pub total: i64,
-    pub active_halfyear: i64,
-    pub active_month: i64,
-}