dull b 2023-12-20 22:50:55 +00:00
parent 191e2f0c57
commit 82e37f7b57
6 changed files with 87 additions and 107 deletions

Cargo.lock (generated)

@@ -2662,6 +2662,7 @@ name = "lemmy_db_schema"
 version = "0.19.0"
 dependencies = [
  "activitypub_federation",
+ "anyhow",
  "async-trait",
  "bcrypt",
  "chrono",

(file 2 of 6, path not shown)

@@ -1,6 +1,11 @@
-use clap::{Parser, Subcommand};
-use diesel::{dsl::{self, sql}, sql_query, sql_types, ExpressionMethods, IntoSql};
-use diesel_async::RunQueryDsl;
+use clap::Parser;
+use diesel::{
+  dsl::{self, sql},
+  sql_types,
+  ExpressionMethods,
+  IntoSql,
+};
+use diesel_async::{RunQueryDsl, SimpleAsyncConnection};
 use lemmy_db_schema::{
   schema::post,
   source::{
@@ -12,17 +17,14 @@ use lemmy_db_schema::{
   utils::{
     build_db_pool,
     get_conn,
-    series::{self, ValuesFromSeries},
-    DbConn,
-    DbPool,
     now,
+    series::{self, ValuesFromSeries},
   },
   SortType,
 };
-use lemmy_db_views::{
-  post_view::{PaginationCursorData, PostQuery},
-  structs::PaginationCursor,
-};
+use lemmy_db_views::{post_view::PostQuery, structs::PaginationCursor};
 use lemmy_utils::error::LemmyResult;
 use std::num::NonZeroU32;
-use diesel::pg::expression::dsl::IntervalDsl;

 #[derive(Parser, Debug)]
 struct CmdArgs {
@@ -40,52 +42,53 @@ struct CmdArgs {
 #[tokio::main]
 async fn main() -> LemmyResult<()> {
+  if let Err(err) = try_main().await {
+    println!("Error: {err:?}");
+  }
+  if let Ok(path) = std::env::var("PGDATA") {
+    println!("🪵 query plans and error details written in {path}/log");
+  }
+  Ok(())
+}
+
+async fn try_main() -> LemmyResult<()> {
   let args = CmdArgs::parse();
   let pool = &build_db_pool().await?;
   let pool = &mut pool.into();
   let conn = &mut get_conn(pool).await?;
   if args.explain_insertions {
-    sql_query("SET auto_explain.log_min_duration = 0")
-      .execute(conn)
+    // log_nested_statements is enabled to log trigger execution
+    conn
+      .batch_execute(
+        "SET auto_explain.log_min_duration = 0; SET auto_explain.log_nested_statements = on;",
+      )
       .await?;
   }
-  let pool = &mut conn.into();

-  let instance = Instance::read_or_create(pool, "reddit.com".to_owned()).await?;
+  let instance = Instance::read_or_create(&mut conn.into(), "reddit.com".to_owned()).await?;

   println!("🫃 creating {} people", args.people);
   let mut person_ids = vec![];
   for i in 0..args.people.get() {
-    person_ids.push(
-      Person::create(
-        pool,
-        &PersonInsertForm::builder()
-          .name(format!("p{i}"))
-          .public_key("pubkey".to_owned())
-          .instance_id(instance.id)
-          .build(),
-      )
-      .await?
-      .id,
-    );
+    let form = PersonInsertForm::builder()
+      .name(format!("p{i}"))
+      .public_key("pubkey".to_owned())
+      .instance_id(instance.id)
+      .build();
+    person_ids.push(Person::create(&mut conn.into(), &form).await?.id);
   }

   println!("🏠 creating {} communities", args.communities);
   let mut community_ids = vec![];
   for i in 0..args.communities.get() {
-    community_ids.push(
-      Community::create(
-        pool,
-        &CommunityInsertForm::builder()
-          .name(format!("c{i}"))
-          .title(i.to_string())
-          .instance_id(instance.id)
-          .build(),
-      )
-      .await?
-      .id,
-    );
+    let form = CommunityInsertForm::builder()
+      .name(format!("c{i}"))
+      .title(i.to_string())
+      .instance_id(instance.id)
+      .build();
+    community_ids.push(Community::create(&mut conn.into(), &form).await?.id);
   }

   let post_batches = args.people.get() * args.communities.get();
@@ -108,7 +111,10 @@ async fn main() -> LemmyResult<()> {
           person_id.into_sql::<sql_types::Integer>(),
           community_id.into_sql::<sql_types::Integer>(),
           series::current_value.eq(1),
-          now() - sql::<sql_types::Interval>("make_interval(secs => ").bind::<sql_types::BigInt, _>(series::current_value).sql(")"),
+          now()
+            - sql::<sql_types::Interval>("make_interval(secs => ")
+              .bind::<sql_types::BigInt, _>(series::current_value)
+              .sql(")"),
         ),
       })
       .into_columns((
@@ -118,64 +124,50 @@ async fn main() -> LemmyResult<()> {
         post::featured_community,
         post::published,
       ))
-      .execute(&mut get_conn(pool).await?)
+      .execute(conn)
       .await?;
       num_inserted_posts += n;
     }
   }

-  // Make sure the println above shows the correct amount
+  // Lie detector for the println above
   assert_eq!(num_inserted_posts, num_posts as usize);

   // Enable auto_explain
-  let conn = &mut get_conn(pool).await?;
-  sql_query("SET auto_explain.log_min_duration = 0")
-    .execute(conn)
-    .await?;
-  let pool = &mut conn.into();
+  conn
+    .batch_execute(
+      "SET auto_explain.log_min_duration = 0; SET auto_explain.log_nested_statements = off;",
+    )
+    .await?;

-  {
-    let mut page_after = None;
-    for page_num in 1..=args.read_post_pages {
-      println!(
-        "👀 getting page {page_num} of posts (pagination cursor used: {})",
-        page_after.is_some()
-      );
-      // TODO: include local_user
-      let post_views = PostQuery {
-        community_id: community_ids.get(0).cloned(),
-        sort: Some(SortType::New),
-        limit: Some(20),
-        page_after,
-        ..Default::default()
-      }
-      .list(pool)
-      .await?;
-      if let Some(post_view) = post_views.into_iter().last() {
-        println!("👀 getting pagination cursor data for next page");
-        let cursor_data = PaginationCursor::after_post(&post_view).read(pool).await?;
-        page_after = Some(cursor_data);
-      } else {
-        println!("🚫 reached empty page");
-        break;
-      }
-    }
-  }
+  let mut page_after = None;
+  for page_num in 1..=args.read_post_pages {
+    println!(
+      "👀 getting page {page_num} of posts (pagination cursor used: {})",
+      page_after.is_some()
+    );
+    // TODO: include local_user
+    let post_views = PostQuery {
+      community_id: community_ids.get(0).cloned(),
+      sort: Some(SortType::New),
+      limit: Some(20),
+      page_after,
+      ..Default::default()
+    }
+    .list(&mut conn.into())
+    .await?;
+    if let Some(post_view) = post_views.into_iter().last() {
+      println!("👀 getting pagination cursor data for next page");
+      let cursor_data = PaginationCursor::after_post(&post_view)
+        .read(&mut conn.into())
+        .await?;
+      page_after = Some(cursor_data);
+    } else {
+      println!("👀 reached empty page");
+      break;
+    }
+  }

-  // TODO show this path when there's an error
-  if let Ok(path) = std::env::var("PGDATA") {
-    println!("🪵 query plans written in {path}/log");
-  }

   Ok(())
 }
-
-async fn conn_with_auto_explain<'a, 'b: 'a>(pool: &'a mut DbPool<'b>) -> LemmyResult<DbConn<'a>> {
-  let mut conn = get_conn(pool).await?;
-  sql_query("SET auto_explain.log_min_duration = 0")
-    .execute(&mut conn)
-    .await?;
-  Ok(conn)
-}
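
Aside: the main/try_main split in the hunk above is a common wrapper pattern for CLI tools that must print something even when the run fails: the fallible body lives in a helper so `?` works, while main reports the error and unconditionally prints the log-path hint. A minimal self-contained sketch of the pattern, assuming only the tokio crate; run_benchmark is a hypothetical stand-in for the Lemmy-specific body:

#[tokio::main]
async fn main() {
  if let Err(err) = try_main().await {
    println!("Error: {err:?}");
  }
  // Printed even when the run failed, so the auto_explain output is easy to find.
  if let Ok(path) = std::env::var("PGDATA") {
    println!("🪵 query plans and error details written in {path}/log");
  }
}

async fn try_main() -> Result<(), Box<dyn std::error::Error>> {
  // Every fallible step can use `?` here without losing the final println above.
  run_benchmark().await?;
  Ok(())
}

async fn run_benchmark() -> Result<(), Box<dyn std::error::Error>> {
  Ok(())
}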

(file 3 of 6, path not shown)

@@ -76,6 +76,7 @@ tokio-postgres = { workspace = true, optional = true }
 tokio-postgres-rustls = { workspace = true, optional = true }
 rustls = { workspace = true, optional = true }
 uuid = { workspace = true, features = ["v4"] }
+anyhow = { workspace = true }

 [dev-dependencies]
 serial_test = { workspace = true }

(file 4 of 6, path not shown)

@@ -41,14 +41,7 @@ use crate::{
 };
 use ::url::Url;
 use chrono::{Duration, Utc};
-use diesel::{
-  dsl::insert_into,
-  result::Error,
-  ExpressionMethods,
-  Insertable,
-  QueryDsl,
-  TextExpressionMethods,
-};
+use diesel::{dsl::insert_into, result::Error, ExpressionMethods, QueryDsl, TextExpressionMethods};
 use diesel_async::RunQueryDsl;
 use std::collections::HashSet;

(file 5 of 6, path not shown)

@@ -8,6 +8,7 @@ use crate::{
   SortType,
 };
 use activitypub_federation::{fetch::object_id::ObjectId, traits::Object};
+use anyhow::Context;
 use chrono::{DateTime, Utc};
 use deadpool::Runtime;
 use diesel::{
@@ -19,11 +20,9 @@ use diesel::{
   query_dsl::methods::LimitDsl,
   result::{ConnectionError, ConnectionResult, Error as DieselError, Error::QueryBuilderError},
   serialize::{Output, ToSql},
-  sql_query,
   sql_types::{Text, Timestamptz},
   IntoSql,
   PgConnection,
-  RunQueryDsl,
 };
 use diesel_async::{
   pg::AsyncPgConnection,
@@ -343,21 +342,18 @@ impl ServerCertVerifier for NoCertVerifier {
 pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();

-fn run_migrations(db_url: &str) {
+fn run_migrations(db_url: &str) -> Result<(), LemmyError> {
   // Needs to be a sync connection
   let mut conn =
-    PgConnection::establish(db_url).unwrap_or_else(|e| panic!("Error connecting to {db_url}: {e}"));
-
-  // Disable auto_explain output for migrations
-  sql_query("SET auto_explain.log_min_duration = -1")
-    .execute(&mut conn)
-    .expect("failed to disable auto_explain");
+    PgConnection::establish(db_url).with_context(|| format!("Error connecting to {db_url}"))?;

   info!("Running Database migrations (This may take a long time)...");
-  let _ = &mut conn
+  conn
     .run_pending_migrations(MIGRATIONS)
-    .unwrap_or_else(|e| panic!("Couldn't run DB Migrations: {e}"));
+    .map_err(|e| anyhow::Error::msg(format!("Couldn't run DB Migrations: {e}")))?;
   info!("Database migrations complete.");
+  Ok(())
 }

@@ -381,7 +377,7 @@ pub async fn build_db_pool() -> Result<ActualDbPool, LemmyError> {
     .runtime(Runtime::Tokio1)
     .build()?;

-  run_migrations(&db_url);
+  run_migrations(&db_url)?;

   Ok(pool)
 }
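
The run_migrations change above swaps panics for a Result, using anyhow's Context to attach the connection string to the underlying error. A self-contained sketch of that error-propagation pattern, assuming only the anyhow crate; load_config is a hypothetical stand-in for PgConnection::establish:

use anyhow::{Context, Result};

// Hypothetical fallible step standing in for the real connection call.
fn load_config(path: &str) -> std::io::Result<String> {
  std::fs::read_to_string(path)
}

fn setup(path: &str) -> Result<String> {
  // with_context wraps the io::Error; `?` then bubbles it up instead of panicking.
  load_config(path).with_context(|| format!("Error reading config at {path}"))
}

fn main() {
  if let Err(e) = setup("/nonexistent.conf") {
    // The "{:#}" format prints the context line together with the underlying cause.
    println!("{e:#}");
  }
}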

(file 6 of 6, path not shown)

@@ -30,9 +30,6 @@ config_args=(
   # Allow auto_explain to be turned on
   -c session_preload_libraries=auto_explain

-  # Log triggers
-  -c auto_explain.log_nested_statements=on
-
   # Include actual row amounts and run times for query plan nodes
   -c auto_explain.log_analyze=on

@@ -43,7 +40,7 @@ config_args=(
 # Create cluster
 pg_ctl init --silent --options="--username=postgres --auth=trust --no-instructions"

-# Start server that only listens to socket in current directory
+# Start server
 pg_ctl start --silent --options="${config_args[*]}"

 # Setup database
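
With auto_explain.log_nested_statements dropped from the server-wide config above, nested (trigger) plans are only logged by sessions that opt in, which is what the main.rs hunk does with batch_execute. A sketch of that per-session toggle, assuming the diesel, diesel_async, anyhow, and tokio crates, a local database at the URL below, and session_preload_libraries=auto_explain still set server-side:

use diesel_async::{AsyncConnection, AsyncPgConnection, SimpleAsyncConnection};

async fn set_auto_explain(conn: &mut AsyncPgConnection, nested: bool) -> diesel::QueryResult<()> {
  // SET only affects this session; other connections keep the server defaults.
  let nested = if nested { "on" } else { "off" };
  conn
    .batch_execute(&format!(
      "SET auto_explain.log_min_duration = 0; SET auto_explain.log_nested_statements = {nested};"
    ))
    .await
}

#[tokio::main]
async fn main() -> anyhow::Result<()> {
  let mut conn = AsyncPgConnection::establish("postgres://postgres@localhost/lemmy").await?;
  set_auto_explain(&mut conn, true).await?;
  Ok(())
}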