diff --git a/Cargo.lock b/Cargo.lock
index 8c782c6fb..337c7628e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2662,6 +2662,7 @@ name = "lemmy_db_schema"
 version = "0.19.0"
 dependencies = [
  "activitypub_federation",
+ "anyhow",
  "async-trait",
  "bcrypt",
  "chrono",
diff --git a/crates/db_perf/src/main.rs b/crates/db_perf/src/main.rs
index 6a23e0113..1143c99c9 100644
--- a/crates/db_perf/src/main.rs
+++ b/crates/db_perf/src/main.rs
@@ -1,6 +1,11 @@
-use clap::{Parser, Subcommand};
-use diesel::{dsl::{self, sql}, sql_query, sql_types, ExpressionMethods, IntoSql};
-use diesel_async::RunQueryDsl;
+use clap::Parser;
+use diesel::{
+  dsl::{self, sql},
+  sql_types,
+  ExpressionMethods,
+  IntoSql,
+};
+use diesel_async::{RunQueryDsl, SimpleAsyncConnection};
 use lemmy_db_schema::{
   schema::post,
   source::{
@@ -12,17 +17,14 @@ use lemmy_db_schema::{
   utils::{
     build_db_pool,
     get_conn,
-    series::{self, ValuesFromSeries}, DbConn, DbPool, now,
+    now,
+    series::{self, ValuesFromSeries},
   },
   SortType,
 };
-use lemmy_db_views::{
-  post_view::{PaginationCursorData, PostQuery},
-  structs::PaginationCursor,
-};
+use lemmy_db_views::{post_view::PostQuery, structs::PaginationCursor};
 use lemmy_utils::error::LemmyResult;
 use std::num::NonZeroU32;
-use diesel::pg::expression::dsl::IntervalDsl;
 
 #[derive(Parser, Debug)]
 struct CmdArgs {
@@ -40,52 +42,53 @@ struct CmdArgs {
 #[tokio::main]
 async fn main() -> LemmyResult<()> {
+  if let Err(err) = try_main().await {
+    println!("Error: {err:?}");
+  }
+  if let Ok(path) = std::env::var("PGDATA") {
+    println!("🪵 query plans and error details written in {path}/log");
+  }
+
+  Ok(())
+}
+
+async fn try_main() -> LemmyResult<()> {
   let args = CmdArgs::parse();
   let pool = &build_db_pool().await?;
   let pool = &mut pool.into();
-
   let conn = &mut get_conn(pool).await?;
+
   if args.explain_insertions {
-    sql_query("SET auto_explain.log_min_duration = 0")
-      .execute(conn)
+    // log_nested_statements is enabled to log trigger execution
+    conn
+      .batch_execute(
+        "SET auto_explain.log_min_duration = 0; SET auto_explain.log_nested_statements = on;",
+      )
       .await?;
   }
-  let pool = &mut conn.into();
-  let instance = Instance::read_or_create(pool, "reddit.com".to_owned()).await?;
+
+  let instance = Instance::read_or_create(&mut conn.into(), "reddit.com".to_owned()).await?;
 
   println!("🫃 creating {} people", args.people);
   let mut person_ids = vec![];
   for i in 0..args.people.get() {
-    person_ids.push(
-      Person::create(
-        pool,
-        &PersonInsertForm::builder()
-          .name(format!("p{i}"))
-          .public_key("pubkey".to_owned())
-          .instance_id(instance.id)
-          .build(),
-      )
-      .await?
-      .id,
-    );
+    let form = PersonInsertForm::builder()
+      .name(format!("p{i}"))
+      .public_key("pubkey".to_owned())
+      .instance_id(instance.id)
+      .build();
+    person_ids.push(Person::create(&mut conn.into(), &form).await?.id);
   }
 
   println!("🏠 creating {} communities", args.communities);
   let mut community_ids = vec![];
   for i in 0..args.communities.get() {
-    community_ids.push(
-      Community::create(
-        pool,
-        &CommunityInsertForm::builder()
-          .name(format!("c{i}"))
-          .title(i.to_string())
-          .instance_id(instance.id)
-          .build(),
-      )
-      .await?
-      .id,
-    );
+    let form = CommunityInsertForm::builder()
+      .name(format!("c{i}"))
+      .title(i.to_string())
+      .instance_id(instance.id)
+      .build();
+    community_ids.push(Community::create(&mut conn.into(), &form).await?.id);
   }
 
   let post_batches = args.people.get() * args.communities.get();
@@ -108,7 +111,10 @@ async fn main() -> LemmyResult<()> {
             person_id.into_sql::<sql_types::Integer>(),
             community_id.into_sql::<sql_types::Integer>(),
             series::current_value.eq(1),
-            now() - sql::<sql_types::Interval>("make_interval(secs => ").bind::<sql_types::BigInt, _>(series::current_value).sql(")"),
+            now()
+              - sql::<sql_types::Interval>("make_interval(secs => ")
+                .bind::<sql_types::BigInt, _>(series::current_value)
+                .sql(")"),
           ),
         })
         .into_columns((
           post::name,
           post::creator_id,
           post::community_id,
           post::featured_community,
           post::published,
         ))
-        .execute(&mut get_conn(pool).await?)
+        .execute(conn)
         .await?;
       num_inserted_posts += n;
     }
   }
-
-  // Lie detector for the println above
+  // Make sure the println above shows the correct amount
   assert_eq!(num_inserted_posts, num_posts as usize);
 
   // Enable auto_explain
-  let conn = &mut get_conn(pool).await?;
-  sql_query("SET auto_explain.log_min_duration = 0")
-    .execute(conn)
+  conn
+    .batch_execute(
+      "SET auto_explain.log_min_duration = 0; SET auto_explain.log_nested_statements = off;",
+    )
     .await?;
-  let pool = &mut conn.into();
-
-  {
-    let mut page_after = None;
-    for page_num in 1..=args.read_post_pages {
-      println!(
-        "👀 getting page {page_num} of posts (pagination cursor used: {})",
-        page_after.is_some()
-      );
-      // TODO: include local_user
-      let post_views = PostQuery {
-        community_id: community_ids.get(0).cloned(),
-        sort: Some(SortType::New),
-        limit: Some(20),
-        page_after,
-        ..Default::default()
-      }
-      .list(pool)
-      .await?;
+
+  let mut page_after = None;
+  for page_num in 1..=args.read_post_pages {
+    println!(
+      "👀 getting page {page_num} of posts (pagination cursor used: {})",
+      page_after.is_some()
+    );
+
+    // TODO: include local_user
+    let post_views = PostQuery {
+      community_id: community_ids.get(0).cloned(),
+      sort: Some(SortType::New),
+      limit: Some(20),
+      page_after,
+      ..Default::default()
+    }
+    .list(&mut conn.into())
+    .await?;
+
     if let Some(post_view) = post_views.into_iter().last() {
       println!("👀 getting pagination cursor data for next page");
-      let cursor_data = PaginationCursor::after_post(&post_view).read(pool).await?;
+      let cursor_data = PaginationCursor::after_post(&post_view)
+        .read(&mut conn.into())
+        .await?;
       page_after = Some(cursor_data);
     } else {
-      println!("🚫 reached empty page");
+      println!("👀 reached empty page");
       break;
     }
-    }
   }
-
-  // TODO show this path when there's an error
-  if let Ok(path) = std::env::var("PGDATA") {
-    println!("🪵 query plans written in {path}/log");
-  }
-
+
   Ok(())
 }
-
-async fn conn_with_auto_explain<'a, 'b: 'a>(pool: &'a mut DbPool<'b>) -> LemmyResult<DbConn<'a>> {
-  let mut conn = get_conn(pool).await?;
-
-  sql_query("SET auto_explain.log_min_duration = 0")
-    .execute(&mut conn)
-    .await?;
-
-  Ok(conn)
-}
diff --git a/crates/db_schema/Cargo.toml b/crates/db_schema/Cargo.toml
index f3ebc0b66..0ce3cfca6 100644
--- a/crates/db_schema/Cargo.toml
+++ b/crates/db_schema/Cargo.toml
@@ -76,6 +76,7 @@ tokio-postgres = { workspace = true, optional = true }
 tokio-postgres-rustls = { workspace = true, optional = true }
 rustls = { workspace = true, optional = true }
 uuid = { workspace = true, features = ["v4"] }
+anyhow = { workspace = true }
 
 [dev-dependencies]
 serial_test = { workspace = true }
diff --git a/crates/db_schema/src/impls/post.rs b/crates/db_schema/src/impls/post.rs
index cb76dc334..4f2f88cb2 100644
--- a/crates/db_schema/src/impls/post.rs
+++ b/crates/db_schema/src/impls/post.rs
@@ -41,14 +41,7 @@ use crate::{
 };
 use ::url::Url;
 use chrono::{Duration, Utc};
-use diesel::{
-  dsl::insert_into,
-  result::Error,
-  ExpressionMethods,
-  Insertable,
-  QueryDsl,
-  TextExpressionMethods,
-};
+use diesel::{dsl::insert_into, result::Error, ExpressionMethods, QueryDsl, TextExpressionMethods};
 use diesel_async::RunQueryDsl;
 use std::collections::HashSet;
 
diff --git a/crates/db_schema/src/utils.rs b/crates/db_schema/src/utils.rs
index 67e005273..be12639e7 100644
--- a/crates/db_schema/src/utils.rs
+++ b/crates/db_schema/src/utils.rs
@@ -8,6 +8,7 @@ use crate::{
   SortType,
 };
 use activitypub_federation::{fetch::object_id::ObjectId, traits::Object};
+use anyhow::Context;
 use chrono::{DateTime, Utc};
 use deadpool::Runtime;
 use diesel::{
@@ -19,11 +20,9 @@ use diesel::{
   query_dsl::methods::LimitDsl,
   result::{ConnectionError, ConnectionResult, Error as DieselError, Error::QueryBuilderError},
   serialize::{Output, ToSql},
-  sql_query,
   sql_types::{Text, Timestamptz},
   IntoSql,
   PgConnection,
-  RunQueryDsl,
 };
 use diesel_async::{
   pg::AsyncPgConnection,
@@ -343,21 +342,18 @@ impl ServerCertVerifier for NoCertVerifier {
 
 pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!();
 
-fn run_migrations(db_url: &str) {
+fn run_migrations(db_url: &str) -> Result<(), LemmyError> {
   // Needs to be a sync connection
   let mut conn =
-    PgConnection::establish(db_url).unwrap_or_else(|e| panic!("Error connecting to {db_url}: {e}"));
-
-  // Disable auto_explain output for migrations
-  sql_query("SET auto_explain.log_min_duration = -1")
-    .execute(&mut conn)
-    .expect("failed to disable auto_explain");
+    PgConnection::establish(db_url).with_context(|| format!("Error connecting to {db_url}"))?;
 
   info!("Running Database migrations (This may take a long time)...");
-  let _ = &mut conn
+  conn
     .run_pending_migrations(MIGRATIONS)
-    .unwrap_or_else(|e| panic!("Couldn't run DB Migrations: {e}"));
+    .map_err(|e| anyhow::Error::msg(format!("Couldn't run DB Migrations: {e}")))?;
   info!("Database migrations complete.");
+
+  Ok(())
 }
 
 pub async fn build_db_pool() -> Result<ActualDbPool, LemmyError> {
@@ -381,7 +377,7 @@ pub async fn build_db_pool() -> Result<ActualDbPool, LemmyError> {
     .runtime(Runtime::Tokio1)
     .build()?;
 
-  run_migrations(&db_url);
+  run_migrations(&db_url)?;
 
   Ok(pool)
 }
diff --git a/scripts/start_dev_db.sh b/scripts/start_dev_db.sh
index f6b4e2a9a..8ea4a294e 100644
--- a/scripts/start_dev_db.sh
+++ b/scripts/start_dev_db.sh
@@ -30,9 +30,6 @@ config_args=(
   # Allow auto_explain to be turned on
   -c session_preload_libraries=auto_explain
 
-  # Log triggers
-  -c auto_explain.log_nested_statements=on
-
   # Include actual row amounts and run times for query plan nodes
   -c auto_explain.log_analyze=on
 
@@ -43,7 +40,7 @@ config_args=(
 # Create cluster
 pg_ctl init --silent --options="--username=postgres --auth=trust --no-instructions"
 
-# Start server that only listens to socket in current directory
+# Start server
 pg_ctl start --silent --options="${config_args[*]}"
 
 # Setup database
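
Note on the control flow introduced in crates/db_perf/src/main.rs above: the main/try_main split runs the fallible body, prints the error instead of returning it, and then always prints the pointer to the Postgres log where auto_explain plans (and now error details) end up. A minimal standalone sketch of the same shape, assuming the tokio runtime the crate already uses; the body of try_main is a placeholder, and Box<dyn Error> stands in for LemmyResult only to keep the sketch dependency-free:

use std::env;

// Placeholder for the fallible body; in db_perf this owns the pool,
// the inserts, and the paginated reads.
async fn try_main() -> Result<(), Box<dyn std::error::Error>> {
  Ok(())
}

#[tokio::main]
async fn main() {
  // Catch the error here instead of returning it from main(),
  // so the log hint below is printed even when the run fails.
  if let Err(err) = try_main().await {
    println!("Error: {err:?}");
  }

  // auto_explain writes its query plans to the server log, so point at it last.
  if let Ok(path) = env::var("PGDATA") {
    println!("query plans and error details written in {path}/log");
  }
}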