diff --git a/Cargo.lock b/Cargo.lock
index 86342b923..edbd43efd 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2644,11 +2644,25 @@ dependencies = [
  "uuid",
 ]
 
+[[package]]
+name = "lemmy_db_perf"
+version = "0.19.0"
+dependencies = [
+ "clap",
+ "diesel",
+ "diesel-async",
+ "lemmy_db_schema",
+ "lemmy_db_views",
+ "lemmy_utils",
+ "tokio",
+]
+
 [[package]]
 name = "lemmy_db_schema"
 version = "0.19.1-rc.2"
 dependencies = [
  "activitypub_federation",
+ "anyhow",
  "async-trait",
  "bcrypt",
  "chrono",
diff --git a/Cargo.toml b/Cargo.toml
index a2cb87ff9..f17b756a8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -54,6 +54,7 @@ members = [
   "crates/api_common",
   "crates/apub",
   "crates/utils",
+  "crates/db_perf",
   "crates/db_schema",
   "crates/db_views",
   "crates/db_views_actor",
@@ -156,6 +157,7 @@ tokio-postgres = "0.7.10"
 tokio-postgres-rustls = "0.10.0"
 enum-map = "2.7"
 moka = { version = "0.12.1", features = ["future"] }
+clap = { version = "4.4.11", features = ["derive"] }
 
 [dependencies]
 lemmy_api = { workspace = true }
@@ -192,5 +194,5 @@ futures-util = { workspace = true }
 chrono = { workspace = true }
 prometheus = { version = "0.13.3", features = ["process"] }
 serial_test = { workspace = true }
-clap = { version = "4.4.11", features = ["derive"] }
+clap = { workspace = true }
 actix-web-prom = "0.7.0"
diff --git a/crates/db_perf/Cargo.toml b/crates/db_perf/Cargo.toml
new file mode 100644
index 000000000..7aa1566bf
--- /dev/null
+++ b/crates/db_perf/Cargo.toml
@@ -0,0 +1,22 @@
+[package]
+name = "lemmy_db_perf"
+version.workspace = true
+edition.workspace = true
+description.workspace = true
+license.workspace = true
+homepage.workspace = true
+documentation.workspace = true
+repository.workspace = true
+
+
+[lints]
+workspace = true
+
+[dependencies]
+clap = { workspace = true }
+diesel = { workspace = true }
+diesel-async = { workspace = true }
+lemmy_db_schema = { workspace = true }
+lemmy_db_views = { workspace = true, features = ["full"] }
+lemmy_utils = { workspace = true }
+tokio = { workspace = true }
diff --git a/crates/db_perf/src/main.rs b/crates/db_perf/src/main.rs
new file mode 100644
index 000000000..4976f9e01
--- /dev/null
+++ b/crates/db_perf/src/main.rs
@@ -0,0 +1,174 @@
+use clap::Parser;
+use diesel::{
+  dsl::{self, sql},
+  sql_types,
+  ExpressionMethods,
+  IntoSql,
+};
+use diesel_async::{RunQueryDsl, SimpleAsyncConnection};
+use lemmy_db_schema::{
+  schema::post,
+  source::{
+    community::{Community, CommunityInsertForm},
+    instance::Instance,
+    person::{Person, PersonInsertForm},
+  },
+  traits::Crud,
+  utils::{
+    build_db_pool,
+    get_conn,
+    now,
+    series::{self, ValuesFromSeries},
+  },
+  SortType,
+};
+use lemmy_db_views::{post_view::PostQuery, structs::PaginationCursor};
+use lemmy_utils::error::LemmyResult;
+use std::num::NonZeroU32;
+
+#[derive(Parser, Debug)]
+struct CmdArgs {
+  #[arg(long, default_value_t = 3.try_into().unwrap())]
+  communities: NonZeroU32,
+  #[arg(long, default_value_t = 3.try_into().unwrap())]
+  people: NonZeroU32,
+  #[arg(long, default_value_t = 100000.try_into().unwrap())]
+  posts: NonZeroU32,
+  #[arg(long, default_value_t = 0)]
+  read_post_pages: u32,
+  #[arg(long)]
+  explain_insertions: bool,
+}
+
+#[tokio::main]
+async fn main() -> LemmyResult<()> {
+  if let Err(err) = try_main().await {
+    println!("😂 Error: {err:?}");
+  }
+  if let Ok(path) = std::env::var("PGDATA") {
+    println!("🪵 query plans and error details written in {path}/log");
+  }
+
+  Ok(())
+}
+
+async fn try_main() -> LemmyResult<()> {
+  let args = CmdArgs::parse();
+  let pool = &build_db_pool().await?;
+  let pool = &mut pool.into();
+  let conn = &mut get_conn(pool).await?;
+
+  if args.explain_insertions {
+    // log_nested_statements is enabled to log trigger execution
+    conn
+      .batch_execute(
+        "SET auto_explain.log_min_duration = 0; SET auto_explain.log_nested_statements = on;",
+      )
+      .await?;
+  }
+
+  let instance = Instance::read_or_create(&mut conn.into(), "reddit.com".to_owned()).await?;
+
+  println!("🫃 creating {} people", args.people);
+  let mut person_ids = vec![];
+  for i in 0..args.people.get() {
+    let form = PersonInsertForm::builder()
+      .name(format!("p{i}"))
+      .public_key("pubkey".to_owned())
+      .instance_id(instance.id)
+      .build();
+    person_ids.push(Person::create(&mut conn.into(), &form).await?.id);
+  }
+
+  println!("🌍 creating {} communities", args.communities);
+  let mut community_ids = vec![];
+  for i in 0..args.communities.get() {
+    let form = CommunityInsertForm::builder()
+      .name(format!("c{i}"))
+      .title(i.to_string())
+      .instance_id(instance.id)
+      .build();
+    community_ids.push(Community::create(&mut conn.into(), &form).await?.id);
+  }
+
+  let post_batches = args.people.get() * args.communities.get();
+  let posts_per_batch = args.posts.get() / post_batches;
+  let num_posts = post_batches * posts_per_batch;
+  println!(
+    "📜 creating {} posts ({} featured in community)",
+    num_posts, post_batches
+  );
+  let mut num_inserted_posts = 0;
+  // TODO: progress bar
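+  // Each (person, community) pair is inserted as one batch: a single INSERT ... SELECT
+  // over generate_series() (via ValuesFromSeries), so the number of round trips to
+  // Postgres stays constant regardless of how many posts are requested.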
+  for person_id in &person_ids {
+    for community_id in &community_ids {
+      let n = dsl::insert_into(post::table)
+        .values(ValuesFromSeries {
+          start: 1,
+          stop: posts_per_batch.into(),
+          selection: (
+            "AAAAAAAAAAA".into_sql::<sql_types::Text>(),
+            person_id.into_sql::<sql_types::Integer>(),
+            community_id.into_sql::<sql_types::Integer>(),
+            series::current_value.eq(1),
+            now()
+              - sql::<sql_types::Interval>("make_interval(secs => ")
+                .bind::<sql_types::BigInt, _>(series::current_value)
+                .sql(")"),
+          ),
+        })
+        .into_columns((
+          post::name,
+          post::creator_id,
+          post::community_id,
+          post::featured_community,
+          post::published,
+        ))
+        .execute(conn)
+        .await?;
+      num_inserted_posts += n;
+    }
+  }
+  // Make sure the println above shows the correct amount
+  assert_eq!(num_inserted_posts, num_posts as usize);
+
+  // Enable auto_explain
+  conn
+    .batch_execute(
+      "SET auto_explain.log_min_duration = 0; SET auto_explain.log_nested_statements = off;",
+    )
+    .await?;
+
+  // TODO: show execution duration stats
+  let mut page_after = None;
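+  // Read pages the way the API does: the last PostView of each page is turned into a
+  // PaginationCursor, which is read back from the database and used as the bound for
+  // the next page.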
+  for page_num in 1..=args.read_post_pages {
+    println!(
+      "👀 getting page {page_num} of posts (pagination cursor used: {})",
+      page_after.is_some()
+    );
+
+    // TODO: include local_user
+    let post_views = PostQuery {
+      community_id: community_ids.get(0).cloned(),
+      sort: Some(SortType::New),
+      limit: Some(20),
+      page_after,
+      ..Default::default()
+    }
+    .list(&mut conn.into())
+    .await?;
+
+    if let Some(post_view) = post_views.into_iter().last() {
+      println!("👀 getting pagination cursor data for next page");
+      let cursor_data = PaginationCursor::after_post(&post_view)
+        .read(&mut conn.into())
+        .await?;
+      page_after = Some(cursor_data);
+    } else {
+      println!("👀 reached empty page");
+      break;
+    }
+  }
+
+  Ok(())
+}
diff --git a/crates/db_schema/Cargo.toml b/crates/db_schema/Cargo.toml
index f3ebc0b66..0ce3cfca6 100644
--- a/crates/db_schema/Cargo.toml
+++ b/crates/db_schema/Cargo.toml
@@ -76,6 +76,7 @@ tokio-postgres = { workspace = true, optional = true }
 tokio-postgres-rustls = { workspace = true, optional = true }
 rustls = { workspace = true, optional = true }
 uuid = { workspace = true, features = ["v4"] }
+anyhow = { workspace = true }
 
 [dev-dependencies]
 serial_test = { workspace = true }
diff --git a/crates/db_schema/src/impls/actor_language.rs b/crates/db_schema/src/impls/actor_language.rs
index 5c4e252d4..36ac70b0b 100644
--- a/crates/db_schema/src/impls/actor_language.rs
+++ b/crates/db_schema/src/impls/actor_language.rs
@@ -96,16 +96,18 @@ impl LocalUserLanguage {
           .execute(conn)
           .await?;
 
-        for l in lang_ids {
-          let form = LocalUserLanguageForm {
+        let forms = lang_ids
+          .into_iter()
+          .map(|l| LocalUserLanguageForm {
             local_user_id: for_local_user_id,
             language_id: l,
-          };
-          insert_into(local_user_language)
-            .values(form)
-            .get_result::<Self>(conn)
-            .await?;
-        }
+          })
+          .collect::<Vec<_>>();
+
+        insert_into(local_user_language)
+          .values(forms)
+          .execute(conn)
+          .await?;
         Ok(())
       }) as _
     })
@@ -164,16 +166,18 @@ impl SiteLanguage {
           .execute(conn)
           .await?;
 
-        for l in lang_ids {
-          let form = SiteLanguageForm {
+        let forms = lang_ids
+          .into_iter()
+          .map(|l| SiteLanguageForm {
             site_id: for_site_id,
             language_id: l,
-          };
-          insert_into(site_language)
-            .values(form)
-            .get_result::<Self>(conn)
-            .await?;
-        }
+          })
+          .collect::<Vec<_>>();
+
+        insert_into(site_language)
+          .values(forms)
+          .execute(conn)
+          .await?;
 
         CommunityLanguage::limit_languages(conn, instance_id).await?;
try_join_with_pool { }}; } +/// Includes an SQL comment before `T`, which can be used to label auto_explain output +#[derive(QueryId)] +pub struct Commented { + comment: String, + inner: T, +} + +impl Commented { + pub fn new(inner: T) -> Self { + Commented { + comment: String::new(), + inner, + } + } + + /// Adds `text` to the comment if `condition` is true + pub fn text_if(mut self, text: &str, condition: bool) -> Self { + if condition { + if !self.comment.is_empty() { + self.comment.push_str(", "); + } + self.comment.push_str(text); + } + self + } + + /// Adds `text` to the comment + pub fn text(self, text: &str) -> Self { + self.text_if(text, true) + } +} + +impl Query for Commented { + type SqlType = T::SqlType; +} + +impl> QueryFragment for Commented { + fn walk_ast<'b>( + &'b self, + mut out: diesel::query_builder::AstPass<'_, 'b, Pg>, + ) -> Result<(), DieselError> { + for line in self.comment.lines() { + out.push_sql("\n-- "); + out.push_sql(line); + } + out.push_sql("\n"); + self.inner.walk_ast(out.reborrow()) + } +} + +impl LimitDsl for Commented { + type Output = Commented; + + fn limit(self, limit: i64) -> Self::Output { + Commented { + comment: self.comment, + inner: self.inner.limit(limit), + } + } +} + pub fn fuzzy_search(q: &str) -> String { let replaced = q.replace('%', "\\%").replace('_', "\\_").replace(' ', "%"); format!("%{replaced}%") @@ -277,18 +343,21 @@ impl ServerCertVerifier for NoCertVerifier { pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!(); -fn run_migrations(db_url: &str) { +fn run_migrations(db_url: &str) -> Result<(), LemmyError> { // Needs to be a sync connection let mut conn = - PgConnection::establish(db_url).unwrap_or_else(|e| panic!("Error connecting to {db_url}: {e}")); + PgConnection::establish(db_url).with_context(|| format!("Error connecting to {db_url}"))?; + info!("Running Database migrations (This may take a long time)..."); - let _ = &mut conn + conn .run_pending_migrations(MIGRATIONS) - .unwrap_or_else(|e| panic!("Couldn't run DB Migrations: {e}")); + .map_err(|e| anyhow::anyhow!("Couldn't run DB Migrations: {e}"))?; conn .batch_execute(include_str!("../../../replaceable_schema.sql")) - .expect("Couldn't run replaceable_schema.sql"); + .with_context(|| "Couldn't run replaceable_schema.sql")?; info!("Database migrations complete."); + + Ok(()) } pub async fn build_db_pool() -> Result { @@ -312,7 +381,7 @@ pub async fn build_db_pool() -> Result { .runtime(Runtime::Tokio1) .build()?; - run_migrations(&db_url); + run_migrations(&db_url)?; Ok(pool) } diff --git a/crates/db_schema/src/utils/series.rs b/crates/db_schema/src/utils/series.rs new file mode 100644 index 000000000..c8f6831e6 --- /dev/null +++ b/crates/db_schema/src/utils/series.rs @@ -0,0 +1,81 @@ +use diesel::{ + dsl, + expression::{is_aggregate, ValidGrouping}, + pg::Pg, + query_builder::{AsQuery, AstPass, QueryFragment}, + result::Error, + sql_types, + AppearsOnTable, + Expression, + Insertable, + SelectableExpression, +}; + +#[derive(QueryId)] +pub struct ValuesFromSeries { + pub start: i64, + pub stop: i64, + pub selection: S, +} + +impl> QueryFragment for ValuesFromSeries { + fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> Result<(), Error> { + self.selection.walk_ast(out.reborrow())?; + out.push_sql(" FROM generate_series("); + out.push_bind_param::(&self.start)?; + out.push_sql(", "); + out.push_bind_param::(&self.stop)?; + out.push_sql(")"); + + Ok(()) + } +} + +impl Expression for ValuesFromSeries { + type SqlType = S::SqlType; +} + +impl> 
+impl<T, S: SelectableExpression<T>> Insertable<T> for ValuesFromSeries<S>
+where
+  dsl::BareSelect<Self>: AsQuery + Insertable<T>,
+{
+  type Values = <dsl::BareSelect<Self> as Insertable<T>>::Values;
+
+  fn values(self) -> Self::Values {
+    dsl::select(self).values()
+  }
+}
+
+impl<S: ValidGrouping<(), IsAggregate = is_aggregate::No>> ValidGrouping<()>
+  for ValuesFromSeries<S>
+{
+  type IsAggregate = is_aggregate::No;
+}
+
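+/// The current value of the series inside the generated
+/// `SELECT ... FROM generate_series(...)`; renders as the `generate_series`
+/// column that Postgres exposes for the set-returning function.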
+#[allow(non_camel_case_types)]
+#[derive(QueryId, Clone, Copy, Debug)]
+pub struct current_value;
+
+impl QueryFragment<Pg> for current_value {
+  fn walk_ast<'b>(&'b self, mut out: AstPass<'_, 'b, Pg>) -> Result<(), Error> {
+    out.push_identifier("generate_series")?;
+
+    Ok(())
+  }
+}
+
+impl Expression for current_value {
+  type SqlType = sql_types::BigInt;
+}
+
+impl<QS> AppearsOnTable<QS> for current_value {}
+
+impl<QS> SelectableExpression<QS> for current_value {}
+
+impl ValidGrouping<()> for current_value {
+  type IsAggregate = is_aggregate::No;
+}
diff --git a/crates/db_views/src/post_view.rs b/crates/db_views/src/post_view.rs
index 857e7f644..7d42c7c22 100644
--- a/crates/db_views/src/post_view.rs
+++ b/crates/db_views/src/post_view.rs
@@ -43,6 +43,7 @@ use lemmy_db_schema::{
     get_conn,
     limit_and_offset,
     now,
+    Commented,
     DbConn,
     DbPool,
     ListFn,
@@ -282,7 +283,10 @@ fn queries<'a>() -> Queries<
       );
     }
 
-    query.first::<PostView>(&mut conn).await
+    Commented::new(query)
+      .text("PostView::read")
+      .first::<PostView>(&mut conn)
+      .await
   };
 
   let list = move |mut conn: DbConn<'a>, options: PostQuery<'a>| async move {
@@ -558,7 +562,14 @@ fn queries<'a>() -> Queries<
 
     debug!("Post View Query: {:?}", debug_query::<Pg, _>(&query));
 
-    query.load::<PostView>(&mut conn).await
+    Commented::new(query)
+      .text("PostQuery::list")
+      .text_if(
+        "getting upper bound for next query",
+        options.community_id_just_for_prefetch,
+      )
+      .load::<PostView>(&mut conn)
+      .await
   };
 
   Queries::new(read, list)
diff --git a/migrations/2023-12-19-210053_tolerable-batch-insert-speed/down.sql b/migrations/2023-12-19-210053_tolerable-batch-insert-speed/down.sql
new file mode 100644
index 000000000..3b08a616b
--- /dev/null
+++ b/migrations/2023-12-19-210053_tolerable-batch-insert-speed/down.sql
@@ -0,0 +1,88 @@
+CREATE OR REPLACE FUNCTION post_aggregates_post ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+    IF (TG_OP = 'INSERT') THEN
+        INSERT INTO post_aggregates (post_id, published, newest_comment_time, newest_comment_time_necro, community_id, creator_id, instance_id)
+        SELECT
+            NEW.id,
+            NEW.published,
+            NEW.published,
+            NEW.published,
+            NEW.community_id,
+            NEW.creator_id,
+            community.instance_id
+        FROM
+            community
+        WHERE
+            NEW.community_id = community.id;
+    ELSIF (TG_OP = 'DELETE') THEN
+        DELETE FROM post_aggregates
+        WHERE post_id = OLD.id;
+    END IF;
+    RETURN NULL;
+END
+$$;
+
+CREATE OR REPLACE TRIGGER post_aggregates_post
+    AFTER INSERT OR DELETE ON post
+    FOR EACH ROW
+    EXECUTE PROCEDURE post_aggregates_post ();
+
+CREATE OR REPLACE TRIGGER community_aggregates_post_count
+    AFTER INSERT OR DELETE OR UPDATE OF removed,
+    deleted ON post
+    FOR EACH ROW
+    EXECUTE PROCEDURE community_aggregates_post_count ();
+
+DROP FUNCTION IF EXISTS community_aggregates_post_count_insert CASCADE;
+
+DROP FUNCTION IF EXISTS community_aggregates_post_update CASCADE;
+
+DROP FUNCTION IF EXISTS site_aggregates_post_update CASCADE;
+
+DROP FUNCTION IF EXISTS person_aggregates_post_insert CASCADE;
+
+CREATE OR REPLACE FUNCTION site_aggregates_post_insert ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+    IF (was_restored_or_created (TG_OP, OLD, NEW)) THEN
+        UPDATE
+            site_aggregates sa
+        SET
+            posts = posts + 1
+        FROM
+            site s
+        WHERE
+            sa.site_id = s.id;
+    END IF;
+    RETURN NULL;
+END
+$$;
+
+CREATE OR REPLACE TRIGGER site_aggregates_post_insert
+    AFTER INSERT OR UPDATE OF removed,
+    deleted ON post
+    FOR EACH ROW
+    WHEN (NEW.local = TRUE)
+    EXECUTE PROCEDURE site_aggregates_post_insert ();
+
+CREATE OR REPLACE FUNCTION generate_unique_changeme ()
+    RETURNS text
+    LANGUAGE sql
+    AS $$
+    SELECT
+        'http://changeme.invalid/' || substr(md5(random()::text), 0, 25);
+$$;
+
+CREATE OR REPLACE TRIGGER person_aggregates_post_count
+    AFTER INSERT OR DELETE OR UPDATE OF removed,
+    deleted ON post
+    FOR EACH ROW
+    EXECUTE PROCEDURE person_aggregates_post_count ();
+
+DROP SEQUENCE IF EXISTS changeme_seq;
+
diff --git a/migrations/2023-12-19-210053_tolerable-batch-insert-speed/up.sql b/migrations/2023-12-19-210053_tolerable-batch-insert-speed/up.sql
new file mode 100644
index 000000000..aaa866d3a
--- /dev/null
+++ b/migrations/2023-12-19-210053_tolerable-batch-insert-speed/up.sql
@@ -0,0 +1,166 @@
+-- Change triggers to run once per statement instead of once per row
+-- post_aggregates_post trigger doesn't need to handle deletion because the post_id column has ON DELETE CASCADE
+CREATE OR REPLACE FUNCTION post_aggregates_post ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+    INSERT INTO post_aggregates (post_id, published, newest_comment_time, newest_comment_time_necro, community_id, creator_id, instance_id)
+    SELECT
+        id,
+        published,
+        published,
+        published,
+        community_id,
+        creator_id,
+        (
+            SELECT
+                community.instance_id
+            FROM
+                community
+            WHERE
+                community.id = community_id
+            LIMIT 1)
+FROM
+    new_post;
+    RETURN NULL;
+END
+$$;
+
+CREATE OR REPLACE FUNCTION community_aggregates_post_count_insert ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+    UPDATE
+        community_aggregates
+    SET
+        posts = posts + post_group.count
+    FROM (
+        SELECT
+            community_id,
+            count(*)
+        FROM
+            new_post
+        GROUP BY
+            community_id) post_group
+WHERE
+    community_aggregates.community_id = post_group.community_id;
+    RETURN NULL;
+END
+$$;
+
+CREATE OR REPLACE FUNCTION person_aggregates_post_insert ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+    UPDATE
+        person_aggregates
+    SET
+        post_count = post_count + post_group.count
+    FROM (
+        SELECT
+            creator_id,
+            count(*)
+        FROM
+            new_post
+        GROUP BY
+            creator_id) post_group
+WHERE
+    person_aggregates.person_id = post_group.creator_id;
+    RETURN NULL;
+END
+$$;
+
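+-- REFERENCING NEW TABLE AS new_post exposes all rows of the triggering INSERT as a
+-- transition table, letting each statement-level trigger below run once per INSERT
+-- statement instead of once per inserted row.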
+CREATE OR REPLACE TRIGGER post_aggregates_post
+    AFTER INSERT ON post REFERENCING NEW TABLE AS new_post
+    FOR EACH STATEMENT
+    EXECUTE PROCEDURE post_aggregates_post ();
+
+-- Don't run old trigger for insert
+CREATE OR REPLACE TRIGGER community_aggregates_post_count
+    AFTER DELETE OR UPDATE OF removed,
+    deleted ON post
+    FOR EACH ROW
+    EXECUTE PROCEDURE community_aggregates_post_count ();
+
+CREATE OR REPLACE TRIGGER community_aggregates_post_count_insert
+    AFTER INSERT ON post REFERENCING NEW TABLE AS new_post
+    FOR EACH STATEMENT
+    EXECUTE PROCEDURE community_aggregates_post_count_insert ();
+
+CREATE OR REPLACE FUNCTION site_aggregates_post_update ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+    IF (was_restored_or_created (TG_OP, OLD, NEW)) THEN
+        UPDATE
+            site_aggregates sa
+        SET
+            posts = posts + 1
+        FROM
+            site s
+        WHERE
+            sa.site_id = s.id;
+    END IF;
+    RETURN NULL;
+END
+$$;
+
+CREATE OR REPLACE FUNCTION site_aggregates_post_insert ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $$
+BEGIN
+    UPDATE
+        site_aggregates sa
+    SET
+        posts = posts + (
+            SELECT
+                count(*)
+            FROM
+                new_post)
+    FROM
+        site s
+    WHERE
+        sa.site_id = s.id;
+    RETURN NULL;
+END
+$$;
+
+CREATE OR REPLACE TRIGGER site_aggregates_post_update
+    AFTER UPDATE OF removed,
+    deleted ON post
+    FOR EACH ROW
+    WHEN (NEW.local = TRUE)
+    EXECUTE PROCEDURE site_aggregates_post_update ();
+
+CREATE OR REPLACE TRIGGER site_aggregates_post_insert
+    AFTER INSERT ON post REFERENCING NEW TABLE AS new_post
+    FOR EACH STATEMENT
+    EXECUTE PROCEDURE site_aggregates_post_insert ();
+
+CREATE OR REPLACE TRIGGER person_aggregates_post_count
+    AFTER DELETE OR UPDATE OF removed,
+    deleted ON post
+    FOR EACH ROW
+    EXECUTE PROCEDURE person_aggregates_post_count ();
+
+CREATE OR REPLACE TRIGGER person_aggregates_post_insert
+    AFTER INSERT ON post REFERENCING NEW TABLE AS new_post
+    FOR EACH STATEMENT
+    EXECUTE PROCEDURE person_aggregates_post_insert ();
+
+-- Avoid running hash function and random number generation for default ap_id
+CREATE SEQUENCE IF NOT EXISTS changeme_seq AS bigint CYCLE;
+
+CREATE OR REPLACE FUNCTION generate_unique_changeme ()
+    RETURNS text
+    LANGUAGE sql
+    AS $$
+    SELECT
+        'http://changeme.invalid/seq/' || nextval('changeme_seq')::text;
+$$;
+
diff --git a/scripts/db_perf.sh b/scripts/db_perf.sh
new file mode 100755
index 000000000..9d06d5175
--- /dev/null
+++ b/scripts/db_perf.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+set -e
+
+CWD="$(cd -P -- "$(dirname -- "${BASH_SOURCE[0]}")" && pwd -P)"
+
+cd $CWD/../
+
+source scripts/start_dev_db.sh
+
+export LEMMY_CONFIG_LOCATION=config/config.hjson
+export RUST_BACKTRACE=1
+
+cargo run --package lemmy_db_perf -- "$@"
+
+pg_ctl stop --silent
+
+# $PGDATA directory is kept so log can be seen
diff --git a/scripts/start_dev_db.sh b/scripts/start_dev_db.sh
index e320372cd..5965316ba 100644
--- a/scripts/start_dev_db.sh
+++ b/scripts/start_dev_db.sh
@@ -8,20 +8,43 @@ export LEMMY_DATABASE_URL=$DATABASE_URL
 export PGDATABASE=lemmy
 
 # If cluster exists, stop the server and delete the cluster
-if [ -d $PGDATA ]
+if [[ -d $PGDATA ]]
 then
-  # Prevent `stop` from failing if server already stopped
-  pg_ctl restart > /dev/null
-  pg_ctl stop
+  # Only stop server if it is running
+  pg_status_exit_code=0
+  (pg_ctl status > /dev/null) || pg_status_exit_code=$?
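+  # pg_ctl status exits with code 3 when the server is not running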
+  if [[ ${pg_status_exit_code} -ne 3 ]]
+  then
+    pg_ctl stop --silent
+  fi
+
   rm -rf $PGDATA
 fi
 
-# Create cluster
-initdb --username=postgres --auth=trust --no-instructions
+config_args=(
+  # Only listen to socket in current directory
+  -c listen_addresses=
+  -c unix_socket_directories=$PWD
 
-# Start server that only listens to socket in current directory
-pg_ctl start --options="-c listen_addresses= -c unix_socket_directories=$PWD" > /dev/null
+  # Write logs to a file in $PGDATA/log
+  -c logging_collector=on
+
+  # Allow auto_explain to be turned on
+  -c session_preload_libraries=auto_explain
+
+  # Include actual row amounts and run times for query plan nodes
+  -c auto_explain.log_analyze=on
+
+  # Don't log parameter values
+  -c auto_explain.log_parameter_max_length=0
+)
+
+# Create cluster
+pg_ctl init --silent --options="--username=postgres --auth=trust --no-instructions"
+
+# Start server
+pg_ctl start --silent --options="${config_args[*]}"
 
 # Setup database
-PGDATABASE=postgres psql -c "CREATE USER lemmy WITH PASSWORD 'password' SUPERUSER;"
-PGDATABASE=postgres psql -c "CREATE DATABASE lemmy WITH OWNER lemmy;"
+PGDATABASE=postgres psql --quiet -c "CREATE USER lemmy WITH PASSWORD 'password' SUPERUSER;"
+PGDATABASE=postgres psql --quiet -c "CREATE DATABASE lemmy WITH OWNER lemmy;"
diff --git a/scripts/test.sh b/scripts/test.sh
index cdfbf7611..efe9b1513 100755
--- a/scripts/test.sh
+++ b/scripts/test.sh
@@ -27,5 +27,5 @@ cargo test -p lemmy_utils --all-features --no-fail-fast
 
 # Add this to do printlns: -- --nocapture
 
-pg_ctl stop
+pg_ctl stop --silent
 rm -rf $PGDATA