Skip to content

Commit

Permalink
Remove redundant enum
Browse the repository at this point in the history
  • Loading branch information
pawurb committed Jan 25, 2024
1 parent 08c61a4 commit df5c2a7
Show file tree
Hide file tree
Showing 35 changed files with 200 additions and 214 deletions.
72 changes: 34 additions & 38 deletions src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ pub use structs::null_indexes::NullIndexes;
pub use structs::outliers::Outliers;
pub use structs::records_rank::RecordsRank;
pub use structs::seq_scans::SeqScans;
pub use structs::shared::{get_default_schema, Query, Tabular};
pub use structs::shared::{get_default_schema, Tabular};
pub use structs::ssl_used::SslUsed;
pub use structs::table_cache_hit::TableCacheHit;
pub use structs::table_index_scans::TableIndexScans;
Expand Down Expand Up @@ -52,190 +52,186 @@ pub fn render_table<T: Tabular>(items: Vec<T>) {
}

/// Estimates table and index bloat by running the bundled `bloat.sql`
/// query against the connected database.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn bloat() -> Result<Vec<Bloat>, PgExtrasError> {
    let query = Bloat::read_file();
    get_rows(query).await
}

/// Lists queries holding locks that other queries are waiting on.
///
/// `limit` caps the number of rows; defaults to `"10"` when `None`.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn blocking(limit: Option<String>) -> Result<Vec<Blocking>, PgExtrasError> {
    // `unwrap_or_else` avoids allocating the default when a limit is given.
    let limit = limit.unwrap_or_else(|| "10".to_string());
    let query = Blocking::read_file().replace("%{limit}", limit.as_str());
    get_rows(&query).await
}

/// Shows the most frequently called queries (via `pg_stat_statements`
/// data in the bundled SQL).
///
/// `limit` caps the number of rows; defaults to `"10"` when `None`.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn calls(limit: Option<String>) -> Result<Vec<Calls>, PgExtrasError> {
    // `unwrap_or_else` avoids allocating the default when a limit is given.
    let limit = limit.unwrap_or_else(|| "10".to_string());
    let query = Calls::read_file().replace("%{limit}", limit.as_str());
    get_rows(&query).await
}

/// Lists available and installed PostgreSQL extensions.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn extensions() -> Result<Vec<Extensions>, PgExtrasError> {
    let query = Extensions::read_file();
    get_rows(query).await
}

/// Reports the cache hit rate per table.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn table_cache_hit() -> Result<Vec<TableCacheHit>, PgExtrasError> {
    let query = TableCacheHit::read_file();
    get_rows(query).await
}

pub async fn tables(schema: Option<String>) -> Result<Vec<Tables>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query = Query::read_file(Tables::FILE_NAME).replace("%{schema}", &schema_name);
let query = Tables::read_file().replace("%{schema}", &schema_name);
get_rows(&query).await
}

pub async fn index_cache_hit(schema: Option<String>) -> Result<Vec<IndexCacheHit>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query = Query::read_file(IndexCacheHit::FILE_NAME).replace("%{schema}", &schema_name);
let query = IndexCacheHit::read_file().replace("%{schema}", &schema_name);
get_rows(&query).await
}

/// Lists existing indexes.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn indexes() -> Result<Vec<Indexes>, PgExtrasError> {
    let query = Indexes::read_file();
    get_rows(query).await
}

/// Reports the size of each index.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn index_size() -> Result<Vec<IndexSize>, PgExtrasError> {
    let query = IndexSize::read_file();
    get_rows(query).await
}

pub async fn index_usage(schema: Option<String>) -> Result<Vec<IndexUsage>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query = Query::read_file(IndexUsage::FILE_NAME).replace("%{schema}", &schema_name);
let query = IndexUsage::read_file().replace("%{schema}", &schema_name);
get_rows(&query).await
}

pub async fn index_scans(schema: Option<String>) -> Result<Vec<IndexScans>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query = Query::read_file(IndexScans::FILE_NAME).replace("%{schema}", &schema_name);
let query = IndexScans::read_file().replace("%{schema}", &schema_name);
get_rows(&query).await
}

/// Finds indexes containing mostly NULL values.
///
/// `min_relation_size_mb` filters out relations smaller than the given
/// size in megabytes; defaults to `"0"` (no filter) when `None`.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn null_indexes(
    min_relation_size_mb: Option<String>,
) -> Result<Vec<NullIndexes>, PgExtrasError> {
    // `unwrap_or_else` avoids allocating the default when a value is given.
    let min_relation_size_mb = min_relation_size_mb.unwrap_or_else(|| "0".to_string());
    let query = NullIndexes::read_file().replace("%{min_relation_size_mb}", &min_relation_size_mb);
    get_rows(&query).await
}

/// Lists queries holding locks (bundled `locks` query).
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn locks() -> Result<Vec<Locks>, PgExtrasError> {
    let query = Locks::read_file();
    get_rows(query).await
}

/// Lists all locks, regardless of state (bundled `all_locks` query).
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn all_locks() -> Result<Vec<AllLocks>, PgExtrasError> {
    let query = AllLocks::read_file();
    get_rows(query).await
}

/// Lists currently long-running queries.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn long_running_queries() -> Result<Vec<LongRunningQueries>, PgExtrasError> {
    let query = LongRunningQueries::read_file();
    get_rows(query).await
}

/// Runs the bundled Mandelbrot demo query (a fun server-side workload).
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn mandelbrot() -> Result<Vec<Mandelbrot>, PgExtrasError> {
    let query = Mandelbrot::read_file();
    get_rows(query).await
}

/// Lists queries consuming the most total execution time (bundled
/// `outliers` query).
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn outliers() -> Result<Vec<Outliers>, PgExtrasError> {
    let query = Outliers::read_file();
    get_rows(query).await
}

pub async fn records_rank(schema: Option<String>) -> Result<Vec<RecordsRank>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query = Query::read_file(RecordsRank::FILE_NAME).replace("%{schema}", schema_name.as_str());
let query = RecordsRank::read_file().replace("%{schema}", schema_name.as_str());
get_rows(&query).await
}

pub async fn seq_scans(schema: Option<String>) -> Result<Vec<SeqScans>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query = Query::read_file(SeqScans::FILE_NAME).replace("%{schema}", schema_name.as_str());
let query = SeqScans::read_file().replace("%{schema}", schema_name.as_str());
get_rows(&query).await
}

pub async fn table_index_scans(
schema: Option<String>,
) -> Result<Vec<TableIndexScans>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query =
Query::read_file(TableIndexScans::FILE_NAME).replace("%{schema}", schema_name.as_str());
let query = TableIndexScans::read_file().replace("%{schema}", schema_name.as_str());
get_rows(&query).await
}

pub async fn table_indexes_size(
schema: Option<String>,
) -> Result<Vec<TableIndexesSize>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query =
Query::read_file(TableIndexesSize::FILE_NAME).replace("%{schema}", schema_name.as_str());
let query = TableIndexesSize::read_file().replace("%{schema}", schema_name.as_str());
get_rows(&query).await
}

/// Reports the size of each table.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn table_size() -> Result<Vec<TableSize>, PgExtrasError> {
    let query = TableSize::read_file();
    get_rows(query).await
}

/// Reports the total size of all indexes.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn total_index_size() -> Result<Vec<TotalIndexSize>, PgExtrasError> {
    let query = TotalIndexSize::read_file();
    get_rows(query).await
}

/// Reports the total size of each table including its indexes.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn total_table_size() -> Result<Vec<TotalTableSize>, PgExtrasError> {
    let query = TotalTableSize::read_file();
    get_rows(query).await
}

pub async fn unused_indexes(schema: Option<String>) -> Result<Vec<UnusedIndexes>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query =
Query::read_file(UnusedIndexes::FILE_NAME).replace("%{schema}", schema_name.as_str());
let query = UnusedIndexes::read_file().replace("%{schema}", schema_name.as_str());
get_rows(&query).await
}

/// Lists duplicate (redundant) indexes.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn duplicate_indexes() -> Result<Vec<DuplicateIndexes>, PgExtrasError> {
    let query = DuplicateIndexes::read_file();
    get_rows(query).await
}

/// Reports vacuum activity statistics per table.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn vacuum_stats() -> Result<Vec<VacuumStats>, PgExtrasError> {
    let query = VacuumStats::read_file();
    get_rows(query).await
}

/// Reports shared buffer cache statistics per relation.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn buffercache_stats() -> Result<Vec<BuffercacheStats>, PgExtrasError> {
    let query = BuffercacheStats::read_file();
    get_rows(query).await
}

/// Reports shared buffer usage per relation.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn buffercache_usage() -> Result<Vec<BuffercacheUsage>, PgExtrasError> {
    let query = BuffercacheUsage::read_file();
    get_rows(query).await
}

/// Reports whether the current connection uses SSL.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn ssl_used() -> Result<Vec<SslUsed>, PgExtrasError> {
    let query = SslUsed::read_file();
    get_rows(query).await
}

/// Lists current database connections.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn connections() -> Result<Vec<Connections>, PgExtrasError> {
    let query = Connections::read_file();
    get_rows(query).await
}

pub async fn cache_hit(schema: Option<String>) -> Result<Vec<CacheHit>, PgExtrasError> {
let schema_name = schema.unwrap_or(get_default_schema());
let query = Query::read_file(CacheHit::FILE_NAME).replace("%{schema}", schema_name.as_str());
let query = CacheHit::read_file().replace("%{schema}", schema_name.as_str());
get_rows(&query).await
}

/// Reports selected database configuration settings.
///
/// # Errors
/// Returns `PgExtrasError` when the query cannot be executed.
pub async fn db_settings() -> Result<Vec<DbSettings>, PgExtrasError> {
    let query = DbSettings::read_file();
    get_rows(query).await
}

Expand Down
8 changes: 5 additions & 3 deletions src/structs/all_locks.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::structs::shared::{get_default_interval, Query, Tabular};
use crate::structs::shared::{get_default_interval, Tabular};
use sqlx::postgres::{types::PgInterval, PgRow};
use sqlx::Row;

Expand All @@ -15,8 +15,6 @@ pub struct AllLocks {
}

impl Tabular for AllLocks {
const FILE_NAME: Query = Query::AllLocks;

fn new(row: &PgRow) -> Self {
Self {
pid: row.try_get("pid").unwrap_or_default(),
Expand Down Expand Up @@ -55,4 +53,8 @@ impl Tabular for AllLocks {
"application"
]
}

fn read_file() -> &'static str {
include_str!("../queries/all_locks.sql")
}
}
8 changes: 5 additions & 3 deletions src/structs/bloat.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::structs::shared::{Query, Tabular};
use crate::structs::shared::Tabular;
use sqlx::postgres::PgRow;
use sqlx::types::BigDecimal;
use sqlx::Row;
Expand All @@ -13,8 +13,6 @@ pub struct Bloat {
}

impl Tabular for Bloat {
const FILE_NAME: Query = Query::Bloat;

fn new(row: &PgRow) -> Self {
Self {
typefield: row.try_get("type").unwrap_or_default(),
Expand All @@ -38,4 +36,8 @@ impl Tabular for Bloat {
fn headers() -> prettytable::Row {
row!["type", "schemaname", "object_name", "bloat", "waste"]
}

fn read_file() -> &'static str {
include_str!("../queries/bloat.sql")
}
}
8 changes: 5 additions & 3 deletions src/structs/blocking.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::structs::shared::{get_default_interval, Query, Tabular};
use crate::structs::shared::{get_default_interval, Tabular};
use sqlx::postgres::{types::PgInterval, PgRow};
use sqlx::Row;

Expand All @@ -15,8 +15,6 @@ pub struct Blocking {
}

impl Tabular for Blocking {
const FILE_NAME: Query = Query::Blocking;

fn new(row: &PgRow) -> Self {
Self {
blocked_pid: row.try_get("blocked_pid").unwrap_or_default(),
Expand Down Expand Up @@ -59,4 +57,8 @@ impl Tabular for Blocking {
"blocking_sql_app"
]
}

fn read_file() -> &'static str {
include_str!("../queries/blocking.sql")
}
}
8 changes: 5 additions & 3 deletions src/structs/buffercache_stats.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::structs::shared::{Query, Tabular};
use crate::structs::shared::Tabular;
use sqlx::postgres::PgRow;
use sqlx::Row;

Expand All @@ -11,8 +11,6 @@ pub struct BuffercacheStats {
}

impl Tabular for BuffercacheStats {
const FILE_NAME: Query = Query::BuffercacheStats;

fn new(row: &PgRow) -> Self {
Self {
relname: row.try_get("relname").unwrap_or_default(),
Expand All @@ -39,4 +37,8 @@ impl Tabular for BuffercacheStats {
"percent_of_relation"
]
}

fn read_file() -> &'static str {
include_str!("../queries/buffercache_stats.sql")
}
}
8 changes: 5 additions & 3 deletions src/structs/buffercache_usage.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::structs::shared::{Query, Tabular};
use crate::structs::shared::Tabular;
use sqlx::postgres::PgRow;
use sqlx::Row;

Expand All @@ -9,8 +9,6 @@ pub struct BuffercacheUsage {
}

impl Tabular for BuffercacheUsage {
const FILE_NAME: Query = Query::BuffercacheUsage;

fn new(row: &PgRow) -> Self {
Self {
relname: row.try_get("relname").unwrap_or_default(),
Expand All @@ -25,4 +23,8 @@ impl Tabular for BuffercacheUsage {
fn headers() -> prettytable::Row {
row!["relname", "buffers"]
}

fn read_file() -> &'static str {
include_str!("../queries/buffercache_usage.sql")
}
}
8 changes: 5 additions & 3 deletions src/structs/cache_hit.rs
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
use crate::structs::shared::{Query, Tabular};
use crate::structs::shared::Tabular;
use sqlx::postgres::PgRow;
use sqlx::types::BigDecimal;
use sqlx::Row;
Expand All @@ -10,8 +10,6 @@ pub struct CacheHit {
}

impl Tabular for CacheHit {
const FILE_NAME: Query = Query::CacheHit;

fn new(row: &PgRow) -> Self {
Self {
name: row.try_get("name").unwrap_or_default(),
Expand All @@ -26,4 +24,8 @@ impl Tabular for CacheHit {
fn headers() -> prettytable::Row {
row!["name", "ratio"]
}

fn read_file() -> &'static str {
include_str!("../queries/cache_hit.sql")
}
}
Loading

0 comments on commit df5c2a7

Please sign in to comment.