Add ability to read SQL from files, align dbname

James Campbell 2024-10-29 02:51:57 -04:00
parent 449c202098
commit 237ee9d2a2
Signed by: james
GPG Key ID: 2287C33A40DC906A
4 changed files with 66 additions and 34 deletions

View File

@ -3,7 +3,7 @@ metrics:
  discover_dbs:
    type: set
    query:
      0: SELECT datname FROM pg_database
      0: SELECT datname AS dbname FROM pg_database
  discover_rep:
    type: set
    query:
@ -23,8 +23,14 @@ metrics:
  db_stats:
    type: row
    query:
      0: SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, conflicts, temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time, extract('epoch' from stats_reset)::float FROM pg_stat_database WHERE datname = %(datname)s
      140000: SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, conflicts, temp_files, temp_bytes, deadlocks, COALESCE(checksum_failures, 0) AS checksum_failures, blk_read_time, blk_write_time, session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed, extract('epoch' from stats_reset)::float FROM pg_stat_database WHERE datname = %(datname)s
      0: SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, conflicts, temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time, extract('epoch' from stats_reset)::float FROM pg_stat_database WHERE datname = %(dbname)s
      140000: SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, conflicts, temp_files, temp_bytes, deadlocks, COALESCE(checksum_failures, 0) AS checksum_failures, blk_read_time, blk_write_time, session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed, extract('epoch' from stats_reset)::float FROM pg_stat_database WHERE datname = %(dbname)s

  # Debugging
  ntables:
    type: value
    query:
      0: file:sql/ntables.sql

  # Per-replication metrics
  rep_stats:
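
The integer keys under each metric's query: map appear to be minimum PostgreSQL server version numbers (0 as the catch-all default, 140000 matching server_version_num for PostgreSQL 14 and later), and after this commit a value may be either inline SQL or a file: reference that is resolved when the config is loaded. The version-selection logic itself is not part of this diff; the following is only a minimal sketch of how such a map could be resolved, using a hypothetical pick_query helper:

# Hypothetical helper (not part of this commit): choose the query whose
# version key is the highest one not exceeding the server's version number.
def pick_query(queries, server_version_num):
    eligible = [vers for vers in queries if vers <= server_version_num]
    return queries[max(eligible)]

queries = {
    0: "SELECT ... FROM pg_stat_database WHERE datname = %(dbname)s",
    140000: "SELECT ..., checksum_failures, ... WHERE datname = %(dbname)s",
}

print(pick_query(queries, 130011))  # PostgreSQL 13 -> falls back to the 0 entry
print(pick_query(queries, 160002))  # PostgreSQL 16 -> uses the 140000 entry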

View File

@ -69,6 +69,27 @@ default_config = {
    'metrics': {}
}

def update_deep(d1, d2):
    """
    Recursively update a dict, adding keys to dictionaries and appending to
    lists. Note that this both modifies and returns the first dict.

    Params:
        d1: the dictionary to update
        d2: the dictionary to get new values from

    Returns:
        The new d1
    """
    for k, v in d2.items():
        if isinstance(v, dict):
            d1[k] = update_deep(d1.get(k, {}), v)
        elif isinstance(v, list):
            d1[k] = d1.get(k, []) + v
        else:
            d1[k] = v
    return d1

def read_config(path, included = False):
    """
    Read a config file.
@ -84,7 +105,7 @@ def read_config(path, included = False):
    # Read any included config files
    for inc in cfg.get('include', []):
        cfg.update(read_config(inc, included=True))
        update_deep(cfg, read_config(inc, included=True))

    # Return the config we read if this is an include, otherwise set the final
    # config
@ -93,7 +114,15 @@ def read_config(path, included = False):
    else:
        new_config = {}
        new_config.update(default_config)
        new_config.update(cfg)
        update_deep(new_config, cfg)

        # Read any external queries
        for metric in new_config.get('metrics', {}).values():
            for vers, query in metric['query'].items():
                if query.startswith('file:'):
                    path = query[5:]
                    with open(path, 'r') as f:
                        metric['query'][vers] = f.read()

        # Minor sanity checks
        if len(new_config['metrics']) == 0:
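
Unlike dict.update, update_deep merges nested dictionaries recursively and concatenates lists instead of replacing them, which is what lets an included config file add metrics and include entries without clobbering those already defined. A small usage sketch, assuming update_deep as defined above is in scope (the file names are made up for illustration):

base = {'metrics': {'db_stats': {'type': 'row'}}, 'include': ['base.yml']}
extra = {'metrics': {'ntables': {'type': 'value'}}, 'include': ['extra.yml']}

update_deep(base, extra)

# base is modified in place and returned:
# {'metrics': {'db_stats': {'type': 'row'}, 'ntables': {'type': 'value'}},
#  'include': ['base.yml', 'extra.yml']}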

View File

@ -84,7 +84,7 @@ zabbix_export:
parameters:
- '0.001'
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -101,7 +101,7 @@ zabbix_export:
parameters:
- $.numbackends
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -118,7 +118,7 @@ zabbix_export:
parameters:
- $.blks_hit
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -135,7 +135,7 @@ zabbix_export:
parameters:
- $.blks_read
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -157,7 +157,7 @@ zabbix_export:
parameters:
- '0.001'
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -179,7 +179,7 @@ zabbix_export:
parameters:
- '0.001'
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -201,7 +201,7 @@ zabbix_export:
error_handler: CUSTOM_VALUE
error_handler_params: '0'
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -218,7 +218,7 @@ zabbix_export:
parameters:
- $.conflicts
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -235,7 +235,7 @@ zabbix_export:
parameters:
- $.deadlocks
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -257,7 +257,7 @@ zabbix_export:
parameters:
- '0.001'
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -274,7 +274,7 @@ zabbix_export:
parameters:
- $.sessions
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -291,7 +291,7 @@ zabbix_export:
parameters:
- $.sessions_abandoned
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -308,7 +308,7 @@ zabbix_export:
parameters:
- $.sessions_fatal
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -325,7 +325,7 @@ zabbix_export:
parameters:
- $.sessions_killed
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -343,7 +343,7 @@ zabbix_export:
parameters:
- $.temp_bytes
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -360,7 +360,7 @@ zabbix_export:
parameters:
- $.temp_files
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -377,7 +377,7 @@ zabbix_export:
parameters:
- $.tup_deleted
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -394,7 +394,7 @@ zabbix_export:
parameters:
- $.tup_fetched
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -411,7 +411,7 @@ zabbix_export:
parameters:
- $.tup_inserted
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -428,7 +428,7 @@ zabbix_export:
parameters:
- $.tup_returned
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -445,7 +445,7 @@ zabbix_export:
parameters:
- $.tup_updated
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -462,7 +462,7 @@ zabbix_export:
parameters:
- $.xact_commit
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -479,7 +479,7 @@ zabbix_export:
parameters:
- $.xact_rollback
master_item:
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
tags:
- tag: Application
value: PostgreSQL
@ -557,7 +557,7 @@ zabbix_export:
value: '{#DBNAME}'
- uuid: 492b3cac15f348c2b85f97b69c114d1b
name: 'Database Stats for {#DBNAME}'
key: 'web.page.get[localhost,/db_stats?datname={#DBNAME},{$AGENT_PORT}]'
key: 'web.page.get[localhost,/db_stats?dbname={#DBNAME},{$AGENT_PORT}]'
history: '0'
value_type: TEXT
preprocessing:
@ -599,7 +599,7 @@ zabbix_export:
key: 'pgmon_db[blk_write_time,{#DBNAME}]'
lld_macro_paths:
- lld_macro: '{#DBNAME}'
path: $.datname
path: $.dbname
preprocessing:
- type: REGEX
parameters:
@ -796,12 +796,8 @@ zabbix_export:
- tag: Database
value: '{#DBNAME}'
lld_macro_paths:
- lld_macro: '{#AGENT}'
path: $.agent
- lld_macro: '{#CLIENT_ADDR}'
path: $.client_addr
- lld_macro: '{#CLUSTER}'
path: $.cluster
- lld_macro: '{#REPID}'
path: $.repid
- lld_macro: '{#STATE}'
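
The renames in this template are the Zabbix side of the same alignment: the discovery query now returns the column as dbname, the {#DBNAME} LLD macro is populated from $.dbname, and the dependent items poll /db_stats?dbname=..., so one name is used from SQL column to query parameter to item key. A rough illustration of how a discovered database would flow through, assuming a discovery row shaped like the query output (the exact payload format is not shown in this diff, and Zabbix itself performs the macro substitution):

# Assumed shape of one discovery row after this change; the key matches both
# the {#DBNAME} macro path ($.dbname) and the db_stats query parameter.
discovery_row = {"dbname": "postgres"}

# Hypothetical rendering of the item key Zabbix would build from {#DBNAME}:
item_key = "web.page.get[localhost,/db_stats?dbname={0},{{$AGENT_PORT}}]".format(
    discovery_row["dbname"]
)
# -> web.page.get[localhost,/db_stats?dbname=postgres,{$AGENT_PORT}]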

sql/ntables.sql Normal file
View File

@ -0,0 +1 @@
SELECT count(*) AS ntables FROM pg_stat_user_tables;
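
With the file: handling added to read_config above, this query is inlined into the metric definition at config-load time. Conceptually, and assuming the path is resolved relative to the agent's working directory (which this diff does not spell out), the resolution looks like:

metric = {'type': 'value', 'query': {0: 'file:sql/ntables.sql'}}

for vers, query in metric['query'].items():
    if query.startswith('file:'):
        with open(query[5:], 'r') as f:      # opens sql/ntables.sql
            metric['query'][vers] = f.read() # now the SELECT count(*) ... text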