Compare commits

24d1214855b9467a51002fbf20e87e88488da216..ebb084aa9dec7c7ad87baa8104e9bb43cf69e3fb

No commits in common. "24d1214855b9467a51002fbf20e87e88488da216" and "ebb084aa9dec7c7ad87baa8104e9bb43cf69e3fb" have entirely different histories.

3 changed files with 16 additions and 60 deletions

View File

@@ -8,21 +8,14 @@ metrics:
       0: >
         SELECT datname AS dbname
         FROM pg_database
-        # Note: If the user lacks sufficient privileges, these fields will be NULL.
-        # The WHERE clause is intended to prevent Zabbix from discovering a
-        # connection it cannot monitor. Ideally this would generate an error
-        # instead.
   discover_rep:
     type: set
     query:
       0: >
-        SELECT host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid,
+        SELECT client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid,
                client_addr,
                state
         FROM pg_stat_replication
-        WHERE state IS NOT NULL
   discover_slots:
     type: set
     query:
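
A note on the repid expressions above: host(client_addr) returns just the address part of the inet value as text, while bare client_addr relies on an implicit cast whose output keeps a /nn suffix whenever the netmask is not full length. For pg_stat_replication.client_addr the mask is normally full, so the two forms usually agree, but host() is explicit and safe for any inet value. A minimal sketch of the difference (assumes psycopg2; the DSN is a placeholder for a reachable server):

import psycopg2

# Sketch only: how host(inet) differs from the implicit inet -> text cast.
with psycopg2.connect("dbname=postgres") as conn, conn.cursor() as cur:
    cur.execute(
        "SELECT host(%(a)s::inet) AS with_host, (%(a)s::inet)::text AS plain_cast",
        {"a": "10.0.0.8/27"},
    )
    print(cur.fetchone())  # ('10.0.0.8', '10.0.0.8/27')
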
@@ -43,7 +36,6 @@ metrics:
                active
         FROM pg_replication_slots
-
   ##
   # cluster-wide metrics
   ##
@@ -93,6 +85,7 @@ metrics:
         FROM pg_stat_bgwriter bg
         CROSS JOIN pg_stat_checkpointer cp
+
   io_per_backend:
     type: set
     query:
@@ -115,7 +108,6 @@ metrics:
         FROM pg_stat_io
         GROUP BY backend_type
-
   ##
   # Per-database metrics
   ##
@ -218,20 +210,14 @@ metrics:
# FROM pg_class c # FROM pg_class c
# CROSS JOIN LATERAL pg_sequence_parameters(c.oid) AS s # CROSS JOIN LATERAL pg_sequence_parameters(c.oid) AS s
# WHERE c.relkind = 'S' # WHERE c.relkind = 'S'
100000: SELECT COALESCE(MAX(last_value::float / max_value), 0) AS max_usage FROM pg_sequences; 100000: >
SELECT COALESCE(MAX(pg_sequence_last_value(c.oid)::float / sp.maximum_value), 0) AS max_usage
FROM pg_class c
CROSS JOIN LATERAL pg_sequence_parameters(c.oid) AS sp
WHERE c.relkind = 'S'
test_args: test_args:
dbname: postgres dbname: postgres
sequence_visibility:
type: row
query:
100000: >
SELECT COUNT(*) FILTER (WHERE has_sequence_privilege(c.oid, 'SELECT,USAGE'::text)) AS readable_sequences,
COUNT(*) AS total_sequences
FROM pg_class AS c
WHERE relkind = 'S';
## ##
# Per-replication metrics # Per-replication metrics
## ##
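
The two sequence-usage queries above handle unreadable sequences differently: pg_sequences exposes last_value as NULL when the caller lacks USAGE or SELECT privilege on a sequence, so the MAX() aggregate quietly skips those rows, and the companion sequence_visibility metric reports how many sequences the monitoring role can actually see. Calling pg_sequence_last_value() directly, as on the added side, has no such guard. A standalone sketch of the visibility check (assumes psycopg2; the DSN is a placeholder for the monitoring role's connection):

import psycopg2

# Counts sequences the current role can read versus all sequences.
QUERY = """
    SELECT COUNT(*) FILTER (WHERE has_sequence_privilege(c.oid, 'SELECT,USAGE')) AS readable,
           COUNT(*) AS total
    FROM pg_class AS c
    WHERE c.relkind = 'S'
"""

with psycopg2.connect("dbname=postgres") as conn, conn.cursor() as cur:
    cur.execute(QUERY)
    readable, total = cur.fetchone()
    if readable < total:
        print(f"monitoring role can read only {readable} of {total} sequences")
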
@@ -251,7 +237,7 @@ metrics:
                NULL AS replay_lag,
                sync_state
         FROM pg_stat_replication
-        WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
+        WHERE client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
       100000: >
         SELECT pid, usename,
                EXTRACT(EPOCH FROM backend_start)::integer AS backend_start,
@@ -264,8 +250,7 @@ metrics:
                COALESCE(EXTRACT(EPOCH FROM flush_lag), 0)::integer AS flush_lag,
                COALESCE(EXTRACT(EPOCH FROM replay_lag), 0)::integer AS replay_lag,
                sync_state
-        FROM pg_stat_replication
-        WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
+        FROM pg_stat_replication WHERE client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
     test_args:
       repid: 127.0.0.1_test_rep
@@ -279,25 +264,24 @@ metrics:
       90400: >
         SELECT NULL as active_pid,
                xmin,
-               pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn)::bigint AS restart_bytes,
+               pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn) AS restart_bytes,
                NULL AS confirmed_flush_bytes
         FROM pg_replication_slots WHERE slot_name = %(slot)s
       90600: >
         SELECT active_pid,
                xmin,
-               pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn)::bigint AS restart_bytes,
-               pg_xlog_location_diff(pg_current_xlog_location(), confirmed_flush_lsn)::bigint AS confirmed_flush_bytes
+               pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn) AS restart_bytes,
+               pg_xlog_location_diff(pg_current_xlog_location(), confirmed_flush_lsn) AS confirmed_flush_bytes
         FROM pg_replication_slots WHERE slot_name = %(slot)s
       100000: >
         SELECT active_pid,
                xmin,
-               pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn)::bigint AS restart_bytes,
-               pg_wal_lsn_diff(pg_current_wal_lsn(), confirmed_flush_lsn)::bigint AS confirmed_flush_bytes
+               pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS restart_bytes,
+               pg_wal_lsn_diff(pg_current_wal_lsn(), confirmed_flush_lsn) AS confirmed_flush_bytes
         FROM pg_replication_slots WHERE slot_name = %(slot)s
     test_args:
       slot: test_slot
   ##
   # Debugging
   ##
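
One side effect of dropping the ::bigint casts above: pg_xlog_location_diff() and its PostgreSQL 10 rename pg_wal_lsn_diff() return numeric, which psycopg2 hands to Python as decimal.Decimal, a type json.dumps() rejects by default. That interacts with the removal of json_encode_special() in the next file. A small sketch (assumes psycopg2; the DSN is a placeholder):

from decimal import Decimal

import psycopg2

with psycopg2.connect("dbname=postgres") as conn, conn.cursor() as cur:
    # numeric comes back as Decimal ...
    cur.execute("SELECT pg_wal_lsn_diff(pg_current_wal_lsn(), '0/0') AS lag")
    assert isinstance(cur.fetchone()[0], Decimal)
    # ... while the cast yields a plain Python int
    cur.execute("SELECT pg_wal_lsn_diff(pg_current_wal_lsn(), '0/0')::bigint AS lag")
    assert isinstance(cur.fetchone()[0], int)
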

View File

@@ -27,8 +27,6 @@ from urllib.parse import urlparse, parse_qs
 import requests
 import re
-
-from decimal import Decimal
 
 VERSION = "1.0.3"
 
 # Configuration
@@ -393,16 +391,6 @@ def get_query(metric, version):
     raise MetricVersionError("Missing metric query for PostgreSQL {}".format(version))
 
 
-def json_encode_special(obj):
-    """
-    Encoder function to handle types the standard JSON package doesn't know what
-    to do with
-    """
-    if isinstance(obj, Decimal):
-        return float(obj)
-    raise TypeError(f'Cannot serialize object of {type(obj)}')
-
-
 def run_query_no_retry(pool, return_type, query, args):
     """
     Run the query with no explicit retry code
@@ -420,11 +408,11 @@ def run_query_no_retry(pool, return_type, query, args):
         elif return_type == "row":
             if len(res) == 0:
                 return "[]"
-            return json.dumps(res[0], default=json_encode_special)
+            return json.dumps(res[0])
         elif return_type == "column":
            if len(res) == 0:
                 return "[]"
-            return json.dumps([list(r.values())[0] for r in res], default=json_encode_special)
+            return json.dumps([list(r.values())[0] for r in res])
         elif return_type == "set":
             return json.dumps(res)
     except:
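
For reference, the removed json_encode_special() hook is what let these json.dumps() calls serialize Decimal values from numeric columns; without it (and without casts in the SQL), such a metric raises TypeError. A self-contained illustration of the default= pattern:

import json
from decimal import Decimal

def json_encode_special(obj):
    """Fallback encoder for types the standard json module rejects."""
    if isinstance(obj, Decimal):
        return float(obj)
    raise TypeError(f"Cannot serialize object of {type(obj)}")

row = {"confirmed_flush_bytes": Decimal("16384")}
# json.dumps(row) alone raises: TypeError: Object of type Decimal is not JSON serializable
print(json.dumps(row, default=json_encode_special))  # {"confirmed_flush_bytes": 16384.0}
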

View File

@@ -5,8 +5,6 @@ import tempfile
 import logging
-
-from decimal import Decimal
 
 import pgmon
 
 # Silence most logging output
@@ -791,17 +789,3 @@ metrics:
         # Make sure we can pull the RSS file (we assume the 9.6 series won't be getting
         # any more updates)
         self.assertEqual(pgmon.get_latest_version(), 90624)
-
-    def test_json_encode_special(self):
-        # Confirm that we're getting the right type
-        self.assertFalse(isinstance(Decimal('0.5'), float))
-        self.assertTrue(isinstance(pgmon.json_encode_special(Decimal('0.5')), float))
-
-        # Make sure we get sane values
-        self.assertEqual(pgmon.json_encode_special(Decimal('0.5')), 0.5)
-        self.assertEqual(pgmon.json_encode_special(Decimal('12')), 12.0)
-
-        # Make sure we can still fail for other types
-        self.assertRaises(
-            TypeError, pgmon.json_encode_special, object
-        )