Compare commits

...

15 Commits

Author SHA1 Message Date
e930178e9c
Merge branch 'rel/1.0.4' 2025-07-06 03:35:39 -04:00
5afc940df8
Bump release version to 1.0.4 2025-07-06 03:35:00 -04:00
3961aa3448
Add Gentoo ebuild for 1.0.4-rc1 2025-07-06 03:32:27 -04:00
8fd57032e7
Update Makefile to support rc versions, bump version
* Support packaging RC versions for deb and rpm packages

* Bump version to 1.0.4-rc1
2025-07-06 03:28:36 -04:00
5ede7dea07
Fix package-all make target
* Fix the package-all target in the Makefile

* Remove storage of raw sequence stats
2025-07-05 12:28:36 -04:00
5ea007c3f6
Merge branch 'dev/sequences' into develop 2025-07-05 01:18:01 -04:00
7cb0f7ad40
Fix typo in activity query 2025-07-05 01:17:35 -04:00
83fa12ec54
Correct types for slot LSN lag metrics 2025-07-04 02:46:25 -04:00
98b74d9aed
Add sequence metrics to Zabbix template 2025-07-04 02:45:43 -04:00
45953848e2
Remove some type casts from working around Decimal types 2025-07-03 10:16:20 -04:00
6116f4f885
Fix missing json default 2025-07-03 02:08:45 -04:00
24d1214855
Teach json how to serialize decimals 2025-07-03 01:47:06 -04:00
3c39d8aa97
Remove CIDR prefix from replication id 2025-07-03 01:13:58 -04:00
ebb084aa9d
Add initial query for sequence usage 2025-07-01 02:29:56 -04:00
86d5e8917b
Merge branch 'dev/io_stats' into develop 2025-07-01 01:30:21 -04:00
8 changed files with 303 additions and 60 deletions

74
GENTOO/pgmon-1.0.4.ebuild Normal file
View File

@ -0,0 +1,74 @@
# Copyright 2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2

EAPI=8

# NOTE(review): python3_6/3_7 targets have long been dropped from Gentoo's
# python-r1 eclass; confirm the supported range before committing.
PYTHON_COMPAT=( python3_{6..13} )

inherit python-r1 systemd

DESCRIPTION="PostgreSQL monitoring bridge"
# Derived from the SRC_URI host/path below; the previous value "None" is not
# a valid URL and fails repoman/pkgcheck validation.
HOMEPAGE="https://code2.shh-dot-com.org/james/pgmon"
LICENSE="BSD"
SLOT="0"
KEYWORDS="amd64"
SRC_URI="https://code2.shh-dot-com.org/james/${PN}/releases/download/v${PV}/${P}.tar.bz2"

IUSE="-systemd"

# python-r1 consumers must require at least one enabled Python target.
REQUIRED_USE="${PYTHON_REQUIRED_USE}"

DEPEND="
	${PYTHON_DEPS}
	dev-python/psycopg:2
	dev-python/pyyaml
	dev-python/requests
	app-admin/logrotate
"
RDEPEND="${DEPEND}"
BDEPEND=""

#RESTRICT="fetch"
#S="${WORKDIR}/${PN}"

#pkg_nofetch() {
#	einfo "Please download"
#	einfo " - ${P}.tar.bz2"
#	einfo "from ${HOMEPAGE} and place it in your DISTDIR directory."
#	einfo "The file should be owned by portage:portage."
#}

# Nothing to build: pgmon ships as a ready-to-run Python script.
src_compile() {
	true
}

src_install() {
	# Install the OpenRC init script and conf file unless systemd is requested
	if ! use systemd ; then
		newinitd "openrc/pgmon.initd" pgmon
		newconfd "openrc/pgmon.confd" pgmon
	fi

	# Install the systemd unit when the systemd USE flag is set
	if use systemd ; then
		systemd_dounit "systemd/pgmon.service"
	fi

	# Install the script itself, renamed to drop the .py suffix
	exeinto /usr/bin
	newexe "src/pgmon.py" pgmon

	# Install default config
	diropts -o root -g root -m 0755
	insinto /etc/pgmon
	doins "sample-config/pgmon.yml"
	doins "sample-config/pgmon-metrics.yml"

	# Install logrotate config
	insinto /etc/logrotate.d
	newins "logrotate/pgmon.logrotate" pgmon

	# Install man page
	doman manpages/pgmon.1
}

View File

@ -3,7 +3,22 @@ PACKAGE_NAME := pgmon
SCRIPT := src/$(PACKAGE_NAME).py
VERSION := $(shell grep -m 1 '^VERSION = ' "$(SCRIPT)" | sed -ne 's/.*"\(.*\)".*/\1/p')
# Figure out the version components
# Note: The release is for RPM packages, where pre-release versions are written as 0.<release>
FULL_VERSION := $(shell grep -m 1 '^VERSION = ' "$(SCRIPT)" | sed -ne 's/.*"\(.*\)".*/\1/p')
VERSION := $(shell echo $(FULL_VERSION) | sed -n 's/\(.*\)\(-rc.*\|$$\)/\1/p')
RELEASE := $(shell echo $(FULL_VERSION) | sed -n 's/.*-rc\([0-9]\+\)$$/\1/p')
ifeq ($(RELEASE),)
RPM_RELEASE := 1
RPM_VERSION := $(VERSION)-$(RPM_RELEASE)
DEB_VERSION := $(VERSION)
else
RPM_RELEASE := 0.$(RELEASE)
RPM_VERSION := $(VERSION)-$(RPM_RELEASE)
DEB_VERSION := $(VERSION)~rc$(RELEASE)
endif
# Where packages are built
BUILD_DIR := build
@ -25,16 +40,20 @@ SUPPORTED := ubuntu-20.04 \
.PHONY: all clean tgz test query-tests install-common install-openrc install-systemd
all: package-all
version:
@echo "full version=$(FULL_VERSION) version=$(VERSION) rel=$(RELEASE) rpm=$(RPM_VERSION) deb=$(DEB_VERSION)"
# Build all packages
.PHONY: package-all
all: $(foreach distro_release, $(SUPPORTED), package-$(distro_release))
package-all: $(foreach distro_release, $(SUPPORTED), package-$(distro_release))
# Gentoo package (tar.gz) creation
.PHONY: package-gentoo
package-gentoo:
mkdir -p $(BUILD_DIR)/gentoo
tar --transform "s,^,$(PACKAGE_NAME)-$(VERSION)/," -acjf $(BUILD_DIR)/gentoo/$(PACKAGE_NAME)-$(VERSION).tar.bz2 --exclude .gitignore $(shell git ls-tree --full-tree --name-only -r HEAD)
tar --transform "s,^,$(PACKAGE_NAME)-$(FULL_VERSION)/," -acjf $(BUILD_DIR)/gentoo/$(PACKAGE_NAME)-$(FULL_VERSION).tar.bz2 --exclude .gitignore $(shell git ls-tree --full-tree --name-only -r HEAD)
# Create a deb package
@ -55,7 +74,7 @@ tgz:
rm -rf $(BUILD_DIR)/tgz/root
mkdir -p $(BUILD_DIR)/tgz/root
$(MAKE) install-openrc DESTDIR=$(BUILD_DIR)/tgz/root
tar -cz -f $(BUILD_DIR)/tgz/$(PACKAGE_NAME)-$(VERSION).tgz -C $(BUILD_DIR)/tgz/root .
tar -cz -f $(BUILD_DIR)/tgz/$(PACKAGE_NAME)-$(FULL_VERSION).tgz -C $(BUILD_DIR)/tgz/root .
# Clean up the build directory
clean:
@ -129,28 +148,28 @@ debian-%-install-test:
docker run --rm \
-v ./$(BUILD_DIR):/output \
debian:$* \
bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(VERSION)-debian-$*.deb'
bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(DEB_VERSION)-debian-$*.deb'
# Run a RedHat install test
rockylinux-%-install-test:
docker run --rm \
-v ./$(BUILD_DIR):/output \
rockylinux:$* \
bash -c 'dnf makecache && dnf install -y /output/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm'
bash -c 'dnf makecache && dnf install -y /output/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm'
# Run an Ubuntu install test
ubuntu-%-install-test:
docker run --rm \
-v ./$(BUILD_DIR):/output \
ubuntu:$* \
bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(VERSION)-ubuntu-$*.deb'
bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(DEB_VERSION)-ubuntu-$*.deb'
# Run an OracleLinux install test (this is for EL7 since CentOS7 images no longer exist)
oraclelinux-%-install-test:
docker run --rm \
-v ./$(BUILD_DIR):/output \
oraclelinux:7 \
bash -c 'yum makecache && yum install -y /output/$(PACKAGE_NAME)-$(VERSION)-1.el7.noarch.rpm'
bash -c 'yum makecache && yum install -y /output/$(PACKAGE_NAME)-$(RPM_VERSION).el7.noarch.rpm'
# Run a Gentoo install test
gentoo-install-test:
@ -192,28 +211,28 @@ package-image-%:
actually-package-debian-%:
$(MAKE) install-systemd DESTDIR=/output/debian-$*
cp -r --preserve=mode DEBIAN /output/debian-$*/
dpkg-deb -Zgzip --build /output/debian-$* "/output/$(PACKAGE_NAME)-$(VERSION)-debian-$*.deb"
dpkg-deb -Zgzip --build /output/debian-$* "/output/$(PACKAGE_NAME)-$(DEB_VERSION)-debian-$*.deb"
# RedHat package creation
actually-package-rockylinux-%:
mkdir -p /output/rockylinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
sed -e "s/@@VERSION@@/$(VERSION)/g" RPM/$(PACKAGE_NAME).spec > /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec
sed -e "s/@@VERSION@@/$(VERSION)/g" -e "s/@@RELEASE@@/$(RPM_RELEASE)/g" RPM/$(PACKAGE_NAME).spec > /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec
rpmbuild --define '_topdir /output/rockylinux-$*' \
--define 'version $(VERSION)' \
--define 'version $(RPM_VERSION)' \
-bb /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec
cp /output/rockylinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm /output/
cp /output/rockylinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm /output/
# Ubuntu package creation
actually-package-ubuntu-%:
$(MAKE) install-systemd DESTDIR=/output/ubuntu-$*
cp -r --preserve=mode DEBIAN /output/ubuntu-$*/
dpkg-deb -Zgzip --build /output/ubuntu-$* "/output/$(PACKAGE_NAME)-$(VERSION)-ubuntu-$*.deb"
dpkg-deb -Zgzip --build /output/ubuntu-$* "/output/$(PACKAGE_NAME)-$(DEB_VERSION)-ubuntu-$*.deb"
# OracleLinux package creation
actually-package-oraclelinux-%:
mkdir -p /output/oraclelinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
sed -e "s/@@VERSION@@/$(VERSION)/g" RPM/$(PACKAGE_NAME)-el7.spec > /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec
sed -e "s/@@VERSION@@/$(VERSION)/g" -e "s/@@RELEASE@@/$(RPM_RELEASE)/g" RPM/$(PACKAGE_NAME)-el7.spec > /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec
rpmbuild --define '_topdir /output/oraclelinux-$*' \
--define 'version $(VERSION)' \
--define 'version $(RPM_VERSION)' \
-bb /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec
cp /output/oraclelinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm /output/
cp /output/oraclelinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm /output/

View File

@ -1,6 +1,6 @@
Name: pgmon
Version: @@VERSION@@
Release: 1%{?dist}
Release: @@RELEASE@@%{?dist}
Summary: A bridge to sit between monitoring tools and PostgreSQL
License: MIT

View File

@ -1,6 +1,6 @@
Name: pgmon
Version: @@VERSION@@
Release: 1%{?dist}
Release: @@RELEASE@@%{?dist}
Summary: A bridge to sit between monitoring tools and PostgreSQL
License: MIT

View File

@ -8,14 +8,21 @@ metrics:
0: >
SELECT datname AS dbname
FROM pg_database
# Note: If the user lacks sufficient privileges, these fields will be NULL.
# The WHERE clause is intended to prevent Zabbix from discovering a
# connection it cannot monitor. Ideally this would generate an error
# instead.
discover_rep:
type: set
query:
0: >
SELECT client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid,
SELECT host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid,
client_addr,
state
FROM pg_stat_replication
WHERE state IS NOT NULL
discover_slots:
type: set
query:
@ -36,6 +43,7 @@ metrics:
active
FROM pg_replication_slots
##
# cluster-wide metrics
##
@ -85,29 +93,29 @@ metrics:
FROM pg_stat_bgwriter bg
CROSS JOIN pg_stat_checkpointer cp
io_per_backend:
type: set
query:
160000: >
SELECT backend_type,
COALESCE(SUM(reads * op_bytes), 0)::bigint AS reads,
COALESCE(SUM(read_time), 0)::bigint AS read_time,
COALESCE(SUM(writes * op_bytes), 0)::bigint AS writes,
COALESCE(SUM(write_time), 0)::bigint AS write_time,
COALESCE(SUM(writebacks * op_bytes), 0)::bigint AS writebacks,
COALESCE(SUM(writeback_time), 0)::bigint AS writeback_time,
COALESCE(SUM(extends * op_bytes), 0)::bigint AS extends,
COALESCE(SUM(extend_time), 0)::bigint AS extend_time,
COALESCE(SUM(op_bytes), 0)::bigint AS op_bytes,
COALESCE(SUM(hits), 0)::bigint AS hits,
COALESCE(SUM(evictions), 0)::bigint AS evictions,
COALESCE(SUM(reuses), 0)::bigint AS reuses,
COALESCE(SUM(fsyncs), 0)::bigint AS fsyncs,
COALESCE(SUM(fsync_time), 0)::bigint AS fsync_time
COALESCE(SUM(reads * op_bytes), 0) AS reads,
COALESCE(SUM(read_time), 0) AS read_time,
COALESCE(SUM(writes * op_bytes), 0) AS writes,
COALESCE(SUM(write_time), 0) AS write_time,
COALESCE(SUM(writebacks * op_bytes), 0) AS writebacks,
COALESCE(SUM(writeback_time), 0) AS writeback_time,
COALESCE(SUM(extends * op_bytes), 0) AS extends,
COALESCE(SUM(extend_time), 0) AS extend_time,
COALESCE(SUM(op_bytes), 0) AS op_bytes,
COALESCE(SUM(hits), 0) AS hits,
COALESCE(SUM(evictions), 0) AS evictions,
COALESCE(SUM(reuses), 0) AS reuses,
COALESCE(SUM(fsyncs), 0) AS fsyncs,
COALESCE(SUM(fsync_time), 0) AS fsync_time
FROM pg_stat_io
GROUP BY backend_type
##
# Per-database metrics
##
@ -139,7 +147,7 @@ metrics:
NULL AS sessions_abandoned,
NULL AS sessions_fatal,
NULL AS sessions_killed,
extract('epoch' from stats_reset)::float AS stats_reset
extract('epoch' from stats_reset) AS stats_reset
FROM pg_stat_database WHERE datname = %(dbname)s
140000: >
SELECT numbackends,
@ -166,7 +174,7 @@ metrics:
sessions_abandoned,
sessions_fatal,
sessions_killed,
extract('epoch' from stats_reset)::float AS stats_reset
extract('epoch' from stats_reset) AS stats_reset
FROM pg_stat_database WHERE datname = %(dbname)s
test_args:
dbname: postgres
@ -189,13 +197,40 @@ metrics:
0: >
SELECT state,
count(*) AS backend_count,
COALESCE(EXTRACT(EPOCH FROM max(now() - state_change))::float, 0) AS max_state_time
COALESCE(EXTRACT(EPOCH FROM max(now() - state_change)), 0) AS max_state_time
FROM pg_stat_activity
WHERE datname = %(dbname)s
GROUP BY state
test_args:
dbname: postgres
sequence_usage:
type: value
query:
# 9.2 lacks lateral joins, the pg_sequence_last_value function, and the pg_sequences view
# 0: >
# SELECT COALESCE(MAX(pg_sequence_last_value(c.oid)::float / (pg_sequence_parameters(oid)).maximum_value), 0) AS max_usage
# FROM pg_class c
# WHERE c.relkind = 'S'
# 9.3 - 9.6 lacks the pg_sequence_last_value function, and pg_sequences view
# 90300: >
# SELECT COALESCE(MAX(pg_sequence_last_value(c.oid)::float / s.maximum_value), 0) AS max_usage
# FROM pg_class c
# CROSS JOIN LATERAL pg_sequence_parameters(c.oid) AS s
# WHERE c.relkind = 'S'
100000: SELECT COALESCE(MAX(last_value::float / max_value), 0) AS max_usage FROM pg_sequences;
test_args:
dbname: postgres
sequence_visibility:
type: row
query:
100000: >
SELECT COUNT(*) FILTER (WHERE has_sequence_privilege(c.oid, 'SELECT,USAGE')) AS visible_sequences,
COUNT(*) AS total_sequences
FROM pg_class AS c
WHERE relkind = 'S';
##
# Per-replication metrics
@ -205,7 +240,7 @@ metrics:
query:
90400: >
SELECT pid, usename,
EXTRACT(EPOCH FROM backend_start)::integer AS backend_start,
EXTRACT(EPOCH FROM backend_start) AS backend_start,
state,
pg_xlog_location_diff(pg_current_xlog_location(), sent_location) AS sent_lsn,
pg_xlog_location_diff(pg_current_xlog_location(), write_location) AS write_lsn,
@ -216,20 +251,21 @@ metrics:
NULL AS replay_lag,
sync_state
FROM pg_stat_replication
WHERE client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
100000: >
SELECT pid, usename,
EXTRACT(EPOCH FROM backend_start)::integer AS backend_start,
EXTRACT(EPOCH FROM backend_start) AS backend_start,
state,
pg_wal_lsn_diff(pg_current_wal_lsn(), sent_lsn) AS sent_lsn,
pg_wal_lsn_diff(pg_current_wal_lsn(), write_lsn) AS write_lsn,
pg_wal_lsn_diff(pg_current_wal_lsn(), flush_lsn) AS flush_lsn,
pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lsn,
COALESCE(EXTRACT(EPOCH FROM write_lag), 0)::integer AS write_lag,
COALESCE(EXTRACT(EPOCH FROM flush_lag), 0)::integer AS flush_lag,
COALESCE(EXTRACT(EPOCH FROM replay_lag), 0)::integer AS replay_lag,
COALESCE(EXTRACT(EPOCH FROM write_lag), 0) AS write_lag,
COALESCE(EXTRACT(EPOCH FROM flush_lag), 0) AS flush_lag,
COALESCE(EXTRACT(EPOCH FROM replay_lag), 0) AS replay_lag,
sync_state
FROM pg_stat_replication WHERE client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
FROM pg_stat_replication
WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
test_args:
repid: 127.0.0.1_test_rep
@ -261,6 +297,7 @@ metrics:
test_args:
slot: test_slot
##
# Debugging
##

View File

@ -27,7 +27,9 @@ from urllib.parse import urlparse, parse_qs
import requests
import re
VERSION = "1.0.3"
from decimal import Decimal
VERSION = "1.0.4"
# Configuration
config = {}
@ -391,6 +393,16 @@ def get_query(metric, version):
raise MetricVersionError("Missing metric query for PostgreSQL {}".format(version))
def json_encode_special(obj):
    """
    Fallback encoder for json.dumps()'s ``default`` hook.

    Converts types the standard JSON encoder cannot serialize on its own
    (currently only Decimal, which becomes a float); any other type raises
    TypeError, as the ``default`` protocol requires.
    """
    if not isinstance(obj, Decimal):
        raise TypeError(f'Cannot serialize object of {type(obj)}')
    return float(obj)
def run_query_no_retry(pool, return_type, query, args):
"""
Run the query with no explicit retry code
@ -408,13 +420,13 @@ def run_query_no_retry(pool, return_type, query, args):
elif return_type == "row":
if len(res) == 0:
return "[]"
return json.dumps(res[0])
return json.dumps(res[0], default=json_encode_special)
elif return_type == "column":
if len(res) == 0:
return "[]"
return json.dumps([list(r.values())[0] for r in res])
return json.dumps([list(r.values())[0] for r in res], default=json_encode_special)
elif return_type == "set":
return json.dumps(res)
return json.dumps(res, default=json_encode_special)
except:
dbname = pool.name
if dbname in unhappy_cooldown:

View File

@ -5,6 +5,9 @@ import tempfile
import logging
from decimal import Decimal
import json
import pgmon
# Silence most logging output
@ -789,3 +792,20 @@ metrics:
# Make sure we can pull the RSS file (we assume the 9.6 series won't be getting
# any more updates)
self.assertEqual(pgmon.get_latest_version(), 90624)
def test_json_encode_special(self):
# Confirm that we're getting the right type
self.assertFalse(isinstance(Decimal('0.5'), float))
self.assertTrue(isinstance(pgmon.json_encode_special(Decimal('0.5')), float))
# Make sure we get sane values
self.assertEqual(pgmon.json_encode_special(Decimal('0.5')), 0.5)
self.assertEqual(pgmon.json_encode_special(Decimal('12')), 12.0)
# Make sure we can still fail for other types
self.assertRaises(
TypeError, pgmon.json_encode_special, object
)
# Make sure we can actually serialize a Decimal
self.assertEqual(json.dumps(Decimal('2.5'), default=pgmon.json_encode_special), '2.5')

View File

@ -785,6 +785,77 @@ zabbix_export:
value: PostgreSQL
- tag: Database
value: '{#DBNAME}'
- uuid: 5960120dd01c4926b0fc1fbe9c011507
name: 'Database max sequence usage in {#DBNAME}'
type: HTTP_AGENT
key: 'pgmon_db_max_sequence[{#DBNAME}]'
delay: 5m
value_type: FLOAT
units: '%'
description: 'The percent of the currently configured value range for the most utilized sequence.'
url: 'http://localhost:{$AGENT_PORT}/sequence_usage'
query_fields:
- name: dbname
value: '{#DBNAME}'
tags:
- tag: Application
value: PostgreSQL
- tag: Database
value: '{#DBNAME}'
- uuid: 48b9cc80ac4d4aee9e9f3a5d6f7d4a95
name: 'Total number of sequences on {#DBNAME}'
type: DEPENDENT
key: 'pgmon_db_sequences[total,{#DBNAME}]'
delay: '0'
description: 'Total number of sequences in the database.'
preprocessing:
- type: JSONPATH
parameters:
- $.total_sequences
master_item:
key: 'pgmon_db_sequence_visibility[{#DBNAME}]'
tags:
- tag: Application
value: PostgreSQL
- tag: Database
value: '{#DBNAME}'
- uuid: 6521a9bab2ac47bf85429832d289bbac
name: 'Visible sequences on {#DBNAME}'
type: DEPENDENT
key: 'pgmon_db_sequences[visible,{#DBNAME}]'
delay: '0'
description: 'Number of sequences in the database for which Zabbix can see stats.'
preprocessing:
- type: JSONPATH
parameters:
- $.visible_sequences
master_item:
key: 'pgmon_db_sequence_visibility[{#DBNAME}]'
tags:
- tag: Application
value: PostgreSQL
- tag: Database
value: '{#DBNAME}'
- uuid: 00f2da3eb99940839410a6ecd5df153f
name: 'Database sequence visibility in {#DBNAME}'
type: HTTP_AGENT
key: 'pgmon_db_sequence_visibility[{#DBNAME}]'
delay: 30m
history: '0'
value_type: TEXT
trends: '0'
description: 'Statistics about the number of sequences that exist and the number Zabbix can actually see stats for.'
url: 'http://localhost:{$AGENT_PORT}/sequence_visibility'
query_fields:
- name: dbname
value: '{#DBNAME}'
tags:
- tag: Application
value: PostgreSQL
- tag: Database
value: '{#DBNAME}'
- tag: Type
value: Raw
- uuid: 492b3cac15f348c2b85f97b69c114d1b
name: 'Database Stats for {#DBNAME}'
type: HTTP_AGENT
@ -803,6 +874,17 @@ zabbix_export:
value: '{#DBNAME}'
- tag: Type
value: Raw
trigger_prototypes:
- uuid: d29d0fd9d9d34b5ebd649592b0829ce5
expression: 'last(/PostgreSQL by pgmon/pgmon_db_sequences[total,{#DBNAME}]) <> last(/PostgreSQL by pgmon/pgmon_db_sequences[visible,{#DBNAME}])'
name: 'Sequences not visible to Zabbix on {#DBNAME}'
priority: WARNING
description: 'There are sequences for which Zabbix cannot see usage statistics'
tags:
- tag: Application
value: PostgreSQL
- tag: Component
value: Sequence
graph_prototypes:
- uuid: 1f7de43b77714f819e61c31273712b70
name: 'DML Totals for {#DBNAME}'
@ -1500,6 +1582,7 @@ zabbix_export:
delay: '0'
history: 90d
value_type: FLOAT
units: s
description: 'Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level on incurred while committing if this server was configured as a synchronous standby.'
preprocessing:
- type: JSONPATH
@ -1515,13 +1598,12 @@ zabbix_export:
- tag: Database
value: '{#DBNAME}'
- uuid: 624f8f085a3642c9a10a03361c17763d
name: 'Last flush LSN for {#REPID}'
name: 'Last flush LSN lag for {#REPID}'
type: DEPENDENT
key: 'pgmon_rep[flush_lsn,repid={#REPID}]'
delay: '0'
history: 90d
value_type: TEXT
trends: '0'
units: B
description: 'Lag in bytes between the current write-ahead log location and the last location flushed to disk by this standby server'
preprocessing:
- type: JSONPATH
@ -1543,6 +1625,7 @@ zabbix_export:
delay: '0'
history: 90d
value_type: FLOAT
units: s
description: 'Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.'
preprocessing:
- type: JSONPATH
@ -1558,13 +1641,12 @@ zabbix_export:
- tag: Database
value: '{#DBNAME}'
- uuid: fe1bed51845d4694bae8f53deed4846d
name: 'Last replay LSN for {#REPID}'
name: 'Last replay LSN lag for {#REPID}'
type: DEPENDENT
key: 'pgmon_rep[replay_lsn,repid={#REPID}]'
delay: '0'
history: 90d
value_type: TEXT
trends: '0'
units: B
description: 'Lag in bytes between the current write-ahead log location and the last location replayed into the database on this standby server'
preprocessing:
- type: JSONPATH
@ -1580,13 +1662,12 @@ zabbix_export:
- tag: Database
value: '{#DBNAME}'
- uuid: 68c179d0e33f45f9bf82d2d4125763f0
name: 'Last sent LSN for {#REPID}'
name: 'Last sent LSN lag for {#REPID}'
type: DEPENDENT
key: 'pgmon_rep[sent_lsn,repid={#REPID}]'
delay: '0'
history: 90d
value_type: TEXT
trends: '0'
units: B
description: 'Lag in bytes between the current write-ahead log location and the last location sent on this connection'
preprocessing:
- type: JSONPATH
@ -1636,6 +1717,7 @@ zabbix_export:
delay: '0'
history: 90d
value_type: FLOAT
units: s
description: 'Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.'
preprocessing:
- type: JSONPATH
@ -1651,13 +1733,12 @@ zabbix_export:
- tag: Database
value: '{#DBNAME}'
- uuid: 57fb03cf63af4b0a91d8e36d6ff64d30
name: 'Last write LSN for {#REPID}'
name: 'Last write LSN lag for {#REPID}'
type: DEPENDENT
key: 'pgmon_rep[write_lsn,repid={#REPID}]'
delay: '0'
history: 90d
value_type: TEXT
trends: '0'
units: B
description: 'Lag in bytes between the current write-ahead log location and the last location written (but not yet flushed) by this standby server'
preprocessing:
- type: JSONPATH