diff --git a/GENTOO/pgmon-1.0.4.ebuild b/GENTOO/pgmon-1.0.4.ebuild new file mode 100644 index 0000000..9d0e6a9 --- /dev/null +++ b/GENTOO/pgmon-1.0.4.ebuild @@ -0,0 +1,74 @@ +# Copyright 2024 Gentoo Authors +# Distributed under the terms of the GNU General Public License v2 + +EAPI=8 + +PYTHON_COMPAT=( python3_{6..13} ) + +inherit python-r1 systemd + +DESCRIPTION="PostgreSQL monitoring bridge" +HOMEPAGE="None" + +LICENSE="BSD" +SLOT="0" +KEYWORDS="amd64" + +SRC_URI="https://code2.shh-dot-com.org/james/${PN}/releases/download/v${PV}/${P}.tar.bz2" + +IUSE="-systemd" + +DEPEND=" + ${PYTHON_DEPS} + dev-python/psycopg:2 + dev-python/pyyaml + dev-python/requests + app-admin/logrotate + " +RDEPEND="${DEPEND}" +BDEPEND="" + +#RESTRICT="fetch" + +#S="${WORKDIR}/${PN}" + +#pkg_nofetch() { +# einfo "Please download" +# einfo " - ${P}.tar.bz2" +# einfo "from ${HOMEPAGE} and place it in your DISTDIR directory." +# einfo "The file should be owned by portage:portage." +#} + +src_compile() { + true +} + +src_install() { + # Install init script + if ! use systemd ; then + newinitd "openrc/pgmon.initd" pgmon + newconfd "openrc/pgmon.confd" pgmon + fi + + # Install systemd unit + if use systemd ; then + systemd_dounit "systemd/pgmon.service" + fi + + # Install script + exeinto /usr/bin + newexe "src/pgmon.py" pgmon + + # Install default config + diropts -o root -g root -m 0755 + insinto /etc/pgmon + doins "sample-config/pgmon.yml" + doins "sample-config/pgmon-metrics.yml" + + # Install logrotate config + insinto /etc/logrotate.d + newins "logrotate/pgmon.logrotate" pgmon + + # Install man page + doman manpages/pgmon.1 +} diff --git a/Makefile b/Makefile index 16a051a..95ae465 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,22 @@ PACKAGE_NAME := pgmon SCRIPT := src/$(PACKAGE_NAME).py -VERSION := $(shell grep -m 1 '^VERSION = ' "$(SCRIPT)" | sed -ne 's/.*"\(.*\)".*/\1/p') +# Figure out the version components +# Note: The release is for RPM packages, where prerelease releases are written as 0. 
+FULL_VERSION := $(shell grep -m 1 '^VERSION = ' "$(SCRIPT)" | sed -ne 's/.*"\(.*\)".*/\1/p')
+VERSION := $(shell echo $(FULL_VERSION) | sed -e 's/-rc[0-9]*$$//')
+RELEASE := $(shell echo $(FULL_VERSION) | sed -n 's/.*-rc\([0-9]\+\)$$/\1/p')
+
+ifeq ($(RELEASE),)
+RPM_RELEASE := 1
+RPM_VERSION := $(VERSION)-$(RPM_RELEASE)
+DEB_VERSION := $(VERSION)
+else
+RPM_RELEASE := 0.$(RELEASE)
+RPM_VERSION := $(VERSION)-$(RPM_RELEASE)
+DEB_VERSION := $(VERSION)~rc$(RELEASE)
+endif
+
 # Where packages are built
 BUILD_DIR := build
@@ -25,16 +40,20 @@ SUPPORTED := ubuntu-20.04 \
 .PHONY: all clean tgz test query-tests install-common install-openrc install-systemd
+all: package-all
+
+version:
+	@echo "full version=$(FULL_VERSION) version=$(VERSION) rel=$(RELEASE) rpm=$(RPM_VERSION) deb=$(DEB_VERSION)"
 # Build all packages
 .PHONY: package-all
-all: $(foreach distro_release, $(SUPPORTED), package-$(distro_release))
+package-all: $(foreach distro_release, $(SUPPORTED), package-$(distro_release))
 # Gentoo package (tar.gz) creation
 .PHONY: package-gentoo
 package-gentoo:
 	mkdir -p $(BUILD_DIR)/gentoo
-	tar --transform "s,^,$(PACKAGE_NAME)-$(VERSION)/," -acjf $(BUILD_DIR)/gentoo/$(PACKAGE_NAME)-$(VERSION).tar.bz2 --exclude .gitignore $(shell git ls-tree --full-tree --name-only -r HEAD)
+	tar --transform "s,^,$(PACKAGE_NAME)-$(FULL_VERSION)/," -acjf $(BUILD_DIR)/gentoo/$(PACKAGE_NAME)-$(FULL_VERSION).tar.bz2 --exclude .gitignore $(shell git ls-tree --full-tree --name-only -r HEAD)
 # Create a deb package
@@ -55,7 +74,7 @@ tgz:
 	rm -rf $(BUILD_DIR)/tgz/root
 	mkdir -p $(BUILD_DIR)/tgz/root
 	$(MAKE) install-openrc DESTDIR=$(BUILD_DIR)/tgz/root
-	tar -cz -f $(BUILD_DIR)/tgz/$(PACKAGE_NAME)-$(VERSION).tgz -C $(BUILD_DIR)/tgz/root .
+	tar -cz -f $(BUILD_DIR)/tgz/$(PACKAGE_NAME)-$(FULL_VERSION).tgz -C $(BUILD_DIR)/tgz/root .
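(For reference, a minimal Python sketch — not part of the patch, and the helper name is invented — of the version/release mapping the new Makefile variables are meant to produce: a final release such as 1.0.4 gets RPM release 1 and an unchanged Debian version, while a prerelease such as 1.0.4-rc2 gets RPM release 0.2 and Debian version 1.0.4~rc2, so both sort before the final build.)

# Illustrative only; split_version is an assumed helper, not part of pgmon.
def split_version(full_version: str) -> dict:
    """Map "X.Y.Z" or "X.Y.Z-rcN" to the RPM/Debian version strings described above."""
    if "-rc" in full_version:
        version, release = full_version.split("-rc", 1)
        # Prerelease: RPM release starts with 0. and Debian uses ~rcN, both sorting
        # before the eventual final release.
        return {"rpm": f"{version}-0.{release}", "deb": f"{version}~rc{release}"}
    return {"rpm": f"{full_version}-1", "deb": full_version}

assert split_version("1.0.4") == {"rpm": "1.0.4-1", "deb": "1.0.4"}
assert split_version("1.0.4-rc2") == {"rpm": "1.0.4-0.2", "deb": "1.0.4~rc2"}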
# Clean up the build directory clean: @@ -129,28 +148,28 @@ debian-%-install-test: docker run --rm \ -v ./$(BUILD_DIR):/output \ debian:$* \ - bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(VERSION)-debian-$*.deb' + bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(DEB_VERSION)-debian-$*.deb' # Run a RedHat install test rockylinux-%-install-test: docker run --rm \ -v ./$(BUILD_DIR):/output \ rockylinux:$* \ - bash -c 'dnf makecache && dnf install -y /output/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm' + bash -c 'dnf makecache && dnf install -y /output/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm' # Run an Ubuntu install test ubuntu-%-install-test: docker run --rm \ -v ./$(BUILD_DIR):/output \ ubuntu:$* \ - bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(VERSION)-ubuntu-$*.deb' + bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(DEB_VERSION)-ubuntu-$*.deb' # Run an OracleLinux install test (this is for EL7 since CentOS7 images no longer exist) oraclelinux-%-install-test: docker run --rm \ -v ./$(BUILD_DIR):/output \ oraclelinux:7 \ - bash -c 'yum makecache && yum install -y /output/$(PACKAGE_NAME)-$(VERSION)-1.el7.noarch.rpm' + bash -c 'yum makecache && yum install -y /output/$(PACKAGE_NAME)-$(RPM_VERSION).el7.noarch.rpm' # Run a Gentoo install test gentoo-install-test: @@ -192,28 +211,28 @@ package-image-%: actually-package-debian-%: $(MAKE) install-systemd DESTDIR=/output/debian-$* cp -r --preserve=mode DEBIAN /output/debian-$*/ - dpkg-deb -Zgzip --build /output/debian-$* "/output/$(PACKAGE_NAME)-$(VERSION)-debian-$*.deb" + dpkg-deb -Zgzip --build /output/debian-$* "/output/$(PACKAGE_NAME)-$(DEB_VERSION)-debian-$*.deb" # RedHat package creation actually-package-rockylinux-%: mkdir -p /output/rockylinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS} - sed -e "s/@@VERSION@@/$(VERSION)/g" RPM/$(PACKAGE_NAME).spec > /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec + sed -e "s/@@VERSION@@/$(VERSION)/g" -e "s/@@RELEASE@@/$(RPM_RELEASE)/g" RPM/$(PACKAGE_NAME).spec > /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec rpmbuild --define '_topdir /output/rockylinux-$*' \ - --define 'version $(VERSION)' \ + --define 'version $(RPM_VERSION)' \ -bb /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec - cp /output/rockylinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm /output/ + cp /output/rockylinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm /output/ # Ubuntu package creation actually-package-ubuntu-%: $(MAKE) install-systemd DESTDIR=/output/ubuntu-$* cp -r --preserve=mode DEBIAN /output/ubuntu-$*/ - dpkg-deb -Zgzip --build /output/ubuntu-$* "/output/$(PACKAGE_NAME)-$(VERSION)-ubuntu-$*.deb" + dpkg-deb -Zgzip --build /output/ubuntu-$* "/output/$(PACKAGE_NAME)-$(DEB_VERSION)-ubuntu-$*.deb" # OracleLinux package creation actually-package-oraclelinux-%: mkdir -p /output/oraclelinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS} - sed -e "s/@@VERSION@@/$(VERSION)/g" RPM/$(PACKAGE_NAME)-el7.spec > /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec + sed -e "s/@@VERSION@@/$(VERSION)/g" -e "s/@@RELEASE@@/$(RPM_RELEASE)/g" RPM/$(PACKAGE_NAME)-el7.spec > /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec rpmbuild --define '_topdir /output/oraclelinux-$*' \ - --define 'version $(VERSION)' \ + --define 'version $(RPM_VERSION)' \ -bb /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec - cp /output/oraclelinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm /output/ + cp 
/output/oraclelinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm /output/ diff --git a/RPM/pgmon-el7.spec b/RPM/pgmon-el7.spec index 3b880c2..68ee750 100644 --- a/RPM/pgmon-el7.spec +++ b/RPM/pgmon-el7.spec @@ -1,6 +1,6 @@ Name: pgmon Version: @@VERSION@@ -Release: 1%{?dist} +Release: @@RELEASE@@%{?dist} Summary: A bridge to sit between monitoring tools and PostgreSQL License: MIT diff --git a/RPM/pgmon.spec b/RPM/pgmon.spec index 5f7facf..469b584 100644 --- a/RPM/pgmon.spec +++ b/RPM/pgmon.spec @@ -1,6 +1,6 @@ Name: pgmon Version: @@VERSION@@ -Release: 1%{?dist} +Release: @@RELEASE@@%{?dist} Summary: A bridge to sit between monitoring tools and PostgreSQL License: MIT diff --git a/sample-config/pgmon-metrics.yml b/sample-config/pgmon-metrics.yml index 97c8209..0b07909 100644 --- a/sample-config/pgmon-metrics.yml +++ b/sample-config/pgmon-metrics.yml @@ -8,14 +8,21 @@ metrics: 0: > SELECT datname AS dbname FROM pg_database + + # Note: If the user lacks sufficient privileges, these fields will be NULL. + # The WHERE clause is intended to prevent Zabbix from discovering a + # connection it cannot monitor. Ideally this would generate an error + # instead. discover_rep: type: set query: 0: > - SELECT client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid, + SELECT host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid, client_addr, state FROM pg_stat_replication + WHERE state IS NOT NULL + discover_slots: type: set query: @@ -36,6 +43,7 @@ metrics: active FROM pg_replication_slots + ## # cluster-wide metrics ## @@ -49,7 +57,7 @@ metrics: query: 0: > SELECT max(age(datfrozenxid)) AS xid_age, - 0 AS mxid_age + NULL AS mxid_age FROM pg_database 90600: > SELECT max(age(datfrozenxid)) AS xid_age, @@ -85,6 +93,28 @@ metrics: FROM pg_stat_bgwriter bg CROSS JOIN pg_stat_checkpointer cp + io_per_backend: + type: set + query: + 160000: > + SELECT backend_type, + COALESCE(SUM(reads * op_bytes), 0) AS reads, + COALESCE(SUM(read_time), 0) AS read_time, + COALESCE(SUM(writes * op_bytes), 0) AS writes, + COALESCE(SUM(write_time), 0) AS write_time, + COALESCE(SUM(writebacks * op_bytes), 0) AS writebacks, + COALESCE(SUM(writeback_time), 0) AS writeback_time, + COALESCE(SUM(extends * op_bytes), 0) AS extends, + COALESCE(SUM(extend_time), 0) AS extend_time, + COALESCE(SUM(op_bytes), 0) AS op_bytes, + COALESCE(SUM(hits), 0) AS hits, + COALESCE(SUM(evictions), 0) AS evictions, + COALESCE(SUM(reuses), 0) AS reuses, + COALESCE(SUM(fsyncs), 0) AS fsyncs, + COALESCE(SUM(fsync_time), 0) AS fsync_time + FROM pg_stat_io + GROUP BY backend_type + ## # Per-database metrics @@ -107,17 +137,17 @@ metrics: temp_files, temp_bytes, deadlocks, - 0 AS checksum_failures, + NULL AS checksum_failures, blk_read_time, blk_write_time, - 0 AS session_time, - 0 AS active_time, - 0 AS idle_in_transaction_time, - 0 AS sessions, - 0 AS sessions_abandoned, - 0 AS sessions_fatal, - 0 AS sessions_killed, - extract('epoch' from stats_reset)::float AS stats_reset + NULL AS session_time, + NULL AS active_time, + NULL AS idle_in_transaction_time, + NULL AS sessions, + NULL AS sessions_abandoned, + NULL AS sessions_fatal, + NULL AS sessions_killed, + extract('epoch' from stats_reset) AS stats_reset FROM pg_stat_database WHERE datname = %(dbname)s 140000: > SELECT numbackends, @@ -134,8 +164,7 @@ metrics: temp_files, temp_bytes, deadlocks, - COALESCE(checksum_failures, - 0) AS checksum_failures, + COALESCE(checksum_failures, 0) AS checksum_failures, blk_read_time, 
blk_write_time, session_time, @@ -145,7 +174,7 @@ metrics: sessions_abandoned, sessions_fatal, sessions_killed, - extract('epoch' from stats_reset)::float AS stats_reset + extract('epoch' from stats_reset) AS stats_reset FROM pg_stat_database WHERE datname = %(dbname)s test_args: dbname: postgres @@ -168,13 +197,40 @@ metrics: 0: > SELECT state, count(*) AS backend_count, - COALESCE(EXTRACT(EPOCH FROM max(now() - state_change))::float, 0) AS max_state_time + COALESCE(EXTRACT(EPOCH FROM max(now() - state_change)), 0) AS max_state_time FROM pg_stat_activity WHERE datname = %(dbname)s GROUP BY state test_args: dbname: postgres + sequence_usage: + type: value + query: +# 9.2 lacks lateral joins, the pg_sequence_last_value function, and the pg_sequences view +# 0: > +# SELECT COALESCE(MAX(pg_sequence_last_value(c.oid)::float / (pg_sequence_parameters(oid)).maximum_value), 0) AS max_usage +# FROM pg_class c +# WHERE c.relkind = 'S' +# 9.3 - 9.6 lacks the pg_sequence_last_value function, and pg_sequences view +# 90300: > +# SELECT COALESCE(MAX(pg_sequence_last_value(c.oid)::float / s.maximum_value), 0) AS max_usage +# FROM pg_class c +# CROSS JOIN LATERAL pg_sequence_parameters(c.oid) AS s +# WHERE c.relkind = 'S' + 100000: SELECT COALESCE(MAX(last_value::float / max_value), 0) AS max_usage FROM pg_sequences; + test_args: + dbname: postgres + + sequence_visibility: + type: row + query: + 100000: > + SELECT COUNT(*) FILTER (WHERE has_sequence_privilege(c.oid, 'SELECT,USAGE')) AS visible_sequences, + COUNT(*) AS total_sequences + FROM pg_class AS c + WHERE relkind = 'S'; + ## # Per-replication metrics @@ -184,7 +240,7 @@ metrics: query: 90400: > SELECT pid, usename, - EXTRACT(EPOCH FROM backend_start)::integer AS backend_start, + EXTRACT(EPOCH FROM backend_start) AS backend_start, state, pg_xlog_location_diff(pg_current_xlog_location(), sent_location) AS sent_lsn, pg_xlog_location_diff(pg_current_xlog_location(), write_location) AS write_lsn, @@ -195,20 +251,21 @@ metrics: NULL AS replay_lag, sync_state FROM pg_stat_replication - WHERE client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s + WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s 100000: > SELECT pid, usename, - EXTRACT(EPOCH FROM backend_start)::integer AS backend_start, + EXTRACT(EPOCH FROM backend_start) AS backend_start, state, pg_wal_lsn_diff(pg_current_wal_lsn(), sent_lsn) AS sent_lsn, pg_wal_lsn_diff(pg_current_wal_lsn(), write_lsn) AS write_lsn, pg_wal_lsn_diff(pg_current_wal_lsn(), flush_lsn) AS flush_lsn, pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lsn, - COALESCE(EXTRACT(EPOCH FROM write_lag), 0)::integer AS write_lag, - COALESCE(EXTRACT(EPOCH FROM flush_lag), 0)::integer AS flush_lag, - COALESCE(EXTRACT(EPOCH FROM replay_lag), 0)::integer AS replay_lag, + COALESCE(EXTRACT(EPOCH FROM write_lag), 0) AS write_lag, + COALESCE(EXTRACT(EPOCH FROM flush_lag), 0) AS flush_lag, + COALESCE(EXTRACT(EPOCH FROM replay_lag), 0) AS replay_lag, sync_state - FROM pg_stat_replication WHERE client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s + FROM pg_stat_replication + WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s test_args: repid: 127.0.0.1_test_rep @@ -240,6 +297,7 @@ metrics: test_args: slot: test_slot + ## # Debugging ## diff --git a/src/pgmon.py b/src/pgmon.py index 71d3491..2b72169 100755 --- a/src/pgmon.py +++ b/src/pgmon.py @@ -27,7 +27,9 @@ from urllib.parse 
import urlparse, parse_qs import requests import re -VERSION = "1.0.3" +from decimal import Decimal + +VERSION = "1.0.4" # Configuration config = {} @@ -391,6 +393,16 @@ def get_query(metric, version): raise MetricVersionError("Missing metric query for PostgreSQL {}".format(version)) +def json_encode_special(obj): + """ + Encoder function to handle types the standard JSON package doesn't know what + to do with + """ + if isinstance(obj, Decimal): + return float(obj) + raise TypeError(f'Cannot serialize object of {type(obj)}') + + def run_query_no_retry(pool, return_type, query, args): """ Run the query with no explicit retry code @@ -408,13 +420,13 @@ def run_query_no_retry(pool, return_type, query, args): elif return_type == "row": if len(res) == 0: return "[]" - return json.dumps(res[0]) + return json.dumps(res[0], default=json_encode_special) elif return_type == "column": if len(res) == 0: return "[]" - return json.dumps([list(r.values())[0] for r in res]) + return json.dumps([list(r.values())[0] for r in res], default=json_encode_special) elif return_type == "set": - return json.dumps(res) + return json.dumps(res, default=json_encode_special) except: dbname = pool.name if dbname in unhappy_cooldown: diff --git a/src/test_pgmon.py b/src/test_pgmon.py index d2c0590..1a86492 100644 --- a/src/test_pgmon.py +++ b/src/test_pgmon.py @@ -5,6 +5,9 @@ import tempfile import logging +from decimal import Decimal +import json + import pgmon # Silence most logging output @@ -789,3 +792,20 @@ metrics: # Make sure we can pull the RSS file (we assume the 9.6 series won't be getting # any more updates) self.assertEqual(pgmon.get_latest_version(), 90624) + + def test_json_encode_special(self): + # Confirm that we're getting the right type + self.assertFalse(isinstance(Decimal('0.5'), float)) + self.assertTrue(isinstance(pgmon.json_encode_special(Decimal('0.5')), float)) + + # Make sure we get sane values + self.assertEqual(pgmon.json_encode_special(Decimal('0.5')), 0.5) + self.assertEqual(pgmon.json_encode_special(Decimal('12')), 12.0) + + # Make sure we can still fail for other types + self.assertRaises( + TypeError, pgmon.json_encode_special, object + ) + + # Make sure we can actually serialize a Decimal + self.assertEqual(json.dumps(Decimal('2.5'), default=pgmon.json_encode_special), '2.5') diff --git a/zabbix_templates/pgmon_templates.yaml b/zabbix_templates/pgmon_templates.yaml index 931e6eb..64a13fe 100644 --- a/zabbix_templates/pgmon_templates.yaml +++ b/zabbix_templates/pgmon_templates.yaml @@ -95,6 +95,20 @@ zabbix_export: tags: - tag: Application value: PostgreSQL + - uuid: 06b1d082ed1e4796bc31cc25f7db6326 + name: 'PostgreSQL Backend IO Info' + type: HTTP_AGENT + key: 'pgmon[io_per_backend]' + history: '0' + value_type: TEXT + trends: '0' + description: 'Aggregated statistics about I/O activity for different backend types' + url: 'http://localhost:{$AGENT_PORT}/io_per_backend' + tags: + - tag: Application + value: PostgreSQL + - tag: Type + value: Raw - uuid: d890e395fbbc4f2bacbd50e7321fcb9f name: 'PostgreSQL latest version info' type: HTTP_AGENT @@ -771,6 +785,77 @@ zabbix_export: value: PostgreSQL - tag: Database value: '{#DBNAME}' + - uuid: 5960120dd01c4926b0fc1fbe9c011507 + name: 'Database max sequence usage in {#DBNAME}' + type: HTTP_AGENT + key: 'pgmon_db_max_sequence[{#DBNAME}]' + delay: 5m + value_type: FLOAT + units: '%' + description: 'The percent of the currently configured value range for the most utilized sequence.' 
+ url: 'http://localhost:{$AGENT_PORT}/sequence_usage' + query_fields: + - name: dbname + value: '{#DBNAME}' + tags: + - tag: Application + value: PostgreSQL + - tag: Database + value: '{#DBNAME}' + - uuid: 48b9cc80ac4d4aee9e9f3a5d6f7d4a95 + name: 'Total number of sequences on {#DBNAME}' + type: DEPENDENT + key: 'pgmon_db_sequences[total,{#DBNAME}]' + delay: '0' + description: 'Total number of sequences in the database.' + preprocessing: + - type: JSONPATH + parameters: + - $.total_sequences + master_item: + key: 'pgmon_db_sequence_visibility[{#DBNAME}]' + tags: + - tag: Application + value: PostgreSQL + - tag: Database + value: '{#DBNAME}' + - uuid: 6521a9bab2ac47bf85429832d289bbac + name: 'Visible sequences on {#DBNAME}' + type: DEPENDENT + key: 'pgmon_db_sequences[visible,{#DBNAME}]' + delay: '0' + description: 'Number of sequences in the database for which Zabbix can see stats.' + preprocessing: + - type: JSONPATH + parameters: + - $.visible_sequences + master_item: + key: 'pgmon_db_sequence_visibility[{#DBNAME}]' + tags: + - tag: Application + value: PostgreSQL + - tag: Database + value: '{#DBNAME}' + - uuid: 00f2da3eb99940839410a6ecd5df153f + name: 'Database sequence visibility in {#DBNAME}' + type: HTTP_AGENT + key: 'pgmon_db_sequence_visibility[{#DBNAME}]' + delay: 30m + history: '0' + value_type: TEXT + trends: '0' + description: 'Statistics about the number of sequences that exist and the number Zabbix can actually see stats for.' + url: 'http://localhost:{$AGENT_PORT}/sequence_visibility' + query_fields: + - name: dbname + value: '{#DBNAME}' + tags: + - tag: Application + value: PostgreSQL + - tag: Database + value: '{#DBNAME}' + - tag: Type + value: Raw - uuid: 492b3cac15f348c2b85f97b69c114d1b name: 'Database Stats for {#DBNAME}' type: HTTP_AGENT @@ -789,6 +874,17 @@ zabbix_export: value: '{#DBNAME}' - tag: Type value: Raw + trigger_prototypes: + - uuid: d29d0fd9d9d34b5ebd649592b0829ce5 + expression: 'last(/PostgreSQL by pgmon/pgmon_db_sequences[total,{#DBNAME}]) <> last(/PostgreSQL by pgmon/pgmon_db_sequences[visible,{#DBNAME}])' + name: 'Sequences not visible to Zabbix on {#DBNAME}' + priority: WARNING + description: 'There are sequences for which Zabbix cannot see usage statistics' + tags: + - tag: Application + value: PostgreSQL + - tag: Component + value: Sequence graph_prototypes: - uuid: 1f7de43b77714f819e61c31273712b70 name: 'DML Totals for {#DBNAME}' @@ -881,6 +977,596 @@ zabbix_export: lld_macro_paths: - lld_macro: '{#DBNAME}' path: $.dbname + - uuid: 6afbe12fb9f54fa2ad29d647429eb16e + name: 'Discover I/O Backend Types' + type: DEPENDENT + key: pgmon_discover_io_backend_types + delay: '0' + item_prototypes: + - uuid: b1ac2e56b30f4812bf33ce973ef16b10 + name: 'I/O Evictions by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[evictions,{#BACKEND_TYPE}]' + delay: '0' + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].evictions.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: bde8e6c28e714be58fa963a395e14619 + name: 'I/O Extends by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[extends,{#BACKEND_TYPE}]' + delay: '0' + units: B + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].extends.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + 
value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 5ce7a815c19241a6a398e17539cca1d4 + name: 'I/O fsyncs by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[fsyncs,{#BACKEND_TYPE}]' + delay: '0' + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].fsyncs.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 3be8cb08b6bc42f7888d8d7877ab759f + name: 'I/O Hits by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[hits,{#BACKEND_TYPE}]' + delay: '0' + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].hits.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: e28566682196415fb3bdad9d9a294ae9 + name: 'I/O Reads by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[reads,{#BACKEND_TYPE}]' + delay: '0' + units: B + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].reads.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: e5e9b21c7f7649b284a8ca5b4502cb96 + name: 'I/O Reuses by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[reuses,{#BACKEND_TYPE}]' + delay: '0' + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].reuses.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 6de5bc1a98dc4d8da23ac0a90983c8aa + name: 'I/O Writebacks by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[writebacks,{#BACKEND_TYPE}]' + delay: '0' + units: B + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].writebacks.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 87d09d3891c145a898050d390ea7f16e + name: 'I/O Writes by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend[writes,{#BACKEND_TYPE}]' + delay: '0' + units: B + preprocessing: + - type: JSONPATH + parameters: + - '$[?(@.backend_type == "{#BACKEND_TYPE}")].writes.first()' + master_item: + key: 'pgmon[io_per_backend]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 1587592caf694d91ba9457cafaca7761 + name: 'I/O Evictions by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[evictions,1h,{#BACKEND_TYPE}]' + delay: 10m + params: 'last(//pgmon_io_backend[evictions,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[evictions,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 637b0601458147b5b72bc40ab22b2117 + name: 'I/O Evictions Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[evictions,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + 
key: 'pgmon_io_backend[evictions,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: c7ed8e1bcb0945ffa2a36b06ea5fe0c5 + name: 'I/O Extends by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[extends,1h,{#BACKEND_TYPE}]' + delay: 10m + units: B/h + params: 'last(//pgmon_io_backend[extends,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[extends,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 855cbbc83d884b00a6fea457f8e09806 + name: 'I/O Extends Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[extends,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + units: Bps + preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + key: 'pgmon_io_backend[extends,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 20c6135bb6c8491b9ca68fa506232b04 + name: 'I/O fsyncs by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[fsyncs,1h,{#BACKEND_TYPE}]' + delay: 10m + params: 'last(//pgmon_io_backend[fsyncs,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[fsyncs,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 7391c372d1704f21b314379c2fe5bb64 + name: 'I/O fsyncs Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[fsyncs,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + key: 'pgmon_io_backend[fsyncs,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: ab8d0766bbf64db3bb09f9442d268a83 + name: 'I/O Hits by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[hits,1h,{#BACKEND_TYPE}]' + delay: 10m + params: 'last(//pgmon_io_backend[hits,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[hits,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: be9aa056b9364c12a8809d5dd3af8387 + name: 'I/O Hits Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[hits,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + key: 'pgmon_io_backend[hits,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 0c42a40a263948dda4464856747ac6ef + name: 'I/O Reads by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[reads,1h,{#BACKEND_TYPE}]' + delay: 10m + units: B/h + params: 'last(//pgmon_io_backend[reads,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[reads,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 01bc131aaec448e6aafb7ef13d1ea5ed + name: 'I/O Reads Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[reads,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + units: Bps + 
preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + key: 'pgmon_io_backend[reads,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 740d5c84be6e4b99b4ca3513b6c6c5ea + name: 'I/O Reuses by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[reuses,1h,{#BACKEND_TYPE}]' + delay: 10m + params: 'last(//pgmon_io_backend[reuses,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[reuses,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 8cbc16fc472a44d5945498285e8407be + name: 'I/O Reuses Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[reuses,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + key: 'pgmon_io_backend[reuses,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 2e796f4c75a94967a0b11a37a2d64726 + name: 'I/O Writebacks by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[writebacks,1h,{#BACKEND_TYPE}]' + delay: 10m + units: B/h + params: 'last(//pgmon_io_backend[writebacks,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[writebacks,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 52d2536db79047948893ccb764b2a220 + name: 'I/O Writebacks Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[writebacks,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + units: Bps + preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + key: 'pgmon_io_backend[writebacks,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 6a741d3b857b48eea0e1a4d1deea7183 + name: 'I/O Writes by {#BACKEND_TYPE} - 1h delta' + type: CALCULATED + key: 'pgmon_io_backend_delta[writes,1h,{#BACKEND_TYPE}]' + delay: 10m + units: B/h + params: 'last(//pgmon_io_backend[writes,{#BACKEND_TYPE}]) - last(//pgmon_io_backend[writes,{#BACKEND_TYPE}], #1:now-1h)' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + - uuid: 6d10339b08b84a91a931a64050a471cc + name: 'I/O Writes Rate by {#BACKEND_TYPE}' + type: DEPENDENT + key: 'pgmon_io_backend_delta[writes,{#BACKEND_TYPE}]' + delay: '0' + value_type: FLOAT + units: Bps + preprocessing: + - type: CHANGE_PER_SECOND + parameters: + - '' + master_item: + key: 'pgmon_io_backend[writes,{#BACKEND_TYPE}]' + tags: + - tag: Application + value: PostgreSQL + - tag: 'Backend Type' + value: '{#BACKEND_TYPE}' + - tag: Component + value: IO + graph_prototypes: + - uuid: faf1e3f41dd84adba42c414f6efac152 + name: 'Backend I/O for {#BACKEND_TYPE}' + graph_items: + - color: 199C0D + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend[evictions,{#BACKEND_TYPE}]' + - sortorder: '1' + color: F63100 + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend[extends,{#BACKEND_TYPE}]' + - sortorder: '2' + color: 2774A4 + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 
'pgmon_io_backend[fsyncs,{#BACKEND_TYPE}]' + - sortorder: '3' + color: F7941D + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend[hits,{#BACKEND_TYPE}]' + - sortorder: '4' + color: FC6EA3 + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend[reads,{#BACKEND_TYPE}]' + - sortorder: '5' + color: 6C59DC + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend[reuses,{#BACKEND_TYPE}]' + - sortorder: '6' + color: C7A72D + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend[writebacks,{#BACKEND_TYPE}]' + - sortorder: '7' + color: BA2A5D + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend[writes,{#BACKEND_TYPE}]' + - uuid: 24e2e3ec67cd43058c8b6a02d686fc6a + name: 'Backend I/O for {#BACKEND_TYPE} - 1h delta' + graph_items: + - color: 199C0D + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[evictions,1h,{#BACKEND_TYPE}]' + - sortorder: '1' + color: F63100 + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[extends,1h,{#BACKEND_TYPE}]' + - sortorder: '2' + color: 2774A4 + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[fsyncs,1h,{#BACKEND_TYPE}]' + - sortorder: '3' + color: F7941D + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[hits,1h,{#BACKEND_TYPE}]' + - sortorder: '4' + color: FC6EA3 + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[reads,1h,{#BACKEND_TYPE}]' + - sortorder: '5' + color: 6C59DC + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[reuses,1h,{#BACKEND_TYPE}]' + - sortorder: '6' + color: C7A72D + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[writebacks,1h,{#BACKEND_TYPE}]' + - sortorder: '7' + color: BA2A5D + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[writes,1h,{#BACKEND_TYPE}]' + - uuid: 815f1a00c19d45a18b10846644710668 + name: 'Backend I/O Rates for {#BACKEND_TYPE}' + graph_items: + - color: 199C0D + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[evictions,{#BACKEND_TYPE}]' + - sortorder: '1' + color: F63100 + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[extends,{#BACKEND_TYPE}]' + - sortorder: '2' + color: 2774A4 + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[fsyncs,{#BACKEND_TYPE}]' + - sortorder: '3' + color: F7941D + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[hits,{#BACKEND_TYPE}]' + - sortorder: '4' + color: FC6EA3 + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[reads,{#BACKEND_TYPE}]' + - sortorder: '5' + color: 6C59DC + yaxisside: RIGHT + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[reuses,{#BACKEND_TYPE}]' + - sortorder: '6' + color: C7A72D + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[writebacks,{#BACKEND_TYPE}]' + - sortorder: '7' + color: BA2A5D + calc_fnc: ALL + item: + host: 'PostgreSQL by pgmon' + key: 'pgmon_io_backend_delta[writes,{#BACKEND_TYPE}]' + master_item: + key: 'pgmon[io_per_backend]' + lld_macro_paths: + - lld_macro: 
'{#BACKEND_TYPE}' + path: $.backend_type + preprocessing: + - type: JSONPATH + parameters: + - '$[:]' - uuid: 8ec029d577ae4872858e2e5cfd1cc40e name: 'Discover Replication' type: HTTP_AGENT @@ -896,6 +1582,7 @@ zabbix_export: delay: '0' history: 90d value_type: FLOAT + units: s description: 'Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written and flushed it (but not yet applied it). This can be used to gauge the delay that synchronous_commit level on incurred while committing if this server was configured as a synchronous standby.' preprocessing: - type: JSONPATH @@ -911,13 +1598,12 @@ zabbix_export: - tag: Database value: '{#DBNAME}' - uuid: 624f8f085a3642c9a10a03361c17763d - name: 'Last flush LSN for {#REPID}' + name: 'Last flush LSN lag for {#REPID}' type: DEPENDENT key: 'pgmon_rep[flush_lsn,repid={#REPID}]' delay: '0' history: 90d - value_type: TEXT - trends: '0' + units: B description: 'Last write-ahead log location flushed to disk by this standby server' preprocessing: - type: JSONPATH @@ -939,6 +1625,7 @@ zabbix_export: delay: '0' history: 90d value_type: FLOAT + units: s description: 'Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written, flushed and applied it. This can be used to gauge the delay that synchronous_commit level remote_apply incurred while committing if this server was configured as a synchronous standby.' preprocessing: - type: JSONPATH @@ -954,13 +1641,12 @@ zabbix_export: - tag: Database value: '{#DBNAME}' - uuid: fe1bed51845d4694bae8f53deed4846d - name: 'Last replay LSN for {#REPID}' + name: 'Last replay LSN lag for {#REPID}' type: DEPENDENT key: 'pgmon_rep[replay_lsn,repid={#REPID}]' delay: '0' history: 90d - value_type: TEXT - trends: '0' + units: B description: 'Last write-ahead log location replayed into the database on this standby server' preprocessing: - type: JSONPATH @@ -976,13 +1662,12 @@ zabbix_export: - tag: Database value: '{#DBNAME}' - uuid: 68c179d0e33f45f9bf82d2d4125763f0 - name: 'Last sent LSN for {#REPID}' + name: 'Last sent LSN lag for {#REPID}' type: DEPENDENT key: 'pgmon_rep[sent_lsn,repid={#REPID}]' delay: '0' history: 90d - value_type: TEXT - trends: '0' + units: B description: 'Last write-ahead log location sent on this connection' preprocessing: - type: JSONPATH @@ -1032,6 +1717,7 @@ zabbix_export: delay: '0' history: 90d value_type: FLOAT + units: s description: 'Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.' preprocessing: - type: JSONPATH @@ -1047,13 +1733,12 @@ zabbix_export: - tag: Database value: '{#DBNAME}' - uuid: 57fb03cf63af4b0a91d8e36d6ff64d30 - name: 'Last write LSN for {#REPID}' + name: 'Last write LSN lag for {#REPID}' type: DEPENDENT key: 'pgmon_rep[write_lsn,repid={#REPID}]' delay: '0' history: 90d - value_type: TEXT - trends: '0' + units: B description: 'Time elapsed between flushing recent WAL locally and receiving notification that this standby server has written it (but not yet flushed it or applied it). This can be used to gauge the delay that synchronous_commit level remote_write incurred while committing if this server was configured as a synchronous standby.' preprocessing: - type: JSONPATH
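For context on the Decimal handling introduced in src/pgmon.py above: numeric columns (and, on newer PostgreSQL versions, EXTRACT(EPOCH FROM ...) results, now that the ::float casts were dropped from the metric queries) come back from the driver as decimal.Decimal, which the standard json module cannot serialize, so results are dumped with json_encode_special as the default hook. A minimal standalone sketch of that pattern follows; the sample row is invented for illustration only.

import json
from decimal import Decimal

def json_encode_special(obj):
    # Mirrors the hook added in src/pgmon.py: Decimal values become floats;
    # anything else the json module cannot handle still raises TypeError.
    if isinstance(obj, Decimal):
        return float(obj)
    raise TypeError(f"Cannot serialize object of {type(obj)}")

# Hypothetical row shaped like the io_per_backend metric output.
row = {"backend_type": "client backend", "reads": Decimal("8192"), "read_time": Decimal("0.25")}
print(json.dumps([row], default=json_encode_special))
# [{"backend_type": "client backend", "reads": 8192.0, "read_time": 0.25}]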