Compare commits

..

No commits in common. "main" and "v1.0.0" have entirely different histories.
main ... v1.0.0

22 changed files with 719 additions and 3005 deletions

View File

@ -3,7 +3,7 @@ Version: 1.0
Section: utils Section: utils
Priority: optional Priority: optional
Architecture: all Architecture: all
Depends: logrotate, python3 (>= 3.6), python3-psycopg2, python3-requests, python3-yaml, systemd Depends: logrotate, python3 (>= 3.6), python3-psycopg2, python3-yaml, systemd
Maintainer: James Campbell <james@commandprompt.com> Maintainer: James Campbell <james@commandprompt.com>
Homepage: https://www.commandprompt.com Homepage: https://www.commandprompt.com
Description: A bridge to sit between monitoring tools and PostgreSQL Description: A bridge to sit between monitoring tools and PostgreSQL

52
GENTOO/pgmon-0.1.0.ebuild Normal file
View File

@ -0,0 +1,52 @@
# Copyright 2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
PYTHON_COMPAT=( python3_{6..12} )
inherit git-r3 python-r1
DESCRIPTION="PostgreSQL monitoring bridge"
HOMEPAGE="None"
LICENSE="BSD"
SLOT="0"
KEYWORDS="amd64"
EGIT_REPO_URI="https://code2.shh-dot-com.org/james/pgmon.git"
#EGIT_COMMIT=""
DEPEND="
${PYTHON_DEPS}
dev-python/psycopg:3
dev-python/pyyaml
acct-user/zabbix
acct-group/zabbix
agent? ( net-analyzer/zabbix[agent] )
agent2? ( net-analyzer/zabbix[agent2] )
app-admin/logrotate
"
RDEPEND="${DEPEND}"
BDEPEND=""
src_install() {
default
# Install init script
newinitd "${FILESDIR}/pgmon.openrc" pgmon
# Install script
exeinto /usr/bin
newexe "${S}/pgmon.py" pgmon
# Install default config
diropts -o root -g zabbix -m 0755
insinto /etc/pgmon
doins "${FILESDIR}/pgmon.yml"
doins "${S}/pgmon-metrics.yml"
# Install logrotate config
insinto /etc/logrotate.d
newins "${FILESDIR}/pgmon.logrotate" pgmon
}

View File

@ -5,7 +5,7 @@ EAPI=8
PYTHON_COMPAT=( python3_{6..13} ) PYTHON_COMPAT=( python3_{6..13} )
inherit python-r1 inherit git-r3 python-r1
DESCRIPTION="PostgreSQL monitoring bridge" DESCRIPTION="PostgreSQL monitoring bridge"
HOMEPAGE="None" HOMEPAGE="None"
@ -14,9 +14,7 @@ LICENSE="BSD"
SLOT="0" SLOT="0"
KEYWORDS="amd64" KEYWORDS="amd64"
SRC_URI="https://code2.shh-dot-com.org/james/${PN}/archive/v${PV}.tar.bz2 -> ${P}.tar.bz2" SRC_URI="https://code2.shh-dot-com.org/james/${PN}/archive/v${PV}.tar.gz -> ${P}.tar.gz"
IUSE="-systemd"
DEPEND=" DEPEND="
${PYTHON_DEPS} ${PYTHON_DEPS}
@ -27,36 +25,21 @@ DEPEND="
RDEPEND="${DEPEND}" RDEPEND="${DEPEND}"
BDEPEND="" BDEPEND=""
RESTRICT="fetch" S="${WORKDIR}/${PN}"
#S="${WORKDIR}/${PN}"
pkg_nofetch() {
einfo "Please download"
einfo " - ${P}.tar.bz2"
einfo "from ${HOMEPAGE} and place it in your DISTDIR directory."
einfo "The file should be owned by portage:portage."
}
src_compile() {
true
}
src_install() { src_install() {
default
# Install init script # Install init script
if ! use systemd ; then
newinitd "openrc/pgmon.initd" pgmon newinitd "openrc/pgmon.initd" pgmon
newconfd "openrc/pgmon.confd" pgmon newconfd "openrc/pgmon.confd" pgmon
fi
# Install systemd unit # Install systemd unit
if use systemd ; then
systemd_dounit "systemd/pgmon.service" systemd_dounit "systemd/pgmon.service"
fi
# Install script # Install script
exeinto /usr/bin exeinto /usr/bin
newexe "src/pgmon.py" pgmon newexe "pgmon.py" pgmon
# Install default config # Install default config
diropts -o root -g root -m 0755 diropts -o root -g root -m 0755

View File

@ -1,73 +0,0 @@
# Copyright 2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
PYTHON_COMPAT=( python3_{6..13} )
inherit python-r1
DESCRIPTION="PostgreSQL monitoring bridge"
HOMEPAGE="None"
LICENSE="BSD"
SLOT="0"
KEYWORDS="amd64"
SRC_URI="https://code2.shh-dot-com.org/james/${PN}/archive/v${PV}.tar.bz2 -> ${P}.tar.bz2"
IUSE="-systemd"
DEPEND="
${PYTHON_DEPS}
dev-python/psycopg:2
dev-python/pyyaml
app-admin/logrotate
"
RDEPEND="${DEPEND}"
BDEPEND=""
RESTRICT="fetch"
#S="${WORKDIR}/${PN}"
pkg_nofetch() {
einfo "Please download"
einfo " - ${P}.tar.bz2"
einfo "from ${HOMEPAGE} and place it in your DISTDIR directory."
einfo "The file should be owned by portage:portage."
}
src_compile() {
true
}
src_install() {
# Install init script
if ! use systemd ; then
newinitd "openrc/pgmon.initd" pgmon
newconfd "openrc/pgmon.confd" pgmon
fi
# Install systemd unit
if use systemd ; then
systemd_dounit "systemd/pgmon.service"
fi
# Install script
exeinto /usr/bin
newexe "src/pgmon.py" pgmon
# Install default config
diropts -o root -g root -m 0755
insinto /etc/pgmon
doins "sample-config/pgmon.yml"
doins "sample-config/pgmon-metrics.yml"
# Install logrotate config
insinto /etc/logrotate.d
newins "logrotate/pgmon.logrotate" pgmon
# Install man page
doman manpages/pgmon.1
}

View File

@ -1,74 +0,0 @@
# Copyright 2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
PYTHON_COMPAT=( python3_{6..13} )
inherit python-r1 systemd
DESCRIPTION="PostgreSQL monitoring bridge"
HOMEPAGE="None"
LICENSE="BSD"
SLOT="0"
KEYWORDS="amd64"
SRC_URI="https://code2.shh-dot-com.org/james/${PN}/releases/download/v${PV}/${P}.tar.bz2"
IUSE="-systemd"
DEPEND="
${PYTHON_DEPS}
dev-python/psycopg:2
dev-python/pyyaml
dev-python/requests
app-admin/logrotate
"
RDEPEND="${DEPEND}"
BDEPEND=""
#RESTRICT="fetch"
#S="${WORKDIR}/${PN}"
#pkg_nofetch() {
# einfo "Please download"
# einfo " - ${P}.tar.bz2"
# einfo "from ${HOMEPAGE} and place it in your DISTDIR directory."
# einfo "The file should be owned by portage:portage."
#}
src_compile() {
true
}
src_install() {
# Install init script
if ! use systemd ; then
newinitd "openrc/pgmon.initd" pgmon
newconfd "openrc/pgmon.confd" pgmon
fi
# Install systemd unit
if use systemd ; then
systemd_dounit "systemd/pgmon.service"
fi
# Install script
exeinto /usr/bin
newexe "src/pgmon.py" pgmon
# Install default config
diropts -o root -g root -m 0755
insinto /etc/pgmon
doins "sample-config/pgmon.yml"
doins "sample-config/pgmon-metrics.yml"
# Install logrotate config
insinto /etc/logrotate.d
newins "logrotate/pgmon.logrotate" pgmon
# Install man page
doman manpages/pgmon.1
}

View File

@ -1,74 +0,0 @@
# Copyright 2024 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
EAPI=8
PYTHON_COMPAT=( python3_{6..13} )
inherit python-r1 systemd
DESCRIPTION="PostgreSQL monitoring bridge"
HOMEPAGE="None"
LICENSE="BSD"
SLOT="0"
KEYWORDS="amd64"
SRC_URI="https://code2.shh-dot-com.org/james/${PN}/releases/download/v${PV}/${P}.tar.bz2"
IUSE="-systemd"
DEPEND="
${PYTHON_DEPS}
dev-python/psycopg:2
dev-python/pyyaml
dev-python/requests
app-admin/logrotate
"
RDEPEND="${DEPEND}"
BDEPEND=""
#RESTRICT="fetch"
#S="${WORKDIR}/${PN}"
#pkg_nofetch() {
# einfo "Please download"
# einfo " - ${P}.tar.bz2"
# einfo "from ${HOMEPAGE} and place it in your DISTDIR directory."
# einfo "The file should be owned by portage:portage."
#}
src_compile() {
true
}
src_install() {
# Install init script
if ! use systemd ; then
newinitd "openrc/pgmon.initd" pgmon
newconfd "openrc/pgmon.confd" pgmon
fi
# Install systemd unit
if use systemd ; then
systemd_dounit "systemd/pgmon.service"
fi
# Install script
exeinto /usr/bin
newexe "src/pgmon.py" pgmon
# Install default config
diropts -o root -g root -m 0755
insinto /etc/pgmon
doins "sample-config/pgmon.yml"
doins "sample-config/pgmon-metrics.yml"
# Install logrotate config
insinto /etc/logrotate.d
newins "logrotate/pgmon.logrotate" pgmon
# Install man page
doman manpages/pgmon.1
}

118
Makefile
View File

@ -1,25 +1,9 @@
# Package details # Package details
PACKAGE_NAME := pgmon PACKAGE_NAME := pgmon
VERSION := 1.0
SCRIPT := src/$(PACKAGE_NAME).py SCRIPT := src/$(PACKAGE_NAME).py
# Figure out the version components
# Note: The release is for RPM packages, where prerelease releases are written as 0.<release>
FULL_VERSION := $(shell grep -m 1 '^VERSION = ' "$(SCRIPT)" | sed -ne 's/.*"\(.*\)".*/\1/p')
VERSION := $(shell echo $(FULL_VERSION) | sed -n 's/\(.*\)\(-rc.*\|$$\)/\1/p')
RELEASE := $(shell echo $(FULL_VERSION) | sed -n 's/.*-rc\([0-9]\+\)$$/\1/p')
ifeq ($(RELEASE),)
RPM_RELEASE := 1
RPM_VERSION := $(VERSION)-$(RPM_RELEASE)
DEB_VERSION := $(VERSION)
else
RPM_RELEASE := 0.$(RELEASE)
RPM_VERSION := $(VERSION)-$(RPM_RELEASE)
DEB_VERSION := $(VERSION)~rc$(RELEASE)
endif
# Where packages are built # Where packages are built
BUILD_DIR := build BUILD_DIR := build
@ -31,29 +15,18 @@ SUPPORTED := ubuntu-20.04 \
debian-11 \ debian-11 \
rockylinux-8 \ rockylinux-8 \
rockylinux-9 \ rockylinux-9 \
oraclelinux-7 \ oraclelinux-7
gentoo
## ##
# These targets are the main ones to use for most things. # These targets are the main ones to use for most things.
## ##
.PHONY: all clean tgz test query-tests install-common install-openrc install-systemd .PHONY: all clean tgz test install
all: package-all
version:
@echo "full version=$(FULL_VERSION) version=$(VERSION) rel=$(RELEASE) rpm=$(RPM_VERSION) deb=$(DEB_VERSION)"
# Build all packages # Build all packages
.PHONY: package-all .PHONY: package-all
package-all: $(foreach distro_release, $(SUPPORTED), package-$(distro_release)) all: $(foreach distro_release, $(SUPPORTED), package-$(distro_release))
# Gentoo package (tar.gz) creation
.PHONY: package-gentoo
package-gentoo:
mkdir -p $(BUILD_DIR)/gentoo
tar --transform "s,^,$(PACKAGE_NAME)-$(FULL_VERSION)/," -acjf $(BUILD_DIR)/gentoo/$(PACKAGE_NAME)-$(FULL_VERSION).tar.bz2 --exclude .gitignore $(shell git ls-tree --full-tree --name-only -r HEAD)
# Create a deb package # Create a deb package
@ -69,12 +42,13 @@ package-%:
--user $(shell id -u):$(shell id -g) \ --user $(shell id -u):$(shell id -g) \
"$(DISTRO)-packager:$(RELEASE)" "$(DISTRO)-packager:$(RELEASE)"
# Create a tarball # Create a tarball
tgz: tgz:
rm -rf $(BUILD_DIR)/tgz/root rm -rf $(BUILD_DIR)/tgz/root
mkdir -p $(BUILD_DIR)/tgz/root mkdir -p $(BUILD_DIR)/tgz/root
$(MAKE) install-openrc DESTDIR=$(BUILD_DIR)/tgz/root $(MAKE) install DESTDIR=$(BUILD_DIR)/tgz/root
tar -cz -f $(BUILD_DIR)/tgz/$(PACKAGE_NAME)-$(FULL_VERSION).tgz -C $(BUILD_DIR)/tgz/root . tar -cz -f $(BUILD_DIR)/tgz/$(PACKAGE_NAME)-$(VERSION).tgz -C $(BUILD_DIR)/tgz/root .
# Clean up the build directory # Clean up the build directory
clean: clean:
@ -84,21 +58,18 @@ clean:
test: test:
cd src ; python3 -m unittest cd src ; python3 -m unittest
# Run query tests # Install the script at the specified base directory
query-tests: install:
cd tests ; ./run-tests.sh
# Install the script at the specified base directory (common components)
install-common:
# Set up directories # Set up directories
mkdir -p $(DESTDIR)/etc/$(PACKAGE_NAME) mkdir -p $(DESTDIR)/etc/$(PACKAGE_NAME)
mkdir -p ${DESTDIR}/etc/logrotate.d mkdir -p ${DESTDIR}/etc/logrotate.d
mkdir -p $(DESTDIR)/usr/bin mkdir -p $(DESTDIR)/lib/systemd/system
mkdir -p $(DESTDIR)/usr/local/bin
mkdir -p $(DESTDIR)/usr/share/man/man1 mkdir -p $(DESTDIR)/usr/share/man/man1
# Install script # Install script
cp $(SCRIPT) $(DESTDIR)/usr/bin/$(PACKAGE_NAME) cp $(SCRIPT) $(DESTDIR)/usr/local/bin/$(PACKAGE_NAME)
chmod 755 $(DESTDIR)/usr/bin/$(PACKAGE_NAME) chmod 755 $(DESTDIR)/usr/local/bin/$(PACKAGE_NAME)
# Install manpage # Install manpage
cp manpages/* $(DESTDIR)/usr/share/man/man1/ cp manpages/* $(DESTDIR)/usr/share/man/man1/
@ -107,39 +78,15 @@ install-common:
# Install sample config # Install sample config
cp sample-config/* $(DESTDIR)/etc/$(PACKAGE_NAME)/ cp sample-config/* $(DESTDIR)/etc/$(PACKAGE_NAME)/
# Install logrotate config
cp logrotate/${PACKAGE_NAME}.logrotate ${DESTDIR}/etc/logrotate.d/${PACKAGE_NAME}
# Install for systemd
install-systemd:
# Install the common stuff
$(MAKE) install-common
# Set up directories
mkdir -p $(DESTDIR)/lib/systemd/system
# Install systemd unit files # Install systemd unit files
cp systemd/* $(DESTDIR)/lib/systemd/system/ cp systemd/* $(DESTDIR)/lib/systemd/system/
# Install for open-rc # Install logrotate config
install-openrc: cp logrotate/${PACKAGE_NAME}.logrotate ${DESTDIR}/etc/logrotate.d/${PACKAGE_NAME}
# Install the common stuff
$(MAKE) install-common
# Set up directories
mkdir -p $(DESTDIR)/etc/init.d
mkdir -p $(DESTDIR)/etc/conf.d
# Install init script
cp openrc/pgmon.initd $(DESTDIR)/etc/init.d/pgmon
chmod 755 $(DESTDIR)/etc/init.d/pgmon
# Install init script config file
cp openrc/pgmon.confd $(DESTDIR)/etc/conf.d/pgmon
# Run all of the install tests # Run all of the install tests
.PHONY: install-tests debian-%-install-test rockylinux-%-install-test ubuntu-%-install-test gentoo-install-test .PHONY: install-tests debian-%-install-test rockylinux-%-install-test ubuntu-%-install-test
install-tests: $(foreach distro_release, $(SUPPORTED), $(distro_release)-install-test) install-tests: $(foreach distro_release, $(SUPPORTED), $(distro_release)-install-test)
@ -148,33 +95,28 @@ debian-%-install-test:
docker run --rm \ docker run --rm \
-v ./$(BUILD_DIR):/output \ -v ./$(BUILD_DIR):/output \
debian:$* \ debian:$* \
bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(DEB_VERSION)-debian-$*.deb' bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(VERSION)-debian-$*.deb'
# Run a RedHat install test # Run a RedHat install test
rockylinux-%-install-test: rockylinux-%-install-test:
docker run --rm \ docker run --rm \
-v ./$(BUILD_DIR):/output \ -v ./$(BUILD_DIR):/output \
rockylinux:$* \ rockylinux:$* \
bash -c 'dnf makecache && dnf install -y /output/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm' bash -c 'dnf makecache && dnf install -y /output/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm'
# Run an Ubuntu install test # Run an Ubuntu install test
ubuntu-%-install-test: ubuntu-%-install-test:
docker run --rm \ docker run --rm \
-v ./$(BUILD_DIR):/output \ -v ./$(BUILD_DIR):/output \
ubuntu:$* \ ubuntu:$* \
bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(DEB_VERSION)-ubuntu-$*.deb' bash -c 'apt-get update && apt-get install -y /output/$(PACKAGE_NAME)-$(VERSION)-ubuntu-$*.deb'
# Run an OracleLinux install test (this is for EL7 since CentOS7 images no longer exist) # Run an OracleLinux install test (this is for EL7 since CentOS7 images no longer exist)
oraclelinux-%-install-test: oraclelinux-%-install-test:
docker run --rm \ docker run --rm \
-v ./$(BUILD_DIR):/output \ -v ./$(BUILD_DIR):/output \
oraclelinux:7 \ oraclelinux:7 \
bash -c 'yum makecache && yum install -y /output/$(PACKAGE_NAME)-$(RPM_VERSION).el7.noarch.rpm' bash -c 'yum makecache && yum install -y /output/$(PACKAGE_NAME)-$(VERSION)-1.el7.noarch.rpm'
# Run a Gentoo install test
gentoo-install-test:
# May impliment this in the future, but would require additional headaches to set up a repo
true
## ##
# Container targets # Container targets
@ -209,30 +151,30 @@ package-image-%:
# Debian package creation # Debian package creation
actually-package-debian-%: actually-package-debian-%:
$(MAKE) install-systemd DESTDIR=/output/debian-$* $(MAKE) install DESTDIR=/output/debian-$*
cp -r --preserve=mode DEBIAN /output/debian-$*/ cp -r --preserve=mode DEBIAN /output/debian-$*/
dpkg-deb -Zgzip --build /output/debian-$* "/output/$(PACKAGE_NAME)-$(DEB_VERSION)-debian-$*.deb" dpkg-deb -Zgzip --build /output/debian-$* "/output/$(PACKAGE_NAME)-$(VERSION)-debian-$*.deb"
# RedHat package creation # RedHat package creation
actually-package-rockylinux-%: actually-package-rockylinux-%:
mkdir -p /output/rockylinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS} mkdir -p /output/rockylinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
sed -e "s/@@VERSION@@/$(VERSION)/g" -e "s/@@RELEASE@@/$(RPM_RELEASE)/g" RPM/$(PACKAGE_NAME).spec > /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec cp RPM/$(PACKAGE_NAME).spec /output/rockylinux-$*/SPECS/
rpmbuild --define '_topdir /output/rockylinux-$*' \ rpmbuild --define '_topdir /output/rockylinux-$*' \
--define 'version $(RPM_VERSION)' \ --define 'version $(VERSION)' \
-bb /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec -bb /output/rockylinux-$*/SPECS/$(PACKAGE_NAME).spec
cp /output/rockylinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm /output/ cp /output/rockylinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm /output/
# Ubuntu package creation # Ubuntu package creation
actually-package-ubuntu-%: actually-package-ubuntu-%:
$(MAKE) install-systemd DESTDIR=/output/ubuntu-$* $(MAKE) install DESTDIR=/output/ubuntu-$*
cp -r --preserve=mode DEBIAN /output/ubuntu-$*/ cp -r --preserve=mode DEBIAN /output/ubuntu-$*/
dpkg-deb -Zgzip --build /output/ubuntu-$* "/output/$(PACKAGE_NAME)-$(DEB_VERSION)-ubuntu-$*.deb" dpkg-deb -Zgzip --build /output/ubuntu-$* "/output/$(PACKAGE_NAME)-$(VERSION)-ubuntu-$*.deb"
# OracleLinux package creation # OracleLinux package creation
actually-package-oraclelinux-%: actually-package-oraclelinux-%:
mkdir -p /output/oraclelinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS} mkdir -p /output/oraclelinux-$*/{BUILD,RPMS,SOURCES,SPECS,SRPMS}
sed -e "s/@@VERSION@@/$(VERSION)/g" -e "s/@@RELEASE@@/$(RPM_RELEASE)/g" RPM/$(PACKAGE_NAME)-el7.spec > /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec cp RPM/$(PACKAGE_NAME)-el7.spec /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec
rpmbuild --define '_topdir /output/oraclelinux-$*' \ rpmbuild --define '_topdir /output/oraclelinux-$*' \
--define 'version $(RPM_VERSION)' \ --define 'version $(VERSION)' \
-bb /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec -bb /output/oraclelinux-$*/SPECS/$(PACKAGE_NAME).spec
cp /output/oraclelinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(RPM_VERSION).el$*.noarch.rpm /output/ cp /output/oraclelinux-$*/RPMS/noarch/$(PACKAGE_NAME)-$(VERSION)-1.el$*.noarch.rpm /output/

View File

@ -1,13 +1,13 @@
Name: pgmon Name: pgmon
Version: @@VERSION@@ Version: 1.0
Release: @@RELEASE@@%{?dist} Release: 1%{?dist}
Summary: A bridge to sit between monitoring tools and PostgreSQL Summary: A bridge to sit between monitoring tools and PostgreSQL
License: MIT License: MIT
URL: https://www.commandprompt.com URL: https://www.commandprompt.com
BuildArch: noarch BuildArch: noarch
Requires: logrotate, python, python-psycopg2, PyYAML, python-requests, systemd Requires: logrotate, python, python-psycopg2, PyYAML, systemd
%description %description
A bridge to sit between monitoring tools and PostgreSQL A bridge to sit between monitoring tools and PostgreSQL
@ -19,7 +19,7 @@ A bridge to sit between monitoring tools and PostgreSQL
# Do nothing since we have nothing to build # Do nothing since we have nothing to build
%install %install
make -C /src install-systemd DESTDIR=%{buildroot} make -C /src install DESTDIR=%{buildroot}
%files %files
/etc/logrotate.d/pgmon /etc/logrotate.d/pgmon
@ -28,7 +28,7 @@ make -C /src install-systemd DESTDIR=%{buildroot}
/etc/pgmon/pgmon-service.conf /etc/pgmon/pgmon-service.conf
/lib/systemd/system/pgmon.service /lib/systemd/system/pgmon.service
/lib/systemd/system/pgmon@.service /lib/systemd/system/pgmon@.service
/usr/bin/pgmon /usr/local/bin/pgmon
/usr/share/man/man1/pgmon.1.gz /usr/share/man/man1/pgmon.1.gz
%post %post

View File

@ -1,13 +1,13 @@
Name: pgmon Name: pgmon
Version: @@VERSION@@ Version: 1.0
Release: @@RELEASE@@%{?dist} Release: 1%{?dist}
Summary: A bridge to sit between monitoring tools and PostgreSQL Summary: A bridge to sit between monitoring tools and PostgreSQL
License: MIT License: MIT
URL: https://www.commandprompt.com URL: https://www.commandprompt.com
BuildArch: noarch BuildArch: noarch
Requires: logrotate, python3, python3-psycopg2, python3-pyyaml, python3-requests, systemd Requires: logrotate, python3, python3-psycopg2, python3-pyyaml, systemd
%description %description
A bridge to sit between monitoring tools and PostgreSQL A bridge to sit between monitoring tools and PostgreSQL
@ -19,7 +19,7 @@ A bridge to sit between monitoring tools and PostgreSQL
# Do nothing since we have nothing to build # Do nothing since we have nothing to build
%install %install
make -C /src install-systemd DESTDIR=%{buildroot} make -C /src install DESTDIR=%{buildroot}
%files %files
/etc/logrotate.d/pgmon /etc/logrotate.d/pgmon
@ -28,7 +28,7 @@ make -C /src install-systemd DESTDIR=%{buildroot}
/etc/pgmon/pgmon-service.conf /etc/pgmon/pgmon-service.conf
/lib/systemd/system/pgmon.service /lib/systemd/system/pgmon.service
/lib/systemd/system/pgmon@.service /lib/systemd/system/pgmon@.service
/usr/bin/pgmon /usr/local/bin/pgmon
/usr/share/man/man1/pgmon.1.gz /usr/share/man/man1/pgmon.1.gz
%post %post

View File

@ -11,14 +11,7 @@ PGMON_USER="${PGMON_USER:-postgres}"
PGMON_GROUP="${PGMON_GROUP:-$PGMON_USER}" PGMON_GROUP="${PGMON_GROUP:-$PGMON_USER}"
CONFIG_FILE="/etc/pgmon/${agent_name}.yml" CONFIG_FILE="/etc/pgmon/${agent_name}.yml"
output_log=/var/log/pgmon/${SVCNAME}.log
error_log=/var/log/pgmon/${SVCNAME}.err
start_pre() {
checkpath -f -m 0644 -o "${PGMON_USER}:${PGMON_GROUP}" "${output_log}" "${error_log}"
}
command="/usr/bin/pgmon" command="/usr/bin/pgmon"
command_args="-c '$CONFIG_FILE'" command_args="'$CONFIG_FILE'"
command_background="true" command_background="true"
command_user="${PGMON_USER}:${PGMON_GROUP}" command_user="${PGMON_USER}:${PGMON_GROUP}"

View File

@ -1,4 +0,0 @@
-r requirements.txt
testcontainers[postgresql]
pytest
black

View File

@ -1,307 +1,45 @@
metrics: metrics:
##
# Discovery metrics # Discovery metrics
##
discover_dbs: discover_dbs:
type: set type: set
query: query:
0: > 0: SELECT datname AS dbname FROM pg_database
SELECT datname AS dbname
FROM pg_database
# Note: If the user lacks sufficient privileges, these fields will be NULL.
# The WHERE clause is intended to prevent Zabbix from discovering a
# connection it cannot monitor. Ideally this would generate an error
# instead.
discover_rep: discover_rep:
type: set type: set
query: query:
0: > 0: SELECT client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid, client_addr, state FROM pg_stat_replication
SELECT host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') AS repid,
client_addr,
state
FROM pg_stat_replication
WHERE state IS NOT NULL
discover_slots:
type: set
query:
90400: >
SELECT slot_name,
plugin,
slot_type,
database,
false as temporary,
active
FROM pg_replication_slots
100000: >
SELECT slot_name,
plugin,
slot_type,
database,
temporary,
active
FROM pg_replication_slots
##
# cluster-wide metrics # cluster-wide metrics
##
version: version:
type: value type: value
query: query:
0: SHOW server_version_num 0: SHOW server_version_num
max_frozen_age: max_frozen_age:
type: row type: value
query: query:
0: > 0: SELECT max(age(datfrozenxid)) FROM pg_database
SELECT max(age(datfrozenxid)) AS xid_age,
NULL AS mxid_age
FROM pg_database
90600: >
SELECT max(age(datfrozenxid)) AS xid_age,
max(mxid_age(datminmxid)) AS mxid_age
FROM pg_database
bgwriter:
type: row
query:
0: >
SELECT checkpoints_timed,
checkpoints_req,
checkpoint_write_time,
checkpoint_sync_time,
buffers_checkpoint,
buffers_clean,
maxwritten_clean,
buffers_backend,
buffers_backend_fsync,
buffers_alloc
FROM pg_stat_bgwriter
170000: >
SELECT cp.num_timed AS checkpoints_timed,
cp.num_requested AS checkpoints_req,
cp.write_time AS checkpoint_write_time,
cp.sync_time AS checkpoint_sync_time,
cp.buffers_written AS buffers_checkpoint,
bg.buffers_clean AS buffers_clean,
bg.maxwritten_clean AS maxwritten_clean,
NULL AS buffers_backend,
NULL AS buffers_backend_fsync,
bg.buffers_alloc AS buffers_alloc
FROM pg_stat_bgwriter bg
CROSS JOIN pg_stat_checkpointer cp
io_per_backend:
type: set
query:
160000: >
SELECT backend_type,
COALESCE(SUM(reads * op_bytes), 0) AS reads,
COALESCE(SUM(read_time), 0) AS read_time,
COALESCE(SUM(writes * op_bytes), 0) AS writes,
COALESCE(SUM(write_time), 0) AS write_time,
COALESCE(SUM(writebacks * op_bytes), 0) AS writebacks,
COALESCE(SUM(writeback_time), 0) AS writeback_time,
COALESCE(SUM(extends * op_bytes), 0) AS extends,
COALESCE(SUM(extend_time), 0) AS extend_time,
COALESCE(SUM(op_bytes), 0) AS op_bytes,
COALESCE(SUM(hits), 0) AS hits,
COALESCE(SUM(evictions), 0) AS evictions,
COALESCE(SUM(reuses), 0) AS reuses,
COALESCE(SUM(fsyncs), 0) AS fsyncs,
COALESCE(SUM(fsync_time), 0) AS fsync_time
FROM pg_stat_io
GROUP BY backend_type
##
# Per-database metrics # Per-database metrics
##
db_stats: db_stats:
type: row type: row
query: query:
0: > 0: SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, conflicts, temp_files, temp_bytes, deadlocks, blk_read_time, blk_write_time, extract('epoch' from stats_reset)::float FROM pg_stat_database WHERE datname = %(dbname)s
SELECT numbackends, 140000: SELECT numbackends, xact_commit, xact_rollback, blks_read, blks_hit, tup_returned, tup_fetched, tup_inserted, tup_updated, tup_deleted, conflicts, temp_files, temp_bytes, deadlocks, COALESCE(checksum_failures, 0) AS checksum_failures, blk_read_time, blk_write_time, session_time, active_time, idle_in_transaction_time, sessions, sessions_abandoned, sessions_fatal, sessions_killed, extract('epoch' from stats_reset)::float FROM pg_stat_database WHERE datname = %(dbname)s
xact_commit,
xact_rollback,
blks_read,
blks_hit,
tup_returned,
tup_fetched,
tup_inserted,
tup_updated,
tup_deleted,
conflicts,
temp_files,
temp_bytes,
deadlocks,
NULL AS checksum_failures,
blk_read_time,
blk_write_time,
NULL AS session_time,
NULL AS active_time,
NULL AS idle_in_transaction_time,
NULL AS sessions,
NULL AS sessions_abandoned,
NULL AS sessions_fatal,
NULL AS sessions_killed,
extract('epoch' from stats_reset) AS stats_reset
FROM pg_stat_database WHERE datname = %(dbname)s
140000: >
SELECT numbackends,
xact_commit,
xact_rollback,
blks_read,
blks_hit,
tup_returned,
tup_fetched,
tup_inserted,
tup_updated,
tup_deleted,
conflicts,
temp_files,
temp_bytes,
deadlocks,
COALESCE(checksum_failures, 0) AS checksum_failures,
blk_read_time,
blk_write_time,
session_time,
active_time,
idle_in_transaction_time,
sessions,
sessions_abandoned,
sessions_fatal,
sessions_killed,
extract('epoch' from stats_reset) AS stats_reset
FROM pg_stat_database WHERE datname = %(dbname)s
test_args:
dbname: postgres
hit_ratios:
type: row
query:
0: >
SELECT sum(heap_blks_read)::float / NULLIF(sum(heap_blks_read + heap_blks_hit), 0) AS avg_heap_hit_ratio,
sum(idx_blks_hit)::float / NULLIF(sum(idx_blks_read + idx_blks_hit), 0) AS avg_idx_hit_ratio,
sum(toast_blks_hit)::float / NULLIF(sum(toast_blks_read + toast_blks_hit), 0) AS avg_toast_hit_ratio,
sum(tidx_blks_hit)::float / NULLIF(sum(tidx_blks_read + tidx_blks_hit), 0) AS avg_tidx_hit_ratio
FROM pg_statio_all_tables
test_args:
dbname: postgres
activity:
type: set
query:
0: >
SELECT state,
count(*) AS backend_count,
COALESCE(EXTRACT(EPOCH FROM max(now() - state_change)), 0) AS max_state_time
FROM pg_stat_activity
WHERE datname = %(dbname)s
GROUP BY state
test_args:
dbname: postgres
sequence_usage:
type: value
query:
# 9.2 lacks lateral joins, the pg_sequence_last_value function, and the pg_sequences view
# 0: >
# SELECT COALESCE(MAX(pg_sequence_last_value(c.oid)::float / (pg_sequence_parameters(oid)).maximum_value), 0) AS max_usage
# FROM pg_class c
# WHERE c.relkind = 'S'
# 9.3 - 9.6 lacks the pg_sequence_last_value function, and pg_sequences view
# 90300: >
# SELECT COALESCE(MAX(pg_sequence_last_value(c.oid)::float / s.maximum_value), 0) AS max_usage
# FROM pg_class c
# CROSS JOIN LATERAL pg_sequence_parameters(c.oid) AS s
# WHERE c.relkind = 'S'
100000: SELECT COALESCE(MAX(last_value::float / max_value), 0) AS max_usage FROM pg_sequences;
test_args:
dbname: postgres
sequence_visibility:
type: row
query:
100000: >
SELECT COUNT(*) FILTER (WHERE has_sequence_privilege(c.oid, 'SELECT,USAGE')) AS visible_sequences,
COUNT(*) AS total_sequences
FROM pg_class AS c
WHERE relkind = 'S';
##
# Per-replication metrics
##
rep_stats:
type: row
query:
90400: >
SELECT pid, usename,
EXTRACT(EPOCH FROM backend_start) AS backend_start,
state,
pg_xlog_location_diff(pg_current_xlog_location(), sent_location) AS sent_lsn,
pg_xlog_location_diff(pg_current_xlog_location(), write_location) AS write_lsn,
pg_xlog_location_diff(pg_current_xlog_location(), flush_location) AS flush_lsn,
pg_xlog_location_diff(pg_current_xlog_location(), replay_location) AS replay_lsn,
NULL AS write_lag,
NULL AS flush_lag,
NULL AS replay_lag,
sync_state
FROM pg_stat_replication
WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
100000: >
SELECT pid, usename,
EXTRACT(EPOCH FROM backend_start) AS backend_start,
state,
pg_wal_lsn_diff(pg_current_wal_lsn(), sent_lsn) AS sent_lsn,
pg_wal_lsn_diff(pg_current_wal_lsn(), write_lsn) AS write_lsn,
pg_wal_lsn_diff(pg_current_wal_lsn(), flush_lsn) AS flush_lsn,
pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lsn,
COALESCE(EXTRACT(EPOCH FROM write_lag), 0) AS write_lag,
COALESCE(EXTRACT(EPOCH FROM flush_lag), 0) AS flush_lag,
COALESCE(EXTRACT(EPOCH FROM replay_lag), 0) AS replay_lag,
sync_state
FROM pg_stat_replication
WHERE host(client_addr) || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = %(repid)s
test_args:
repid: 127.0.0.1_test_rep
##
# Per-slot metrics
##
slot_stats:
type: row
query:
90400: >
SELECT NULL as active_pid,
xmin,
pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn) AS restart_bytes,
NULL AS confirmed_flush_bytes
FROM pg_replication_slots WHERE slot_name = %(slot)s
90600: >
SELECT active_pid,
xmin,
pg_xlog_location_diff(pg_current_xlog_location(), restart_lsn) AS restart_bytes,
pg_xlog_location_diff(pg_current_xlog_location(), confirmed_flush_lsn) AS confirmed_flush_bytes
FROM pg_replication_slots WHERE slot_name = %(slot)s
100000: >
SELECT active_pid,
xmin,
pg_wal_lsn_diff(pg_current_wal_lsn(), restart_lsn) AS restart_bytes,
pg_wal_lsn_diff(pg_current_wal_lsn(), confirmed_flush_lsn) AS confirmed_flush_bytes
FROM pg_replication_slots WHERE slot_name = %(slot)s
test_args:
slot: test_slot
##
# Debugging # Debugging
##
ntables: ntables:
type: value type: value
query: query:
0: SELECT count(*) AS ntables FROM pg_stat_user_tables 0: SELECT count(*) AS ntables FROM pg_stat_user_tables
# Per-replication metrics
rep_stats:
type: row
query:
0: SELECT * FROM pg_stat_database WHERE client_addr || '_' || regexp_replace(application_name, '[ ,]', '_', 'g') = '{repid}'
# Debugging
sleep:
type: value
query:
0: SELECT now(), pg_sleep(5);

View File

@ -1,9 +1,3 @@
# The address the agent binds to
#address: 127.0.0.1
# The port the agent listens on for requests
#port: 5400
# Min PostgreSQL connection pool size (per database) # Min PostgreSQL connection pool size (per database)
#min_pool_size: 0 #min_pool_size: 0
@ -29,9 +23,6 @@
# Default database to connect to when none is specified for a metric # Default database to connect to when none is specified for a metric
#dbname: 'postgres' #dbname: 'postgres'
# SSL connection mode
#ssl_mode: require
# Timeout for getting a connection slot from a pool # Timeout for getting a connection slot from a pool
#pool_slot_timeout: 5 #pool_slot_timeout: 5

View File

@ -4,7 +4,6 @@ import yaml
import json import json
import time import time
import os import os
import sys
import argparse import argparse
import logging import logging
@ -12,7 +11,7 @@ import logging
from datetime import datetime, timedelta from datetime import datetime, timedelta
import psycopg2 import psycopg2
from psycopg2.extras import RealDictCursor from psycopg2.extras import DictCursor
from psycopg2.pool import ThreadedConnectionPool from psycopg2.pool import ThreadedConnectionPool
from contextlib import contextmanager from contextlib import contextmanager
@ -24,12 +23,7 @@ from http.server import BaseHTTPRequestHandler, HTTPServer
from http.server import ThreadingHTTPServer from http.server import ThreadingHTTPServer
from urllib.parse import urlparse, parse_qs from urllib.parse import urlparse, parse_qs
import requests VERSION = '0.1.0'
import re
from decimal import Decimal
VERSION = "1.0.4"
# Configuration # Configuration
config = {} config = {}
@ -48,12 +42,6 @@ cluster_version = None
cluster_version_next_check = None cluster_version_next_check = None
cluster_version_lock = Lock() cluster_version_lock = Lock()
# PostgreSQL latest version information
latest_version = None
latest_version_next_check = None
latest_version_lock = Lock()
release_supported = None
# Running state (used to gracefully shut down) # Running state (used to gracefully shut down)
running = True running = True
@ -65,78 +53,63 @@ config_file = None
# Configure logging # Configure logging
log = logging.getLogger(__name__) log = logging.getLogger(__name__)
formatter = logging.Formatter( formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(filename)s: %(funcName)s() line %(lineno)d: %(message)s')
"%(asctime)s - %(levelname)s - %(filename)s: %(funcName)s() line %(lineno)d: %(message)s"
)
console_log_handler = logging.StreamHandler() console_log_handler = logging.StreamHandler()
console_log_handler.setFormatter(formatter) console_log_handler.setFormatter(formatter)
log.addHandler(console_log_handler) log.addHandler(console_log_handler)
# Error types # Error types
class ConfigError(Exception): class ConfigError(Exception):
pass pass
class DisconnectedError(Exception): class DisconnectedError(Exception):
pass pass
class UnhappyDBError(Exception): class UnhappyDBError(Exception):
pass pass
class UnknownMetricError(Exception):
pass
class MetricVersionError(Exception): class MetricVersionError(Exception):
pass pass
class LatestVersionCheckError(Exception):
pass
# Default config settings # Default config settings
default_config = { default_config = {
# The address the agent binds to
"address": "127.0.0.1",
# The port the agent listens on for requests
"port": 5400,
# Min PostgreSQL connection pool size (per database) # Min PostgreSQL connection pool size (per database)
"min_pool_size": 0, 'min_pool_size': 0,
# Max PostgreSQL connection pool size (per database) # Max PostgreSQL connection pool size (per database)
"max_pool_size": 4, 'max_pool_size': 4,
# How long a connection can sit idle in the pool before it's removed (seconds) # How long a connection can sit idle in the pool before it's removed (seconds)
"max_idle_time": 30, 'max_idle_time': 30,
# Log level for stderr logging # Log level for stderr logging
"log_level": "error", 'log_level': 'error',
# Database user to connect as # Database user to connect as
"dbuser": "postgres", 'dbuser': 'postgres',
# Database host # Database host
"dbhost": "/var/run/postgresql", 'dbhost': '/var/run/postgresql',
# Database port # Database port
"dbport": 5432, 'dbport': 5432,
# Default database to connect to when none is specified for a metric # Default database to connect to when none is specified for a metric
"dbname": "postgres", 'dbname': 'postgres',
# SSL connection mode
"ssl_mode": "require",
# Timeout for getting a connection slot from a pool # Timeout for getting a connection slot from a pool
"pool_slot_timeout": 5, 'pool_slot_timeout': 5,
# PostgreSQL connection timeout (seconds) # PostgreSQL connection timeout (seconds)
# Note: It can actually be double this because of retries # Note: It can actually be double this because of retries
"connect_timeout": 5, 'connect_timeout': 5,
# Time to wait before trying to reconnect again after a reconnect failure (seconds)
"reconnect_cooldown": 30,
# How often to check the version of PostgreSQL (seconds)
"version_check_period": 300,
# How often to check the latest supported version of PostgreSQL (seconds)
"latest_version_check_period": 86400,
# Metrics
"metrics": {},
}
# Time to wait before trying to reconnect again after a reconnect failure (seconds)
'reconnect_cooldown': 30,
# How often to check the version of PostgreSQL (seconds)
'version_check_period': 300,
# Metrics
'metrics': {}
}
def update_deep(d1, d2): def update_deep(d1, d2):
""" """
@ -151,33 +124,24 @@ def update_deep(d1, d2):
The new d1 The new d1
""" """
if not isinstance(d1, dict) or not isinstance(d2, dict): if not isinstance(d1, dict) or not isinstance(d2, dict):
raise TypeError("Both arguments to update_deep need to be dictionaries") raise TypeError('Both arguments to update_deep need to be dictionaries')
for k, v2 in d2.items(): for k, v2 in d2.items():
if isinstance(v2, dict): if isinstance(v2, dict):
v1 = d1.get(k, {}) v1 = d1.get(k, {})
if not isinstance(v1, dict): if not isinstance(v1, dict):
raise TypeError( raise TypeError('Type mismatch between dictionaries: {} is not a dict'.format(type(v1).__name__))
"Type mismatch between dictionaries: {} is not a dict".format(
type(v1).__name__
)
)
d1[k] = update_deep(v1, v2) d1[k] = update_deep(v1, v2)
elif isinstance(v2, list): elif isinstance(v2, list):
v1 = d1.get(k, []) v1 = d1.get(k, [])
if not isinstance(v1, list): if not isinstance(v1, list):
raise TypeError( raise TypeError('Type mismatch between dictionaries: {} is not a list'.format(type(v1).__name__))
"Type mismatch between dictionaries: {} is not a list".format(
type(v1).__name__
)
)
d1[k] = v1 + v2 d1[k] = v1 + v2
else: else:
d1[k] = v2 d1[k] = v2
return d1 return d1
def read_config(path, included = False):
def read_config(path, included=False):
""" """
Read a config file. Read a config file.
@ -187,7 +151,7 @@ def read_config(path, included=False):
""" """
# Read config file # Read config file
log.info("Reading log file: {}".format(path)) log.info("Reading log file: {}".format(path))
with open(path, "r") as f: with open(path, 'r') as f:
try: try:
cfg = yaml.safe_load(f) cfg = yaml.safe_load(f)
except yaml.parser.ParserError as e: except yaml.parser.ParserError as e:
@ -197,52 +161,41 @@ def read_config(path, included=False):
config_base = os.path.dirname(path) config_base = os.path.dirname(path)
# Read any external queries and validate metric definitions # Read any external queries and validate metric definitions
for name, metric in cfg.get("metrics", {}).items(): for name, metric in cfg.get('metrics', {}).items():
# Validate return types # Validate return types
try: try:
if metric["type"] not in ["value", "row", "column", "set"]: if metric['type'] not in ['value', 'row', 'column', 'set']:
raise ConfigError( raise ConfigError("Invalid return type: {} for metric {} in {}".format(metric['type'], name, path))
"Invalid return type: {} for metric {} in {}".format(
metric["type"], name, path
)
)
except KeyError: except KeyError:
raise ConfigError( raise ConfigError("No type specified for metric {} in {}".format(name, path))
"No type specified for metric {} in {}".format(name, path)
)
# Ensure queries exist # Ensure queries exist
query_dict = metric.get("query", {}) query_dict = metric.get('query', {})
if type(query_dict) is not dict: if type(query_dict) is not dict:
raise ConfigError( raise ConfigError("Query definition should be a dictionary, got: {} for metric {} in {}".format(query_dict, name, path))
"Query definition should be a dictionary, got: {} for metric {} in {}".format(
query_dict, name, path
)
)
if len(query_dict) == 0: if len(query_dict) == 0:
raise ConfigError("Missing queries for metric {} in {}".format(name, path)) raise ConfigError("Missing queries for metric {} in {}".format(name, path))
# Read external sql files and validate version keys # Read external sql files and validate version keys
for vers, query in metric["query"].items(): for vers, query in metric['query'].items():
try: try:
int(vers) int(vers)
except: except:
raise ConfigError( raise ConfigError("Invalid version: {} for metric {} in {}".format(vers, name, path))
"Invalid version: {} for metric {} in {}".format(vers, name, path)
)
if query.startswith("file:"): if query.startswith('file:'):
query_path = query[5:] query_path = query[5:]
if not query_path.startswith("/"): if not query_path.startswith('/'):
query_path = os.path.join(config_base, query_path) query_path = os.path.join(config_base, query_path)
with open(query_path, "r") as f: with open(query_path, 'r') as f:
metric["query"][vers] = f.read() metric['query'][vers] = f.read()
# Read any included config files # Read any included config files
for inc in cfg.get("include", []): for inc in cfg.get('include', []):
# Prefix relative paths with the directory from the current config # Prefix relative paths with the directory from the current config
if not inc.startswith("/"): if not inc.startswith('/'):
inc = os.path.join(config_base, inc) inc = os.path.join(config_base, inc)
update_deep(cfg, read_config(inc, included=True)) update_deep(cfg, read_config(inc, included=True))
@ -256,26 +209,19 @@ def read_config(path, included=False):
update_deep(new_config, cfg) update_deep(new_config, cfg)
# Minor sanity checks # Minor sanity checks
if len(new_config["metrics"]) == 0: if len(new_config['metrics']) == 0:
log.error("No metrics are defined") log.error("No metrics are defined")
raise ConfigError("No metrics defined") raise ConfigError("No metrics defined")
# Validate the new log level before changing the config # Validate the new log level before changing the config
if new_config["log_level"].upper() not in [ if new_config['log_level'].upper() not in ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']:
"DEBUG", raise ConfigError("Invalid log level: {}".format(new_config['log_level']))
"INFO",
"WARNING",
"ERROR",
"CRITICAL",
]:
raise ConfigError("Invalid log level: {}".format(new_config["log_level"]))
global config global config
config = new_config config = new_config
# Apply changes to log level # Apply changes to log level
log.setLevel(logging.getLevelName(config["log_level"].upper())) log.setLevel(logging.getLevelName(config['log_level'].upper()))
def signal_handler(sig, frame): def signal_handler(sig, frame):
""" """
@ -287,7 +233,7 @@ def signal_handler(sig, frame):
signal.signal(signal.SIGINT, signal.default_int_handler) signal.signal(signal.SIGINT, signal.default_int_handler)
# Signal everything to shut down # Signal everything to shut down
if sig in [signal.SIGINT, signal.SIGTERM, signal.SIGQUIT]: if sig in [ signal.SIGINT, signal.SIGTERM, signal.SIGQUIT ]:
log.info("Shutting down ...") log.info("Shutting down ...")
global running global running
running = False running = False
@ -299,11 +245,10 @@ def signal_handler(sig, frame):
log.warning("Received config reload signal") log.warning("Received config reload signal")
read_config(config_file) read_config(config_file)
class ConnectionPool(ThreadedConnectionPool): class ConnectionPool(ThreadedConnectionPool):
def __init__(self, dbname, minconn, maxconn, *args, **kwargs): def __init__(self, dbname, minconn, maxconn, *args, **kwargs):
# Make sure dbname isn't different in the kwargs # Make sure dbname isn't different in the kwargs
kwargs["dbname"] = dbname kwargs['dbname'] = dbname
super().__init__(minconn, maxconn, *args, **kwargs) super().__init__(minconn, maxconn, *args, **kwargs)
self.name = dbname self.name = dbname
@ -325,10 +270,7 @@ class ConnectionPool(ThreadedConnectionPool):
except psycopg2.pool.PoolError: except psycopg2.pool.PoolError:
# If we failed to get the connection slot, wait a bit and try again # If we failed to get the connection slot, wait a bit and try again
time.sleep(0.1) time.sleep(0.1)
raise TimeoutError( raise TimeoutError("Timed out waiting for an available connection to {}".format(self.name))
"Timed out waiting for an available connection to {}".format(self.name)
)
def get_pool(dbname): def get_pool(dbname):
""" """
@ -346,32 +288,26 @@ def get_pool(dbname):
# lock # lock
if dbname not in connections: if dbname not in connections:
log.info("Creating connection pool for: {}".format(dbname)) log.info("Creating connection pool for: {}".format(dbname))
# Actually create the connection pool
connections[dbname] = ConnectionPool( connections[dbname] = ConnectionPool(
dbname, dbname,
int(config["min_pool_size"]), int(config['min_pool_size']),
int(config["max_pool_size"]), int(config['max_pool_size']),
application_name="pgmon", application_name='pgmon',
host=config["dbhost"], host=config['dbhost'],
port=config["dbport"], port=config['dbport'],
user=config["dbuser"], user=config['dbuser'],
connect_timeout=int(config["connect_timeout"]), connect_timeout=float(config['connect_timeout']),
sslmode=config["ssl_mode"], sslmode='require')
)
# Clear the unhappy indicator if present # Clear the unhappy indicator if present
unhappy_cooldown.pop(dbname, None) unhappy_cooldown.pop(dbname, None)
return connections[dbname] return connections[dbname]
def handle_connect_failure(pool): def handle_connect_failure(pool):
""" """
Mark the database as being unhappy so we can leave it alone for a while Mark the database as being unhappy so we can leave it alone for a while
""" """
dbname = pool.name dbname = pool.name
unhappy_cooldown[dbname] = datetime.now() + timedelta( unhappy_cooldown[dbname] = datetime.now() + timedelta(seconds=int(config['reconnect_cooldown']))
seconds=int(config["reconnect_cooldown"])
)
def get_query(metric, version): def get_query(metric, version):
""" """
@ -382,61 +318,42 @@ def get_query(metric, version):
version: The PostgreSQL version number, as given by server_version_num version: The PostgreSQL version number, as given by server_version_num
""" """
# Select the correct query # Select the correct query
for v in reversed(sorted(metric["query"].keys())): for v in reversed(sorted(metric['query'].keys())):
if version >= v: if version >= v:
if len(metric["query"][v].strip()) == 0: if len(metric['query'][v].strip()) == 0:
raise MetricVersionError( raise MetricVersionError("Metric no longer applies to PostgreSQL {}".format(version))
"Metric no longer applies to PostgreSQL {}".format(version) return metric['query'][v]
)
return metric["query"][v]
raise MetricVersionError("Missing metric query for PostgreSQL {}".format(version)) raise MetricVersionError('Missing metric query for PostgreSQL {}'.format(version))
def json_encode_special(obj):
"""
Encoder function to handle types the standard JSON package doesn't know what
to do with
"""
if isinstance(obj, Decimal):
return float(obj)
raise TypeError(f'Cannot serialize object of {type(obj)}')
def run_query_no_retry(pool, return_type, query, args): def run_query_no_retry(pool, return_type, query, args):
""" """
Run the query with no explicit retry code Run the query with no explicit retry code
""" """
with pool.connection(float(config["connect_timeout"])) as conn: with pool.connection(float(config['connect_timeout'])) as conn:
try: try:
with conn.cursor(cursor_factory=RealDictCursor) as curs: with conn.cursor(cursor_factory=DictCursor) as curs:
curs.execute(query, args) curs.execute(query, args)
res = curs.fetchall() res = curs.fetchall()
if return_type == "value": if return_type == 'value':
if len(res) == 0:
return ""
return str(list(res[0].values())[0]) return str(list(res[0].values())[0])
elif return_type == "row": elif return_type == 'row':
if len(res) == 0: return json.dumps(res[0])
return "[]" elif return_type == 'column':
return json.dumps(res[0], default=json_encode_special) return json.dumps([list(r.values())[0] for r in res])
elif return_type == "column": elif return_type == 'set':
if len(res) == 0: return json.dumps(res)
return "[]"
return json.dumps([list(r.values())[0] for r in res], default=json_encode_special)
elif return_type == "set":
return json.dumps(res, default=json_encode_special)
except: except:
dbname = pool.name dbname = pool.name
if dbname in unhappy_cooldown: if dbname in unhappy_cooldown:
raise UnhappyDBError() raise UnhappyDBError()
elif conn.closed != 0: elif conn.broken:
raise DisconnectedError() raise DisconnectedError()
else: else:
raise raise
def run_query(pool, return_type, query, args): def run_query(pool, return_type, query, args):
""" """
Run the query, and if we find upon the first attempt that the connection Run the query, and if we find upon the first attempt that the connection
@ -467,7 +384,6 @@ def run_query(pool, return_type, query, args):
handle_connect_failure(pool) handle_connect_failure(pool)
raise UnhappyDBError() raise UnhappyDBError()
def get_cluster_version(): def get_cluster_version():
""" """
Get the PostgreSQL version if we don't already know it, or if it's been Get the PostgreSQL version if we don't already know it, or if it's been
@ -479,228 +395,26 @@ def get_cluster_version():
# If we don't know the version or it's past the recheck time, get the # If we don't know the version or it's past the recheck time, get the
# version from the database. Only one thread needs to do this, so they all # version from the database. Only one thread needs to do this, so they all
# try to grab the lock, and then make sure nobody else beat them to it. # try to grab the lock, and then make sure nobody else beat them to it.
if ( if cluster_version is None or cluster_version_next_check is None or cluster_version_next_check < datetime.now():
cluster_version is None
or cluster_version_next_check is None
or cluster_version_next_check < datetime.now()
):
with cluster_version_lock: with cluster_version_lock:
# Only check if nobody already got the version before us # Only check if nobody already got the version before us
if ( if cluster_version is None or cluster_version_next_check is None or cluster_version_next_check < datetime.now():
cluster_version is None log.info('Checking PostgreSQL cluster version')
or cluster_version_next_check is None pool = get_pool(config['dbname'])
or cluster_version_next_check < datetime.now() cluster_version = int(run_query(pool, 'value', 'SHOW server_version_num', None))
): cluster_version_next_check = datetime.now() + timedelta(seconds=int(config['version_check_period']))
log.info("Checking PostgreSQL cluster version")
pool = get_pool(config["dbname"])
cluster_version = int(
run_query(pool, "value", "SHOW server_version_num", None)
)
cluster_version_next_check = datetime.now() + timedelta(
seconds=int(config["version_check_period"])
)
log.info("Got PostgreSQL cluster version: {}".format(cluster_version)) log.info("Got PostgreSQL cluster version: {}".format(cluster_version))
log.debug( log.debug("Next PostgreSQL cluster version check will be after: {}".format(cluster_version_next_check))
"Next PostgreSQL cluster version check will be after: {}".format(
cluster_version_next_check
)
)
return cluster_version return cluster_version
def version_num_to_release(version_num):
"""
Extract the revease from a version_num.
In other words, this converts things like:
90603 => 9.6
130010 => 13
"""
if version_num // 10000 < 10:
return version_num // 10000 + (version_num % 10000 // 100 / 10)
else:
return version_num // 10000
def parse_version_rss(raw_rss, release):
"""
Parse the raw RSS from the versions.rss feed to extract the latest version of
PostgreSQL that's availabe for the cluster being monitored.
This sets these global variables:
latest_version
release_supported
It is expected that the caller already holds the latest_version_lock lock.
params:
raw_rss: The raw rss text from versions.rss
release: The PostgreSQL release we care about (ex: 9.2, 14)
"""
global latest_version
global release_supported
# Regular expressions for parsing the RSS document
version_line = re.compile(
r".*?([0-9][0-9.]+) is the latest release in the {} series.*".format(release)
)
unsupported_line = re.compile(r"^This version is unsupported")
# Loop through the RSS until we find the current release
release_found = False
for line in raw_rss.splitlines():
m = version_line.match(line)
if m:
# Note that we found the version we were looking for
release_found = True
# Convert the version to version_num format
version = m.group(1)
parts = list(map(int, version.split(".")))
if parts[0] < 10:
latest_version = int(
"{}{:02}{:02}".format(parts[0], parts[1], parts[2])
)
else:
latest_version = int("{}00{:02}".format(parts[0], parts[1]))
elif release_found:
# The next line after the version tells if the version is supported
if unsupported_line.match(line):
release_supported = False
else:
release_supported = True
break
# Make sure we actually found it
if not release_found:
raise LatestVersionCheckError("Current release ({}) not found".format(release))
log.info(
"Got latest PostgreSQL version: {} supported={}".format(
latest_version, release_supported
)
)
log.debug(
"Next latest PostgreSQL version check will be after: {}".format(
latest_version_next_check
)
)
def get_latest_version():
"""
Get the latest supported version of the major PostgreSQL release running on the server being monitored.
"""
global latest_version_next_check
# If we don't know the latest version or it's past the recheck time, get the
# version from the PostgreSQL RSS feed. Only one thread needs to do this, so
# they all try to grab the lock, and then make sure nobody else beat them to it.
if (
latest_version is None
or latest_version_next_check is None
or latest_version_next_check < datetime.now()
):
# Note: we get the cluster version here before grabbing the latest_version_lock
# lock so it's not held while trying to talk with the DB.
release = version_num_to_release(get_cluster_version())
with latest_version_lock:
# Only check if nobody already got the version before us
if (
latest_version is None
or latest_version_next_check is None
or latest_version_next_check < datetime.now()
):
log.info("Checking latest PostgreSQL version")
latest_version_next_check = datetime.now() + timedelta(
seconds=int(config["latest_version_check_period"])
)
# Grab the RSS feed
raw_rss = requests.get("https://www.postgresql.org/versions.rss")
if raw_rss.status_code != 200:
raise LatestVersionCheckError("code={}".format(r.status_code))
# Parse the RSS body and set global variables
parse_version_rss(raw_rss.text, release)
return latest_version
def sample_metric(dbname, metric_name, args, retry=True):
"""
Run the appropriate query for the named metric against the specified database
"""
# Get the metric definition
try:
metric = config["metrics"][metric_name]
except KeyError:
raise UnknownMetricError("Unknown metric: {}".format(metric_name))
# Get the connection pool for the database, or create one if it doesn't
# already exist.
pool = get_pool(dbname)
# Identify the PostgreSQL version
version = get_cluster_version()
# Get the query version
query = get_query(metric, version)
# Execute the quert
if retry:
return run_query(pool, metric["type"], query, args)
else:
return run_query_no_retry(pool, metric["type"], query, args)
def test_queries():
"""
Run all of the metric queries against a database and check the results
"""
# We just use the default db for tests
dbname = config["dbname"]
# Loop through all defined metrics.
for name, metric in config["metrics"].items():
# If the metric has arguments to use while testing, grab those
args = metric.get("test_args", {})
print("Testing {} [{}]".format(name, ", ".join(["{}={}".format(key, value) for key, value in args.items()])))
# When testing against a docker container, we may end up connecting
# before the service is truly up (it restarts during the initialization
# phase). To cope with this, we'll allow a few connection failures.
tries = 5
while True:
# Run the query without the ability to retry
try:
res = sample_metric(dbname, name, args, retry=False)
break
except MetricVersionError:
res = "Unsupported for this version"
break
except psycopg2.OperationalError as e:
print("Error encountered, {} tries left: {}".format(tries, e))
if tries <= 0:
raise
time.sleep(1)
tries -= 1
# Compare the result to the provided sample results
# TODO
print("{} -> {}".format(name, res))
# Return the number of errors
# TODO
return 0
class SimpleHTTPRequestHandler(BaseHTTPRequestHandler): class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
""" """
This is our request handling server. It is responsible for listening for This is our request handling server. It is responsible for listening for
requests, processing them, and responding. requests, processing them, and responding.
""" """
def log_request(self, code="-", size="-"): def log_request(self, code='-', size='-'):
""" """
Override to suppress standard request logging Override to suppress standard request logging
""" """
@ -722,58 +436,71 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
""" """
# Parse the URL # Parse the URL
parsed_path = urlparse(self.path) parsed_path = urlparse(self.path)
metric_name = parsed_path.path.strip("/") name = parsed_path.path.strip('/')
parsed_query = parse_qs(parsed_path.query) parsed_query = parse_qs(parsed_path.query)
if metric_name == "agent_version": if name == 'agent_version':
self._reply(200, VERSION) self._reply(200, VERSION)
return return
elif metric_name == "latest_version_info":
try:
get_latest_version()
self._reply(
200,
json.dumps(
{
"latest": latest_version,
"supported": 1 if release_supported else 0,
}
),
)
except LatestVersionCheckError as e:
log.error("Failed to retrieve latest version information: {}".format(e))
self._reply(503, "Failed to retrieve latest version info")
return
# Note: parse_qs returns the values as a list. Since we always expect # Note: parse_qs returns the values as a list. Since we always expect
# single values, just grab the first from each. # single values, just grab the first from each.
args = {key: values[0] for key, values in parsed_query.items()} args = {key: values[0] for key, values in parsed_query.items()}
# Get the metric definition
try:
metric = config['metrics'][name]
except KeyError:
log.error("Unknown metric: {}".format(name))
self._reply(404, 'Unknown metric')
return
# Get the dbname. If none was provided, use the default from the # Get the dbname. If none was provided, use the default from the
# config. # config.
dbname = args.get("dbname", config["dbname"]) dbname = args.get('dbname', config['dbname'])
# Sample the metric # Get the connection pool for the database, or create one if it doesn't
# already exist.
try: try:
self._reply(200, sample_metric(dbname, metric_name, args)) pool = get_pool(dbname)
return except UnhappyDBError:
except UnknownMetricError as e:
log.error("Unknown metric: {}".format(metric_name))
self._reply(404, "Unknown metric")
return
except MetricVersionError as e:
log.error(
"Failed to find a version of {} for {}".format(metric_name, version)
)
self._reply(404, "Unsupported version")
return
except UnhappyDBError as e:
log.info("Database {} is unhappy, please be patient".format(dbname)) log.info("Database {} is unhappy, please be patient".format(dbname))
self._reply(503, "Database unavailable") self._reply(503, 'Database unavailable')
return
# Identify the PostgreSQL version
try:
version = get_cluster_version()
except UnhappyDBError:
return return
except Exception as e: except Exception as e:
if dbname in unhappy_cooldown:
log.info("Database {} is unhappy, please be patient".format(dbname))
self._reply(503, 'Database unavailable')
else:
log.error("Failed to get PostgreSQL version: {}".format(e))
self._reply(500, 'Error getting DB version')
return
# Get the query version
try:
query = get_query(metric, version)
except KeyError:
log.error("Failed to find a version of {} for {}".format(name, version))
self._reply(404, 'Unsupported version')
return
# Execute the quert
try:
self._reply(200, run_query(pool, metric['type'], query, args))
return
except Exception as e:
if dbname in unhappy_cooldown:
log.info("Database {} is unhappy, please be patient".format(dbname))
self._reply(503, 'Database unavailable')
else:
log.error("Error running query: {}".format(e)) log.error("Error running query: {}".format(e))
self._reply(500, "Unexpected error: {}".format(e)) self._reply(500, "Error running query")
return return
def _reply(self, code, content): def _reply(self, code, content):
@ -781,29 +508,19 @@ class SimpleHTTPRequestHandler(BaseHTTPRequestHandler):
Send a reply to the client Send a reply to the client
""" """
self.send_response(code) self.send_response(code)
self.send_header("Content-type", "application/json") self.send_header('Content-type', 'application/json')
self.end_headers() self.end_headers()
self.wfile.write(bytes(content, "utf-8")) self.wfile.write(bytes(content, 'utf-8'))
if __name__ == '__main__':
if __name__ == "__main__":
# Handle cli args # Handle cli args
parser = argparse.ArgumentParser( parser = argparse.ArgumentParser(
prog="pgmon", description="A PostgreSQL monitoring agent" prog = 'pgmon',
) description='A PostgreSQL monitoring agent')
parser.add_argument( parser.add_argument('config_file', default='pgmon.yml', nargs='?',
"-c", help='The config file to read (default: %(default)s)')
"--config_file",
default="pgmon.yml",
nargs="?",
help="The config file to read (default: %(default)s)",
)
parser.add_argument(
"-t", "--test", action="store_true", help="Run query tests and exit"
)
args = parser.parse_args() args = parser.parse_args()
@ -813,16 +530,8 @@ if __name__ == "__main__":
# Read the config file # Read the config file
read_config(config_file) read_config(config_file)
# Run query tests and exit if test mode is enabled
if args.test:
errors = test_queries()
if errors > 0:
sys.exit(1)
else:
sys.exit(0)
# Set up the http server to receive requests # Set up the http server to receive requests
server_address = (config["address"], config["port"]) server_address = ('127.0.0.1', config['port'])
httpd = ThreadingHTTPServer(server_address, SimpleHTTPRequestHandler) httpd = ThreadingHTTPServer(server_address, SimpleHTTPRequestHandler)
# Set up the signal handler # Set up the signal handler
@ -830,7 +539,7 @@ if __name__ == "__main__":
signal.signal(signal.SIGHUP, signal_handler) signal.signal(signal.SIGHUP, signal_handler)
# Handle requests. # Handle requests.
log.info("Listening on port {}...".format(config["port"])) log.info("Listening on port {}...".format(config['port']))
while running: while running:
httpd.handle_request() httpd.handle_request()

View File

@ -5,104 +5,11 @@ import tempfile
import logging import logging
from decimal import Decimal
import json
import pgmon import pgmon
# Silence most logging output # Silence most logging output
logging.disable(logging.CRITICAL) logging.disable(logging.CRITICAL)
versions_rss = """
<?xml version="1.0" encoding="utf-8"?>
<rss version="2.0" xmlns:atom="http://www.w3.org/2005/Atom"><channel><title>PostgreSQL latest versions</title><link>https://www.postgresql.org/</link><description>PostgreSQL latest versions</description><atom:link href="https://www.postgresql.org/versions.rss" rel="self"/><language>en-us</language><lastBuildDate>Thu, 08 May 2025 00:00:00 +0000</lastBuildDate><item><title>17.5
</title><link>https://www.postgresql.org/docs/17/release-17-5.html</link><description>17.5 is the latest release in the 17 series.
</description><pubDate>Thu, 08 May 2025 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/17/release-17-5.html</guid></item><item><title>16.9
</title><link>https://www.postgresql.org/docs/16/release-16-9.html</link><description>16.9 is the latest release in the 16 series.
</description><pubDate>Thu, 08 May 2025 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/16/release-16-9.html</guid></item><item><title>15.13
</title><link>https://www.postgresql.org/docs/15/release-15-13.html</link><description>15.13 is the latest release in the 15 series.
</description><pubDate>Thu, 08 May 2025 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/15/release-15-13.html</guid></item><item><title>14.18
</title><link>https://www.postgresql.org/docs/14/release-14-18.html</link><description>14.18 is the latest release in the 14 series.
</description><pubDate>Thu, 08 May 2025 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/14/release-14-18.html</guid></item><item><title>13.21
</title><link>https://www.postgresql.org/docs/13/release-13-21.html</link><description>13.21 is the latest release in the 13 series.
</description><pubDate>Thu, 08 May 2025 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/13/release-13-21.html</guid></item><item><title>12.22
</title><link>https://www.postgresql.org/docs/12/release-12-22.html</link><description>12.22 is the latest release in the 12 series.
This version is unsupported!
</description><pubDate>Thu, 21 Nov 2024 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/12/release-12-22.html</guid></item><item><title>11.22
</title><link>https://www.postgresql.org/docs/11/release-11-22.html</link><description>11.22 is the latest release in the 11 series.
This version is unsupported!
</description><pubDate>Thu, 09 Nov 2023 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/11/release-11-22.html</guid></item><item><title>10.23
</title><link>https://www.postgresql.org/docs/10/release-10-23.html</link><description>10.23 is the latest release in the 10 series.
This version is unsupported!
</description><pubDate>Thu, 10 Nov 2022 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/10/release-10-23.html</guid></item><item><title>9.6.24
</title><link>https://www.postgresql.org/docs/9.6/release-9-6-24.html</link><description>9.6.24 is the latest release in the 9.6 series.
This version is unsupported!
</description><pubDate>Thu, 11 Nov 2021 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/9.6/release-9-6-24.html</guid></item><item><title>9.5.25
</title><link>https://www.postgresql.org/docs/9.5/release-9-5-25.html</link><description>9.5.25 is the latest release in the 9.5 series.
This version is unsupported!
</description><pubDate>Thu, 11 Feb 2021 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/9.5/release-9-5-25.html</guid></item><item><title>9.4.26
</title><link>https://www.postgresql.org/docs/9.4/release-9-4-26.html</link><description>9.4.26 is the latest release in the 9.4 series.
This version is unsupported!
</description><pubDate>Thu, 13 Feb 2020 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/9.4/release-9-4-26.html</guid></item><item><title>9.3.25
</title><link>https://www.postgresql.org/docs/9.3/release-9-3-25.html</link><description>9.3.25 is the latest release in the 9.3 series.
This version is unsupported!
</description><pubDate>Thu, 08 Nov 2018 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/9.3/release-9-3-25.html</guid></item><item><title>9.2.24
</title><link>https://www.postgresql.org/docs/9.2/release-9-2-24.html</link><description>9.2.24 is the latest release in the 9.2 series.
This version is unsupported!
</description><pubDate>Thu, 09 Nov 2017 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/9.2/release-9-2-24.html</guid></item><item><title>9.1.24
</title><link>https://www.postgresql.org/docs/9.1/release-9-1-24.html</link><description>9.1.24 is the latest release in the 9.1 series.
This version is unsupported!
</description><pubDate>Thu, 27 Oct 2016 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/9.1/release-9-1-24.html</guid></item><item><title>9.0.23
</title><link>https://www.postgresql.org/docs/9.0/release-9-0-23.html</link><description>9.0.23 is the latest release in the 9.0 series.
This version is unsupported!
</description><pubDate>Thu, 08 Oct 2015 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/9.0/release-9-0-23.html</guid></item><item><title>8.4.22
</title><link>https://www.postgresql.org/docs/8.4/release-8-4-22.html</link><description>8.4.22 is the latest release in the 8.4 series.
This version is unsupported!
</description><pubDate>Thu, 24 Jul 2014 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/8.4/release-8-4-22.html</guid></item><item><title>8.3.23
</title><link>https://www.postgresql.org/docs/8.3/release-8-3-23.html</link><description>8.3.23 is the latest release in the 8.3 series.
This version is unsupported!
</description><pubDate>Thu, 07 Feb 2013 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/8.3/release-8-3-23.html</guid></item><item><title>8.2.23
</title><link>https://www.postgresql.org/docs/8.2/release-8-2-23.html</link><description>8.2.23 is the latest release in the 8.2 series.
This version is unsupported!
</description><pubDate>Mon, 05 Dec 2011 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/8.2/release-8-2-23.html</guid></item><item><title>8.1.23
</title><link>https://www.postgresql.org/docs/8.1/release.html</link><description>8.1.23 is the latest release in the 8.1 series.
This version is unsupported!
</description><pubDate>Thu, 16 Dec 2010 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/8.1/release.html</guid></item><item><title>8.0.26
</title><link>https://www.postgresql.org/docs/8.0/release.html</link><description>8.0.26 is the latest release in the 8.0 series.
This version is unsupported!
</description><pubDate>Mon, 04 Oct 2010 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/8.0/release.html</guid></item><item><title>7.4.30
</title><link>https://www.postgresql.org/docs/7.4/release.html</link><description>7.4.30 is the latest release in the 7.4 series.
This version is unsupported!
</description><pubDate>Mon, 04 Oct 2010 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/7.4/release.html</guid></item><item><title>7.3.21
</title><link>https://www.postgresql.org/docs/7.3/release.html</link><description>7.3.21 is the latest release in the 7.3 series.
This version is unsupported!
</description><pubDate>Mon, 07 Jan 2008 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/7.3/release.html</guid></item><item><title>7.2.8
</title><link>https://www.postgresql.org/docs/7.2/release.html</link><description>7.2.8 is the latest release in the 7.2 series.
This version is unsupported!
</description><pubDate>Mon, 09 May 2005 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/7.2/release.html</guid></item><item><title>7.1.3
</title><link>https://www.postgresql.org/docs/7.1/release.html</link><description>7.1.3 is the latest release in the 7.1 series.
This version is unsupported!
</description><pubDate>Fri, 17 Aug 2001 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/7.1/release.html</guid></item><item><title>7.0.3
</title><link>https://www.postgresql.org/docs/7.0/release.htm</link><description>7.0.3 is the latest release in the 7.0 series.
This version is unsupported!
</description><pubDate>Sun, 12 Nov 2000 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/7.0/release.htm</guid></item><item><title>6.5.3
</title><link>https://www.postgresql.org/docs/6.5/release.htm</link><description>6.5.3 is the latest release in the 6.5 series.
This version is unsupported!
</description><pubDate>Thu, 04 Nov 1999 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/6.5/release.htm</guid></item><item><title>6.4.2
</title><link>https://www.postgresql.org/docs/6.4/release.htm</link><description>6.4.2 is the latest release in the 6.4 series.
This version is unsupported!
</description><pubDate>Sun, 03 Jan 1999 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/6.4/release.htm</guid></item><item><title>6.3.2
</title><link>https://www.postgresql.org/docs/6.3/c2701.htm</link><description>6.3.2 is the latest release in the 6.3 series.
This version is unsupported!
</description><pubDate>Mon, 23 Feb 1998 00:00:00 +0000</pubDate><guid>https://www.postgresql.org/docs/6.3/c2701.htm</guid></item></channel></rss>
"""
class TestPgmonMethods(unittest.TestCase): class TestPgmonMethods(unittest.TestCase):
## ##
# update_deep # update_deep
@ -115,104 +22,103 @@ class TestPgmonMethods(unittest.TestCase):
self.assertEqual(d1, {}) self.assertEqual(d1, {})
self.assertEqual(d2, {}) self.assertEqual(d2, {})
d1 = {"a": 1} d1 = {'a': 1}
d2 = {} d2 = {}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"a": 1}) self.assertEqual(d1, { 'a': 1 })
self.assertEqual(d2, {}) self.assertEqual(d2, {})
d1 = {} d1 = {}
d2 = {"a": 1} d2 = {'a': 1}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"a": 1}) self.assertEqual(d1, { 'a': 1 })
self.assertEqual(d2, d1) self.assertEqual(d2, d1)
def test_update_deep__scalars(self): def test_update_deep__scalars(self):
# Test adding/updating scalar values # Test adding/updating scalar values
d1 = {"foo": 1, "bar": "text", "hello": "world"} d1 = {'foo': 1, 'bar': "text", 'hello': "world"}
d2 = {"foo": 2, "baz": "blah"} d2 = {'foo': 2, 'baz': "blah"}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"foo": 2, "bar": "text", "baz": "blah", "hello": "world"}) self.assertEqual(d1, {'foo': 2, 'bar': "text", 'baz': "blah", 'hello': "world"})
self.assertEqual(d2, {"foo": 2, "baz": "blah"}) self.assertEqual(d2, {'foo': 2, 'baz': "blah"})
def test_update_deep__lists(self): def test_update_deep__lists(self):
# Test adding to lists # Test adding to lists
d1 = {"lst1": []} d1 = {'lst1': []}
d2 = {"lst1": [1, 2]} d2 = {'lst1': [1, 2]}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"lst1": [1, 2]}) self.assertEqual(d1, {'lst1': [1, 2]})
self.assertEqual(d2, d1) self.assertEqual(d2, d1)
d1 = {"lst1": [1, 2]} d1 = {'lst1': [1, 2]}
d2 = {"lst1": []} d2 = {'lst1': []}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"lst1": [1, 2]}) self.assertEqual(d1, {'lst1': [1, 2]})
self.assertEqual(d2, {"lst1": []}) self.assertEqual(d2, {'lst1': []})
d1 = {"lst1": [1, 2, 3]} d1 = {'lst1': [1, 2, 3]}
d2 = {"lst1": [3, 4]} d2 = {'lst1': [3, 4]}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"lst1": [1, 2, 3, 3, 4]}) self.assertEqual(d1, {'lst1': [1, 2, 3, 3, 4]})
self.assertEqual(d2, {"lst1": [3, 4]}) self.assertEqual(d2, {'lst1': [3, 4]})
# Lists of objects # Lists of objects
d1 = {"lst1": [{"id": 1}, {"id": 2}, {"id": 3}]} d1 = {'lst1': [{'id': 1}, {'id': 2}, {'id': 3}]}
d2 = {"lst1": [{"id": 3}, {"id": 4}]} d2 = {'lst1': [{'id': 3}, {'id': 4}]}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual( self.assertEqual(d1, {'lst1': [{'id': 1}, {'id': 2}, {'id': 3}, {'id': 3}, {'id': 4}]})
d1, {"lst1": [{"id": 1}, {"id": 2}, {"id": 3}, {"id": 3}, {"id": 4}]} self.assertEqual(d2, {'lst1': [{'id': 3}, {'id': 4}]})
)
self.assertEqual(d2, {"lst1": [{"id": 3}, {"id": 4}]})
# Nested lists # Nested lists
d1 = {"obj1": {"l1": [1, 2]}} d1 = {'obj1': {'l1': [1, 2]}}
d2 = {"obj1": {"l1": [3, 4]}} d2 = {'obj1': {'l1': [3, 4]}}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"obj1": {"l1": [1, 2, 3, 4]}}) self.assertEqual(d1, {'obj1': {'l1': [1, 2, 3, 4]}})
self.assertEqual(d2, {"obj1": {"l1": [3, 4]}}) self.assertEqual(d2, {'obj1': {'l1': [3, 4]}})
def test_update_deep__dicts(self): def test_update_deep__dicts(self):
# Test adding to lists # Test adding to lists
d1 = {"obj1": {}} d1 = {'obj1': {}}
d2 = {"obj1": {"a": 1, "b": 2}} d2 = {'obj1': {'a': 1, 'b': 2}}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"obj1": {"a": 1, "b": 2}}) self.assertEqual(d1, {'obj1': {'a': 1, 'b': 2}})
self.assertEqual(d2, d1) self.assertEqual(d2, d1)
d1 = {"obj1": {"a": 1, "b": 2}} d1 = {'obj1': {'a': 1, 'b': 2}}
d2 = {"obj1": {}} d2 = {'obj1': {}}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"obj1": {"a": 1, "b": 2}}) self.assertEqual(d1, {'obj1': {'a': 1, 'b': 2}})
self.assertEqual(d2, {"obj1": {}}) self.assertEqual(d2, {'obj1': {}})
d1 = {"obj1": {"a": 1, "b": 2}} d1 = {'obj1': {'a': 1, 'b': 2}}
d2 = {"obj1": {"a": 5, "c": 12}} d2 = {'obj1': {'a': 5, 'c': 12}}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"obj1": {"a": 5, "b": 2, "c": 12}}) self.assertEqual(d1, {'obj1': {'a': 5, 'b': 2, 'c': 12}})
self.assertEqual(d2, {"obj1": {"a": 5, "c": 12}}) self.assertEqual(d2, {'obj1': {'a': 5, 'c': 12}})
# Nested dicts # Nested dicts
d1 = {"obj1": {"d1": {"a": 1, "b": 2}}} d1 = {'obj1': {'d1': {'a': 1, 'b': 2}}}
d2 = {"obj1": {"d1": {"a": 5, "c": 12}}} d2 = {'obj1': {'d1': {'a': 5, 'c': 12}}}
pgmon.update_deep(d1, d2) pgmon.update_deep(d1, d2)
self.assertEqual(d1, {"obj1": {"d1": {"a": 5, "b": 2, "c": 12}}}) self.assertEqual(d1, {'obj1': {'d1': {'a': 5, 'b': 2, 'c': 12}}})
self.assertEqual(d2, {"obj1": {"d1": {"a": 5, "c": 12}}}) self.assertEqual(d2, {'obj1': {'d1': {'a': 5, 'c': 12}}})
def test_update_deep__types(self): def test_update_deep__types(self):
# Test mismatched types # Test mismatched types
d1 = {"foo": 5} d1 = {'foo': 5}
d2 = None d2 = None
self.assertRaises(TypeError, pgmon.update_deep, d1, d2) self.assertRaises(TypeError, pgmon.update_deep, d1, d2)
d1 = None d1 = None
d2 = {"foo": 5} d2 = {'foo': 5}
self.assertRaises(TypeError, pgmon.update_deep, d1, d2) self.assertRaises(TypeError, pgmon.update_deep, d1, d2)
# Nested mismatched types # Nested mismatched types
d1 = {"foo": [1, 2]} d1 = {'foo': [1, 2]}
d2 = {"foo": {"a": 7}} d2 = {'foo': {'a': 7}}
self.assertRaises(TypeError, pgmon.update_deep, d1, d2) self.assertRaises(TypeError, pgmon.update_deep, d1, d2)
## ##
# get_pool # get_pool
## ##
@ -220,19 +126,20 @@ class TestPgmonMethods(unittest.TestCase):
def test_get_pool__simple(self): def test_get_pool__simple(self):
# Just get a pool in a normal case # Just get a pool in a normal case
pgmon.config.update(pgmon.default_config) pgmon.config.update(pgmon.default_config)
pool = pgmon.get_pool("postgres") pool = pgmon.get_pool('postgres')
self.assertIsNotNone(pool) self.assertIsNotNone(pool)
def test_get_pool__unhappy(self): def test_get_pool__unhappy(self):
# Test getting an unhappy database pool # Test getting an unhappy database pool
pgmon.config.update(pgmon.default_config) pgmon.config.update(pgmon.default_config)
pgmon.unhappy_cooldown["postgres"] = datetime.now() + timedelta(60) pgmon.unhappy_cooldown['postgres'] = datetime.now() + timedelta(60)
self.assertRaises(pgmon.UnhappyDBError, pgmon.get_pool, "postgres") self.assertRaises(pgmon.UnhappyDBError, pgmon.get_pool, 'postgres')
# Test getting a different database when there's an unhappy one # Test getting a different database when there's an unhappy one
pool = pgmon.get_pool("template0") pool = pgmon.get_pool('template0')
self.assertIsNotNone(pool) self.assertIsNotNone(pool)
## ##
# handle_connect_failure # handle_connect_failure
## ##
@ -241,47 +148,70 @@ class TestPgmonMethods(unittest.TestCase):
# Test adding to an empty unhappy list # Test adding to an empty unhappy list
pgmon.config.update(pgmon.default_config) pgmon.config.update(pgmon.default_config)
pgmon.unhappy_cooldown = {} pgmon.unhappy_cooldown = {}
pool = pgmon.get_pool("postgres") pool = pgmon.get_pool('postgres')
pgmon.handle_connect_failure(pool) pgmon.handle_connect_failure(pool)
self.assertGreater(pgmon.unhappy_cooldown["postgres"], datetime.now()) self.assertGreater(pgmon.unhappy_cooldown['postgres'], datetime.now())
# Test adding another database # Test adding another database
pool = pgmon.get_pool("template0") pool = pgmon.get_pool('template0')
pgmon.handle_connect_failure(pool) pgmon.handle_connect_failure(pool)
self.assertGreater(pgmon.unhappy_cooldown["postgres"], datetime.now()) self.assertGreater(pgmon.unhappy_cooldown['postgres'], datetime.now())
self.assertGreater(pgmon.unhappy_cooldown["template0"], datetime.now()) self.assertGreater(pgmon.unhappy_cooldown['template0'], datetime.now())
self.assertEqual(len(pgmon.unhappy_cooldown), 2) self.assertEqual(len(pgmon.unhappy_cooldown), 2)
## ##
# get_query # get_query
## ##
def test_get_query__basic(self): def test_get_query__basic(self):
# Test getting a query with one version # Test getting a query with one version
metric = {"type": "value", "query": {0: "DEFAULT"}} metric = {
self.assertEqual(pgmon.get_query(metric, 100000), "DEFAULT") 'type': 'value',
'query': {
0: 'DEFAULT'
}
}
self.assertEqual(pgmon.get_query(metric, 100000), 'DEFAULT')
def test_get_query__versions(self): def test_get_query__versions(self):
metric = {"type": "value", "query": {0: "DEFAULT", 110000: "NEW"}} metric = {
'type': 'value',
'query': {
0: 'DEFAULT',
110000: 'NEW'
}
}
# Test getting the default version of a query with no lower bound and a newer # Test getting the default version of a query with no lower bound and a newer version
# version self.assertEqual(pgmon.get_query(metric, 100000), 'DEFAULT')
self.assertEqual(pgmon.get_query(metric, 100000), "DEFAULT")
# Test getting the newer version of a query with no lower bound and a newer # Test getting the newer version of a query with no lower bound and a newer version for the newer version
# version for the newer version self.assertEqual(pgmon.get_query(metric, 110000), 'NEW')
self.assertEqual(pgmon.get_query(metric, 110000), "NEW")
# Test getting the newer version of a query with no lower bound and a newer # Test getting the newer version of a query with no lower bound and a newer version for an even newer version
# version for an even newer version self.assertEqual(pgmon.get_query(metric, 160000), 'NEW')
self.assertEqual(pgmon.get_query(metric, 160000), "NEW")
# Test getting a version in bwtween two other versions # Test getting a version in bwtween two other versions
metric = {"type": "value", "query": {0: "DEFAULT", 96000: "OLD", 110000: "NEW"}} metric = {
self.assertEqual(pgmon.get_query(metric, 100000), "OLD") 'type': 'value',
'query': {
0: 'DEFAULT',
96000: 'OLD',
110000: 'NEW'
}
}
self.assertEqual(pgmon.get_query(metric, 100000), 'OLD')
def test_get_query__missing_version(self): def test_get_query__missing_version(self):
metric = {"type": "value", "query": {96000: "OLD", 110000: "NEW", 150000: ""}} metric = {
'type': 'value',
'query': {
96000: 'OLD',
110000: 'NEW',
150000: ''
}
}
# Test getting a metric that only exists for newer versions # Test getting a metric that only exists for newer versions
self.assertRaises(pgmon.MetricVersionError, pgmon.get_query, metric, 80000) self.assertRaises(pgmon.MetricVersionError, pgmon.get_query, metric, 80000)
@ -289,6 +219,7 @@ class TestPgmonMethods(unittest.TestCase):
# Test getting a metric that only exists for older versions # Test getting a metric that only exists for older versions
self.assertRaises(pgmon.MetricVersionError, pgmon.get_query, metric, 160000) self.assertRaises(pgmon.MetricVersionError, pgmon.get_query, metric, 160000)
## ##
# read_config # read_config
## ##
@ -298,32 +229,27 @@ class TestPgmonMethods(unittest.TestCase):
# Test reading just a metric and using the defaults for everything else # Test reading just a metric and using the defaults for everything else
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
# This is a comment! # This is a comment!
metrics: metrics:
test1: test1:
type: value type: value
query: query:
0: TEST1 0: TEST1
""" """)
)
pgmon.read_config(f"{tmpdirname}/config.yml") pgmon.read_config(f"{tmpdirname}/config.yml")
self.assertEqual( self.assertEqual(pgmon.config['max_pool_size'], pgmon.default_config['max_pool_size'])
pgmon.config["max_pool_size"], pgmon.default_config["max_pool_size"] self.assertEqual(pgmon.config['dbuser'], pgmon.default_config['dbuser'])
)
self.assertEqual(pgmon.config["dbuser"], pgmon.default_config["dbuser"])
pgmon.config = {} pgmon.config = {}
# Test reading a basic config # Test reading a basic config
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
# This is a comment! # This is a comment!
min_pool_size: 1 min_pool_size: 1
max_pool_size: 2 max_pool_size: 2
@ -354,25 +280,22 @@ metrics:
type: column type: column
query: query:
0: TEST4 0: TEST4
""" """)
)
pgmon.read_config(f"{tmpdirname}/config.yml") pgmon.read_config(f"{tmpdirname}/config.yml")
self.assertEqual(pgmon.config["dbuser"], "someone") self.assertEqual(pgmon.config['dbuser'], 'someone')
self.assertEqual(pgmon.config["metrics"]["test1"]["type"], "value") self.assertEqual(pgmon.config['metrics']['test1']['type'], 'value')
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1") self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
self.assertEqual(pgmon.config["metrics"]["test2"]["query"][0], "TEST2") self.assertEqual(pgmon.config['metrics']['test2']['query'][0], 'TEST2')
def test_read_config__include(self): def test_read_config__include(self):
pgmon.config = {} pgmon.config = {}
# Test reading a config that includes other files (absolute and relative paths, # Test reading a config that includes other files (absolute and relative paths, multiple levels)
# multiple levels)
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write(f"""---
f"""---
# This is a comment! # This is a comment!
min_pool_size: 1 min_pool_size: 1
max_pool_size: 2 max_pool_size: 2
@ -385,22 +308,18 @@ version_check_period: 3600
include: include:
- dbsettings.yml - dbsettings.yml
- {tmpdirname}/metrics.yml - {tmpdirname}/metrics.yml
""" """)
)
with open(f"{tmpdirname}/dbsettings.yml", "w") as f: with open(f"{tmpdirname}/dbsettings.yml", 'w') as f:
f.write( f.write(f"""---
f"""---
dbuser: someone dbuser: someone
dbhost: localhost dbhost: localhost
dbport: 5555 dbport: 5555
dbname: template0 dbname: template0
""" """)
)
with open(f"{tmpdirname}/metrics.yml", "w") as f: with open(f"{tmpdirname}/metrics.yml", 'w') as f:
f.write( f.write(f"""---
f"""---
metrics: metrics:
test1: test1:
type: value type: value
@ -412,35 +331,31 @@ metrics:
0: TEST2 0: TEST2
include: include:
- more_metrics.yml - more_metrics.yml
""" """)
)
with open(f"{tmpdirname}/more_metrics.yml", "w") as f: with open(f"{tmpdirname}/more_metrics.yml", 'w') as f:
f.write( f.write(f"""---
f"""---
metrics: metrics:
test3: test3:
type: value type: value
query: query:
0: TEST3 0: TEST3
""" """)
)
pgmon.read_config(f"{tmpdirname}/config.yml") pgmon.read_config(f"{tmpdirname}/config.yml")
self.assertEqual(pgmon.config["max_idle_time"], 10) self.assertEqual(pgmon.config['max_idle_time'], 10)
self.assertEqual(pgmon.config["dbuser"], "someone") self.assertEqual(pgmon.config['dbuser'], 'someone')
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1") self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
self.assertEqual(pgmon.config["metrics"]["test2"]["query"][0], "TEST2") self.assertEqual(pgmon.config['metrics']['test2']['query'][0], 'TEST2')
self.assertEqual(pgmon.config["metrics"]["test3"]["query"][0], "TEST3") self.assertEqual(pgmon.config['metrics']['test3']['query'][0], 'TEST3')
def test_read_config__reload(self): def test_read_config__reload(self):
pgmon.config = {} pgmon.config = {}
# Test rereading a config to update an existing config # Test rereading a config to update an existing config
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
# This is a comment! # This is a comment!
min_pool_size: 1 min_pool_size: 1
max_pool_size: 2 max_pool_size: 2
@ -463,17 +378,15 @@ metrics:
type: value type: value
query: query:
0: TEST2 0: TEST2
""" """)
)
pgmon.read_config(f"{tmpdirname}/config.yml") pgmon.read_config(f"{tmpdirname}/config.yml")
# Just make sure the first config was read # Just make sure the first config was read
self.assertEqual(len(pgmon.config["metrics"]), 2) self.assertEqual(len(pgmon.config['metrics']), 2)
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
# This is a comment! # This is a comment!
min_pool_size: 7 min_pool_size: 7
metrics: metrics:
@ -481,39 +394,34 @@ metrics:
type: value type: value
query: query:
0: NEW1 0: NEW1
""" """)
)
pgmon.read_config(f"{tmpdirname}/config.yml") pgmon.read_config(f"{tmpdirname}/config.yml")
self.assertEqual(pgmon.config["min_pool_size"], 7) self.assertEqual(pgmon.config['min_pool_size'], 7)
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "NEW1") self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'NEW1')
self.assertEqual(len(pgmon.config["metrics"]), 1) self.assertEqual(len(pgmon.config['metrics']), 1)
def test_read_config__query_file(self): def test_read_config__query_file(self):
pgmon.config = {} pgmon.config = {}
# Read a config file that reads a query from a file # Read a config file that reads a query from a file
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
metrics: metrics:
test1: test1:
type: value type: value
query: query:
0: file:some_query.sql 0: file:some_query.sql
""" """)
)
with open(f"{tmpdirname}/some_query.sql", "w") as f: with open(f"{tmpdirname}/some_query.sql", 'w') as f:
f.write("This is a query") f.write("This is a query")
pgmon.read_config(f"{tmpdirname}/config.yml") pgmon.read_config(f"{tmpdirname}/config.yml")
self.assertEqual( self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'This is a query')
pgmon.config["metrics"]["test1"]["query"][0], "This is a query"
)
def test_read_config__invalid(self): def test_read_config__invalid(self):
pgmon.config = {} pgmon.config = {}
@ -521,47 +429,38 @@ metrics:
# For all of these tests, we start with a valid config and also ensure that # For all of these tests, we start with a valid config and also ensure that
# it is not modified when a new config read fails # it is not modified when a new config read fails
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
metrics: metrics:
test1: test1:
type: value type: value
query: query:
0: TEST1 0: TEST1
""" """)
)
pgmon.read_config(f"{tmpdirname}/config.yml") pgmon.read_config(f"{tmpdirname}/config.yml")
# Just make sure the config was read # Just make sure the config was read
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1") self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
# Test reading a nonexistant config file # Test reading a nonexistant config file
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
self.assertRaises( self.assertRaises(FileNotFoundError, pgmon.read_config, f'{tmpdirname}/missing.yml')
FileNotFoundError, pgmon.read_config, f"{tmpdirname}/missing.yml"
)
# Test reading an invalid config file # Test reading an invalid config file
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""[default]
"""[default]
This looks a lot like an ini file to me This looks a lot like an ini file to me
Or maybe a TOML? Or maybe a TOML?
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises(
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml"
)
# Test reading a config that includes an invalid file # Test reading a config that includes an invalid file
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
@ -570,19 +469,15 @@ metrics:
0: EVIL1 0: EVIL1
include: include:
- missing_file.yml - missing_file.yml
""" """)
) self.assertRaises(FileNotFoundError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
FileNotFoundError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test invalid log level # Test invalid log level
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
log_level: noisy log_level: noisy
dbuser: evil dbuser: evil
metrics: metrics:
@ -590,222 +485,132 @@ metrics:
type: value type: value
query: query:
0: EVIL1 0: EVIL1
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test invalid query return type # Test invalid query return type
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
type: lots_of_data type: lots_of_data
query: query:
0: EVIL1 0: EVIL1
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test invalid query dict type # Test invalid query dict type
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
type: lots_of_data type: lots_of_data
query: EVIL1 query: EVIL1
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test incomplete metric: missing type # Test incomplete metric: missing type
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
query: query:
0: EVIL1 0: EVIL1
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test incomplete metric: missing queries # Test incomplete metric: missing queries
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
type: value type: value
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test incomplete metric: empty queries # Test incomplete metric: empty queries
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
type: value type: value
query: {} query: {}
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test incomplete metric: query dict is None # Test incomplete metric: query dict is None
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
type: value type: value
query: query:
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test reading a config with no metrics # Test reading a config with no metrics
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test reading a query defined in a file but the file is missing # Test reading a query defined in a file but the file is missing
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
type: value type: value
query: query:
0: file:missing.sql 0: file:missing.sql
""" """)
) self.assertRaises(FileNotFoundError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
FileNotFoundError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
# Test invalid query versions # Test invalid query versions
with tempfile.TemporaryDirectory() as tmpdirname: with tempfile.TemporaryDirectory() as tmpdirname:
with open(f"{tmpdirname}/config.yml", "w") as f: with open(f"{tmpdirname}/config.yml", 'w') as f:
f.write( f.write("""---
"""---
dbuser: evil dbuser: evil
metrics: metrics:
test1: test1:
type: value type: value
query: query:
default: EVIL1 default: EVIL1
""" """)
) self.assertRaises(pgmon.ConfigError, pgmon.read_config, f'{tmpdirname}/config.yml')
self.assertRaises( self.assertEqual(pgmon.config['dbuser'], 'postgres')
pgmon.ConfigError, pgmon.read_config, f"{tmpdirname}/config.yml" self.assertEqual(pgmon.config['metrics']['test1']['query'][0], 'TEST1')
)
self.assertEqual(pgmon.config["dbuser"], "postgres")
self.assertEqual(pgmon.config["metrics"]["test1"]["query"][0], "TEST1")
def test_version_num_to_release__valid(self):
    # Servers before PostgreSQL 10 report major.minor releases (e.g. 9.6);
    # newer servers report a single major number.  Both forms must decode.
    for version_num, expected_release in ((90602, 9.6), (130002, 13)):
        self.assertEqual(pgmon.version_num_to_release(version_num), expected_release)
def test_parse_version_rss__simple(self):
    # Each case: (release asked about, newest version expected, supported?)
    cases = (
        (13, 130021, True),   # a release still receiving updates
        (9.6, 90624, False),  # an EOL release: resolves, but flagged unsupported
    )
    for release, expected_version, expected_supported in cases:
        pgmon.parse_version_rss(versions_rss, release)
        self.assertEqual(pgmon.latest_version, expected_version)
        self.assertEqual(pgmon.release_supported, expected_supported)
def test_parse_version_rss__missing(self):
    # Releases absent from the RSS feed must raise, whether the number
    # falls between known series (9.7) or beyond them (99).
    for bogus_release in (9.7, 99):
        with self.assertRaises(pgmon.LatestVersionCheckError):
            pgmon.parse_version_rss(versions_rss, bogus_release)
def test_get_latest_version(self):
    # Pretend the cluster version was already probed recently so this
    # test never needs a live database.
    pgmon.cluster_version = 90623
    pgmon.cluster_version_next_check = datetime.now() + timedelta(hours=1)

    # Start from the stock configuration.
    pgmon.update_deep(pgmon.config, pgmon.default_config)

    # NOTE: fetches the live RSS feed; we assume the EOL 9.6 series will
    # never receive another release, so its latest version is stable.
    self.assertEqual(pgmon.get_latest_version(), 90624)
def test_json_encode_special(self):
    half = Decimal("0.5")

    # The helper must convert Decimal into a genuine float ...
    self.assertFalse(isinstance(half, float))
    self.assertTrue(isinstance(pgmon.json_encode_special(half), float))

    # ... preserving the numeric value.
    self.assertEqual(pgmon.json_encode_special(half), 0.5)
    self.assertEqual(pgmon.json_encode_special(Decimal("12")), 12.0)

    # Unhandled types must still raise, as json.dumps expects of a
    # 'default' hook.
    with self.assertRaises(TypeError):
        pgmon.json_encode_special(object)

    # End to end: a Decimal actually survives serialization.
    self.assertEqual(
        json.dumps(Decimal("2.5"), default=pgmon.json_encode_special), "2.5"
    )

View File

@ -7,7 +7,7 @@ After=network.target
[Service] [Service]
EnvironmentFile=/etc/pgmon/%i-service.conf EnvironmentFile=/etc/pgmon/%i-service.conf
User=${SERVICE_USER:-postgres} User=${SERVICE_USER:-postgres}
ExecStart=/usr/bin/pgmon -c /etc/pgmon/%i.yml ExecStart=/usr/local/bin/pgmon /etc/pgmon/%i.yml
ExecReload=kill -HUP $MAINPID ExecReload=kill -HUP $MAINPID
Restart=on-failure Restart=on-failure
Type=exec Type=exec

View File

@ -1,23 +0,0 @@
# Test image: runs pgmon in --test mode against the "db" PostgreSQL
# service defined in the accompanying docker-compose file.
FROM alpine:3.21

# Runtime dependencies for pgmon (psycopg2, requests, PyYAML) plus tini,
# a minimal init that reaps zombies and forwards signals to the agent.
RUN apk update && \
    apk add py3-psycopg2 \
    py3-requests \
    py3-yaml \
    tini

WORKDIR /app

# The agent itself, the standard metric definitions, and the test config.
COPY src/pgmon.py /app/
COPY sample-config/pgmon-metrics.yml /app/
COPY tests/test-config.yml /app/

# Database credentials; libpq refuses a .pgpass that is group/world readable.
# NOTE(review): chown postgres on /root/.pgpass looks inconsistent — libpq
# also checks file ownership; confirm which user pgmon connects as.
COPY --chmod=0600 --chown=postgres:postgres tests/pgpass /root/.pgpass

ENTRYPOINT ["tini", "--"]

EXPOSE 5400

CMD ["/app/pgmon.py", "-c", "/app/test-config.yml", "--test"]

View File

@ -1,32 +0,0 @@
---
# Test harness: a pgmon "agent" container wired to a disposable PostgreSQL
# "db" container.  Choose the PostgreSQL image tag via the PGTAG env var.
services:
  agent:
    image: pgmon
    build:
      context: ..
      dockerfile: tests/Dockerfile
    ports:
      # Let docker pick a free host port for pgmon's listener
      - :5400
    depends_on:
      db:
        # Don't start the agent until PostgreSQL passes its healthcheck
        condition: service_healthy
  db:
    image: "postgres:${PGTAG:-17-bookworm}"
    ports:
      - :5432
    environment:
      POSTGRES_PASSWORD: secret
    healthcheck:
      # NOTE(review): pg_controldata's "in production" state is used instead
      # of pg_isready — presumably because pg_isready can report ready during
      # the initdb restart cycle; confirm before changing.
      #test: [ "CMD", "pg_isready", "-U", "postgres" ]
      test: [ "CMD-SHELL", "pg_controldata /var/lib/postgresql/data/ | grep -q 'in production'" ]
      interval: 5s
      timeout: 2s
      retries: 40
    # Run with SSL enabled (snakeoil certs) and listen on all interfaces
    command: >
      postgres -c ssl=on
      -c ssl_cert_file='/etc/ssl/certs/ssl-cert-snakeoil.pem'
      -c ssl_key_file='/etc/ssl/private/ssl-cert-snakeoil.key'
      -c listen_addresses='*'

View File

@ -1 +0,0 @@
db:5432:*:postgres:secret

View File

@ -1,65 +0,0 @@
#!/bin/bash
#
# Run the pgmon integration tests against one or more PostgreSQL versions
# using docker compose.
#
# Usage: run-tests.sh [version ...]
# With no arguments, every known version is tested.

# Versions to test ("$@" quoted so values are never word-split or globbed)
versions=( "$@" )

# If we weren't given any versions, test them all
if [ ${#versions[@]} -eq 0 ]
then
	versions=( 9.2 9.4 9.6 10 11 12 13 14 15 16 17 )
fi

# Map each PostgreSQL release to the docker image tag to use
declare -A images=()
images["9.2"]='9.2'
images["9.3"]='9.3'
images["9.4"]='9.4'
images["9.5"]='9.5'
images["9.6"]='9.6-bullseye'
images["10"]='10-bullseye'
images["11"]='11-bookworm'
images["12"]='12-bookworm'
images["13"]='13-bookworm'
images["14"]='14-bookworm'
images["15"]='15-bookworm'
images["16"]='16-bookworm'
images["17"]='17-bookworm'

# Per-version exit codes, reported at the end
declare -A results=()

# Make sure everything's down to start with
docker compose down

# Make sure our agent container is up to date
docker compose build agent

for version in "${versions[@]}"
do
	echo
	echo "Testing: PostgreSQL ${version}"

	# Refuse unknown versions instead of silently letting the compose
	# file fall back to its default image tag
	if [ -z "${images[$version]+x}" ]
	then
		echo "No image tag defined for version: ${version}" >&2
		results["$version"]=127
		continue
	fi

	# Specify the version we're testing against
	export PGTAG="${images["$version"]}"

	# Start the containers; the run's status is the agent's exit code
	docker compose up --exit-code-from=agent agent
	rc=$?
	results["$version"]=$rc

	# Destroy the containers
	docker compose down
done

# Summarize the per-version results
echo
echo
for v in "${versions[@]}"
do
	case "${results["$v"]}" in
		0) msg="OK" ;;
		1) msg="Query failure detected" ;;
		18) msg="Docker image error: 18" ;;
		127) msg="Unknown version (no image tag defined)" ;;
		*) msg="Unexpected error: ${results["$v"]}" ;;
	esac
	echo "$v -> $msg"
done

View File

@ -1,17 +0,0 @@
---
# pgmon configuration used by the integration-test container.

# Bind to all interfaces so we can submit requests from outside the test container
address: 0.0.0.0

# We always just connect to the db container
dbhost: db
dbport: 5432
dbuser: postgres

# The SSL cipher parameters are too old in the 9.2 container, so we allow the tests
# to be run without encryption
ssl_mode: prefer

# Pull in the standard metrics
include:
  - pgmon-metrics.yml

File diff suppressed because it is too large Load Diff