From d4e36eb3c9767e743d8960c08007b4536df20e6b Mon Sep 17 00:00:00 2001 From: Piotr Goczal Date: Fri, 31 May 2019 18:52:20 +0200 Subject: [PATCH 1/5] Fixes for compatibility with older (2.6) python version --- pgzabbix/__init__.py | 15 ++++++------ pgzabbix/database.py | 34 +++++++++++++------------- pgzabbix/generic.py | 10 ++++---- pgzabbix/replication.py | 6 ++--- pgzabbix/table.py | 54 ++++++++++++++--------------------------- setup.py | 2 +- 6 files changed, 51 insertions(+), 70 deletions(-) diff --git a/pgzabbix/__init__.py b/pgzabbix/__init__.py index f7290f3..8f032fa 100644 --- a/pgzabbix/__init__.py +++ b/pgzabbix/__init__.py @@ -6,7 +6,6 @@ import pgzabbix.table import psycopg2 - def all_generic(cur): for fun in ( pgzabbix.generic.psql_running, @@ -33,7 +32,7 @@ def all_generic(cur): pgzabbix.generic.psql_tx_rolledback, ): for key, val in fun(cur): - print("- {0} {1}".format(key, val)) + print("- %s %s" % (key, val)) def all_perdb(cur): @@ -58,7 +57,7 @@ def all_perdb(cur): pgzabbix.database.db_cachehit_ratio, ): for key, val in fun(cur): - print("- {0} {1}".format(key, val)) + print("- %s %s" % (key, val)) def all_sr(cur): @@ -68,7 +67,7 @@ def all_sr(cur): pgzabbix.replication.sync_priority, ): for key, val in fun(cur): - print("- {0} {1}".format(key, val)) + print("- %s %s" % (key, val)) def current_tables(cur): @@ -108,13 +107,13 @@ def to_zbx(thelist): def discover_sr(cur): data = list(pgzabbix.replication.sr_discovery(cur)) data = to_zbx(data) - print(" - {0} {1}".format("psql.sr.discovery", data)) + print(" - %s %s" % ("psql.sr.discovery", data)) def discover_db(cur): data = pgzabbix.discover.db_discovery(cur) data = to_zbx(data) - print(" - {0} {1}".format("psql.discovery", data)) + print(" - %s %s" % ("psql.discovery", data)) def list_databases_we_can_connect_to_and_fuck_off(cur): @@ -149,7 +148,7 @@ def foreach_db(config, perdb_function): def tables_stat(config): for key, val in foreach_db(config, current_tables): - print("- {0} {1}".format(key, val)) + print("- %s %s" % (key, val)) def discover_tables(config): @@ -160,7 +159,7 @@ def discover_tables(config): # then complains about invalid (truncated) json data = list(foreach_db(config, pgzabbix.discover.tables_discovery)) data = to_zbx(data) - print(" - {0} {1}".format("psql.table.discovery", data)) + print(" - %s %s" % ("psql.table.discovery", data)) def discover_all(config, cur): diff --git a/pgzabbix/database.py b/pgzabbix/database.py index cb1e1fa..a959a01 100644 --- a/pgzabbix/database.py +++ b/pgzabbix/database.py @@ -15,7 +15,7 @@ def psql_db_size(cur): " has_database_privilege(datname, 'CONNECT')") cur.execute(query) for row in cur.fetchall(): - yield ("psql.db_size[{}]".format(row[0]), row[1]) + yield ("psql.db_size[%s]" % (row[0]), row[1]) def psql_db_garbage_ratio(cur): @@ -31,7 +31,7 @@ def confl_tablespace(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.confl_tablespace[{}]'.format(row[0]), row[1]) + yield ('psql.confl_tablespace[%s]' % (row[0]), row[1]) def confl_lock(cur): @@ -39,7 +39,7 @@ def confl_lock(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.confl_lock[{}]'.format(row[0]), row[1]) + yield ('psql.confl_lock[%s]' % (row[0]), row[1]) def confl_snapshot(cur): @@ -47,7 +47,7 @@ def confl_snapshot(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield 
('psql.confl_snapshot[{}]'.format(row[0]), row[1]) + yield ('psql.confl_snapshot[%s]' % (row[0]), row[1]) def confl_bufferpin(cur): @@ -55,7 +55,7 @@ def confl_bufferpin(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.confl_bufferpin[{}]'.format(row[0]), row[1]) + yield ('psql.confl_bufferpin[%s]' % (row[0]), row[1]) def confl_deadlock(cur): @@ -64,7 +64,7 @@ def confl_deadlock(cur): " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.confl_deadlock[{}]'.format(row[0]), row[1]) + yield ('psql.confl_deadlock[%s]' % (row[0]), row[1]) def db_tx_commited(cur): @@ -72,7 +72,7 @@ def db_tx_commited(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_tx_commited[{}]'.format(row[0]), row[1]) + yield ('psql.db_tx_commited[%s]' % (row[0]), row[1]) def db_deadlocks(cur): @@ -81,7 +81,7 @@ def db_deadlocks(cur): " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_deadlocks[{}]'.format(row[0]), row[1]) + yield ('psql.db_deadlocks[%s]' % (row[0]), row[1]) def db_tx_rolledback(cur): @@ -89,7 +89,7 @@ def db_tx_rolledback(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_tx_rolledback[{}]'.format(row[0]), row[1]) + yield ('psql.db_tx_rolledback[%s]' % (row[0]), row[1]) def db_temp_bytes(cur): @@ -97,7 +97,7 @@ def db_temp_bytes(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_temp_bytes[{}]'.format(row[0]), row[1]) + yield ('psql.db_temp_bytes[%s]' %(row[0]), row[1]) def db_deleted(cur): @@ -105,7 +105,7 @@ def db_deleted(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_deleted[{}]'.format(row[0]), row[1]) + yield ('psql.db_deleted[%s]' % (row[0]), row[1]) def db_fetched(cur): @@ -113,7 +113,7 @@ def db_fetched(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_fetched[{}]'.format(row[0]), row[1]) + yield ('psql.db_fetched[%s]' % (row[0]), row[1]) def db_inserted(cur): @@ -121,7 +121,7 @@ def db_inserted(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_inserted[{}]'.format(row[0]), row[1]) + yield ('psql.db_inserted[%s]' % (row[0]), row[1]) def db_returned(cur): @@ -129,7 +129,7 @@ def db_returned(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_returned[{}]'.format(row[0]), row[1]) + yield ('psql.db_returned[%s]' % (row[0]), row[1]) def db_updated(cur): @@ -137,7 +137,7 @@ def db_updated(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_updated[{}]'.format(row[0]), row[1]) + yield ('psql.db_updated[%s]' % (row[0]), row[1]) def db_connections(cur): @@ -145,7 +145,7 @@ def db_connections(cur): " inner join pg_database using (datname)" " where pg_database.datistemplate=False;") for row in cur.fetchall(): - yield ('psql.db_connections[{}]'.format(row[0]), row[1]) + yield ('psql.db_connections[%s]' % (row[0]), row[1]) def db_cachehit_ratio(cur): @@ -154,4 +154,4 @@ def 
db_cachehit_ratio(cur):
                 " inner join pg_database using (datname)"
                 " where pg_database.datistemplate=False;")
     for row in cur.fetchall():
-        yield ('psql.cachehit_ratio[{}]'.format(row[0]), row[1])
+        yield ('psql.cachehit_ratio[%s]' % (row[0]), row[1])
diff --git a/pgzabbix/generic.py b/pgzabbix/generic.py
index f38cd3e..1ecfa35 100644
--- a/pgzabbix/generic.py
+++ b/pgzabbix/generic.py
@@ -67,8 +67,8 @@ def psql_locks_waiting(cur):
 
 def psql_slow_dml_queries(cur, limit=123):
     query = ("select count(*) from pg_stat_activity where state = 'active' "
-             " and now() - query_start > '{} sec'::interval "
-             " and query ~* '^(insert|update|delete)'").format(limit)
+             " and now() - query_start > '%d sec'::interval "
+             " and query ~* '^(insert|update|delete)'") % limit
     cur.execute(query)
     for row in cur.fetchall():
         yield ("psql.slow_dml_queries", row[0])
@@ -76,7 +76,7 @@ def psql_slow_queries(cur, limit=123):
     query = ("select count(*) from pg_stat_activity where state = 'active' "
-             " and now() - query_start > '{} sec'::interval").format(limit)
+             " and now() - query_start > '%d sec'::interval") % limit
     cur.execute(query)
     for row in cur.fetchall():
         yield ("psql.slow_queries", row[0])
@@ -84,8 +84,8 @@ def psql_slow_select_queries(cur, limit=123):
     query = ("select count(*) from pg_stat_activity where state = 'active' "
-             " and now() - query_start > '{} sec'::interval "
-             " and query ilike 'select%'").format(limit)
+             " and now() - query_start > '%d sec'::interval "
+             " and query ilike 'select%%'") % limit
     cur.execute(query)
     for row in cur.fetchall():
         yield ("psql.slow_select_queries", row[0])
diff --git a/pgzabbix/replication.py b/pgzabbix/replication.py
index 8f3a13d..3a2fb14 100644
--- a/pgzabbix/replication.py
+++ b/pgzabbix/replication.py
@@ -21,7 +21,7 @@ def write_diff(cur):
 
     cur.execute(query.format(table=view_select(cur)))
     for row in cur.fetchall():
-        yield ('psql.write_diff[{}]'.format(row[0]), row[1])
+        yield ('psql.write_diff[%s]' % (row[0]), row[1])
 
 
 def replay_diff(cur):
@@ -37,7 +37,7 @@ def replay_diff(cur):
 
     cur.execute(query.format(table=view_select(cur)))
     for row in cur.fetchall():
-        yield ('psql.replay_diff[{}]'.format(row[0]), row[1])
+        yield ('psql.replay_diff[%s]' % (row[0]), row[1])
 
 
 def sync_priority(cur):
@@ -47,7 +47,7 @@ def sync_priority(cur):
 
     cur.execute(query.format(table=view_select(cur)))
     for row in cur.fetchall():
-        yield ('psql.sync_priority[{}]'.format(row[0]), row[1])
+        yield ('psql.sync_priority[%s]' % (row[0]), row[1])
 
 
 def sr_discovery(cur):
diff --git a/pgzabbix/table.py b/pgzabbix/table.py
index bb513ae..b6b50e3 100644
--- a/pgzabbix/table.py
+++ b/pgzabbix/table.py
@@ -1,153 +1,135 @@
 
 def psql_table_analyze_count(cur):
     query = "select current_database(), schemaname, relname, analyze_count from pg_stat_user_tables"
-    out = "psql.table_analyze_count[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_analyze_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_autoanalyze_count(cur):
     query = "select current_database(), schemaname, relname, autoanalyze_count from pg_stat_user_tables"
-    out = "psql.table_autoanalyze_count[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_autoanalyze_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_autovacuum_count(cur):
     query = "select current_database(), schemaname, relname, autovacuum_count from pg_stat_user_tables"
-    out = "psql.table_autovacuum_count[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_autovacuum_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_n_dead_tup(cur):
     query = "select current_database(), schemaname, relname, n_dead_tup from pg_stat_user_tables"
-    out = "psql.table_n_dead_tup[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_n_dead_tup[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_n_tup_del(cur):
     query = "select current_database(), schemaname, relname, n_tup_del from pg_stat_user_tables"
-    out = "psql.table_n_tup_del[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_n_tup_del[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_n_tup_hot_upd(cur):
     query = "select current_database(), schemaname, relname, n_tup_hot_upd from pg_stat_user_tables"
-    out = "psql.table_n_tup_hot_upd[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_n_tup_hot_upd[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_idx_scan(cur):
     query = "select current_database(), schemaname, relname, coalesce(idx_scan, 0) from pg_stat_user_tables"
-    out = "psql.table_idx_scan[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_idx_scan[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_seq_tup_read(cur):
     query = "select current_database(), schemaname, relname, coalesce(seq_tup_read, 0) from pg_stat_user_tables"
-    out = "psql.table_seq_tup_read[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_seq_tup_read[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_idx_tup_fetch(cur):
     query = "select current_database(), schemaname, relname, coalesce(idx_tup_fetch,0) from pg_stat_user_tables"
-    out = "psql.table_idx_tup_fetch[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_idx_tup_fetch[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_idx_tup_ins(cur):
     query = "select current_database(), schemaname, relname, n_tup_ins from pg_stat_user_tables"
-    out = "psql.table_n_tup_ins[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_n_tup_ins[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_n_live_tup(cur):
     query = "select current_database(), schemaname, relname, n_live_tup from pg_stat_user_tables"
-    out = "psql.table_n_live_tup[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_n_live_tup[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_seq_scan(cur):
     query = "select current_database(), schemaname, relname, seq_scan from pg_stat_user_tables"
-    out = "psql.table_seq_scan[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_seq_scan[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def psql_table_n_tup_upd(cur):
     query = "select current_database(), schemaname, relname, n_tup_upd from pg_stat_user_tables"
-    out = "psql.table_n_tup_upd[{},{},{}]"
     cur.execute(query)
     for row in cur.fetchall():
-        yield out.format(*row[:3]), row[3]
+        yield ('psql.table_n_tup_upd[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
 
 
 def
psql_table_vacuum_count(cur): query = "select current_database(), schemaname, relname, vacuum_count from pg_stat_user_tables" - out = "psql.table_vacuum_count[{},{},{}]" cur.execute(query) for row in cur.fetchall(): - yield out.format(*row[:3]), row[3] + yield ('psql.table_vacuum_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] def psql_table_total_size(cur): query = "select current_database(), schemaname, relname, pg_total_relation_size(relid) from pg_stat_user_tables" - out = "psql.table_total_size[{},{},{}]" cur.execute(query) for row in cur.fetchall(): - yield out.format(*row[:3]), row[3] + yield ('psql.table_total_size[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] def psql_table_heap_cachehit_ratio(cur): query = ("select current_database(), schemaname, relname, " " round(heap_blks_hit * 100.0 / greatest(heap_blks_hit + heap_blks_read, 1), 2) " " from pg_statio_user_tables") - out = "psql.table_heap_cachehit_ratio[{},{},{}]" cur.execute(query) for row in cur.fetchall(): if row[3] is None: continue - yield out.format(*row[:3]), row[3] + yield ('psql.table_heap_cachehit_ratio[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] def psql_table_idx_cachehit_ratio(cur): query = ("select current_database(), schemaname, relname, " " round(idx_blks_hit * 100.0 / greatest(idx_blks_hit + idx_blks_read, 1), 2) " " from pg_statio_user_tables;") - out = "psql.table_idx_cachehit_ratio[{},{},{}]" cur.execute(query) for row in cur.fetchall(): if row[3] is None: continue - yield out.format(*row[:3]), row[3] + yield ('psql.table_idx_cachehit_ratio[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] def psql_table_garbage_ratio(cur): query = ("select current_database(), schemaname, relname, " " round(n_dead_tup / greatest(n_live_tup + n_dead_tup , 1), 2) " " from pg_stat_user_tables;") - out = "psql.table_garbage_ratio[{},{},{}]" cur.execute(query) for row in cur.fetchall(): - yield out.format(*row[:3]), row[3] + yield ('psql.table_garbage_ratio[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] diff --git a/setup.py b/setup.py index a1fdc95..6d26598 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ def reinitialize_command(self, name, **kw): if __name__ == '__main__': setup(name="pgzabbix", - version="1.7", + version="1.7.1", description="Send stats about postgres to Zabbix", url="https://github.com/Spindel/PgZabbix", author="D.S. 
Ljungmark", From eaf5644ba9bedaeb1ba128ffbdfee15348ecc73c Mon Sep 17 00:00:00 2001 From: bilbolodz Date: Fri, 31 May 2019 18:55:34 +0200 Subject: [PATCH 2/5] Update __init__.py --- pgzabbix/__init__.py | 1 + 1 file changed, 1 insertion(+) diff --git a/pgzabbix/__init__.py b/pgzabbix/__init__.py index 8f032fa..2630bf4 100644 --- a/pgzabbix/__init__.py +++ b/pgzabbix/__init__.py @@ -6,6 +6,7 @@ import pgzabbix.table import psycopg2 + def all_generic(cur): for fun in ( pgzabbix.generic.psql_running, From d9d6c2a34d1f4e816e0a6addad91b03daa9637a2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Gocza=C5=82?= Date: Wed, 15 Jan 2020 15:23:43 +0100 Subject: [PATCH 3/5] Added zip dist file --- README.md | 2 +- build/lib.linux-x86_64-2.7/__main__.py | 4 + .../lib.linux-x86_64-2.7/pgzabbix/__init__.py | 169 ++++++++++++++++ build/lib.linux-x86_64-2.7/pgzabbix/cmd.py | 83 ++++++++ .../lib.linux-x86_64-2.7/pgzabbix/database.py | 149 ++++++++++++++ .../lib.linux-x86_64-2.7/pgzabbix/discover.py | 33 +++ build/lib.linux-x86_64-2.7/pgzabbix/dome.py | 27 +++ .../lib.linux-x86_64-2.7/pgzabbix/generic.py | 191 ++++++++++++++++++ build/lib.linux-x86_64-2.7/pgzabbix/pgstat.py | 52 +++++ .../pgzabbix/replication.py | 65 ++++++ build/lib.linux-x86_64-2.7/pgzabbix/table.py | 153 ++++++++++++++ 11 files changed, 927 insertions(+), 1 deletion(-) create mode 100644 build/lib.linux-x86_64-2.7/__main__.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/__init__.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/cmd.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/database.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/discover.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/dome.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/generic.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/pgstat.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/replication.py create mode 100644 build/lib.linux-x86_64-2.7/pgzabbix/table.py diff --git a/README.md b/README.md index b1c3bda..a04ad5c 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -PgZabbix +PgZabbix (python 2.6 compatible version) Suck some stats out of Postgres, and inject it into Zabbix. Mostly based on pg_monz but not written in shell script. 
diff --git a/build/lib.linux-x86_64-2.7/__main__.py b/build/lib.linux-x86_64-2.7/__main__.py new file mode 100644 index 0000000..7c410ce --- /dev/null +++ b/build/lib.linux-x86_64-2.7/__main__.py @@ -0,0 +1,4 @@ +import pgzabbix.cmd + +if __name__ == "__main__": + pgzabbix.cmd.main() diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/__init__.py b/build/lib.linux-x86_64-2.7/pgzabbix/__init__.py new file mode 100644 index 0000000..820e814 --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/__init__.py @@ -0,0 +1,169 @@ +import json +import pgzabbix.generic +import pgzabbix.discover +import pgzabbix.database +import pgzabbix.replication +import pgzabbix.table +import psycopg2 + + +def all_generic(cur): + for fun in ( + pgzabbix.generic.psql_running, + pgzabbix.generic.machine_is_primary, + pgzabbix.generic.machine_is_standby, + pgzabbix.generic.psql_active_connections, + pgzabbix.generic.psql_buffers_alloc, + pgzabbix.generic.psql_buffers_backend, + pgzabbix.generic.psql_buffers_backend_fsync, + pgzabbix.generic.psql_buffers_checkpoint, + pgzabbix.generic.psql_buffers_clean, + pgzabbix.generic.psql_checkpoints_req, + pgzabbix.generic.psql_checkpoints_timed, + pgzabbix.generic.psql_idle_connections, + pgzabbix.generic.psql_idle_tx_connections, + pgzabbix.generic.psql_locks_waiting, + pgzabbix.generic.psql_maxwritten_clean, + pgzabbix.generic.psql_server_connections, + pgzabbix.generic.psql_server_maxcon, + pgzabbix.generic.psql_slow_dml_queries, + pgzabbix.generic.psql_slow_queries, + pgzabbix.generic.psql_slow_select_queries, + pgzabbix.generic.psql_tx_commited, + pgzabbix.generic.psql_tx_rolledback, + ): + for key, val in fun(cur): + print("- %s %s" % (key, val)) + + +def all_perdb(cur): + for fun in ( + pgzabbix.database.psql_db_size, + pgzabbix.database.psql_db_garbage_ratio, + pgzabbix.database.confl_tablespace, + pgzabbix.database.confl_lock, + pgzabbix.database.confl_snapshot, + pgzabbix.database.confl_bufferpin, + pgzabbix.database.confl_deadlock, + pgzabbix.database.db_tx_commited, + pgzabbix.database.db_deadlocks, + pgzabbix.database.db_tx_rolledback, + pgzabbix.database.db_temp_bytes, + pgzabbix.database.db_deleted, + pgzabbix.database.db_fetched, + pgzabbix.database.db_inserted, + pgzabbix.database.db_returned, + pgzabbix.database.db_updated, + pgzabbix.database.db_connections, + pgzabbix.database.db_cachehit_ratio, + ): + for key, val in fun(cur): + print("- %s %s" % (key, val)) + + +def all_sr(cur): + for fun in ( + pgzabbix.replication.write_diff, + pgzabbix.replication.replay_diff, + pgzabbix.replication.sync_priority, + ): + for key, val in fun(cur): + print("- %s %s" % (key, val)) + + +def current_tables(cur): + for fun in ( + pgzabbix.table.psql_table_analyze_count, + pgzabbix.table.psql_table_autoanalyze_count, + pgzabbix.table.psql_table_autovacuum_count, + pgzabbix.table.psql_table_garbage_ratio, + pgzabbix.table.psql_table_heap_cachehit_ratio, + pgzabbix.table.psql_table_idx_cachehit_ratio, + pgzabbix.table.psql_table_idx_scan, + pgzabbix.table.psql_table_idx_tup_fetch, + pgzabbix.table.psql_table_idx_tup_ins, + pgzabbix.table.psql_table_n_dead_tup, + pgzabbix.table.psql_table_n_live_tup, + pgzabbix.table.psql_table_n_tup_del, + pgzabbix.table.psql_table_n_tup_hot_upd, + pgzabbix.table.psql_table_n_tup_upd, + pgzabbix.table.psql_table_seq_scan, + pgzabbix.table.psql_table_seq_tup_read, + pgzabbix.table.psql_table_total_size, + pgzabbix.table.psql_table_vacuum_count, + ): + for line in fun(cur): + yield line + + +def to_zbx(thelist): + obj = {} + obj["data"] = 
list(thelist) + + # Zabbix implementation of json is sensitive to whitespace, + # so no whitespace separation is easiest way to please it. + return json.dumps(obj, separators=(",", ":")) + + +def discover_sr(cur): + data = list(pgzabbix.replication.sr_discovery(cur)) + data = to_zbx(data) + print(" - %s %s" % ("psql.sr.discovery", data)) + + +def discover_db(cur): + data = pgzabbix.discover.db_discovery(cur) + data = to_zbx(data) + print(" - %s %s" % ("psql.discovery", data)) + + +def list_databases_we_can_connect_to_and_fuck_off(cur): + query = ("select datname, pg_database_size(datname) from pg_database " + " where datistemplate = 'f' and " + " has_database_privilege(datname, 'CONNECT')") + cur.execute(query) + return [x[0] for x in cur] + + +def foreach_db(config, perdb_function): + conn_string = "host={host} user={user} password={password} dbname={dbname}" + con = psycopg2.connect(conn_string.format(**config)) + con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + cur = con.cursor() + + databases = list_databases_we_can_connect_to_and_fuck_off(cur) + cur.close() + con.close() + + for db in databases: + config["dbname"] = db + con = psycopg2.connect(conn_string.format(**config)) + con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + cur = con.cursor() + # Py2: yield from perdb_function + for line in perdb_function(cur): + yield line + cur.close() + con.close() + + +def tables_stat(config): + for key, val in foreach_db(config, current_tables): + print("- %s %s" % (key, val)) + + +def discover_tables(config): + """ This function is _special_ in the not quite retarded sense + Pay close attention to the fact that it doesn't take a connection, but + takes a configuration for connection options""" + # Note that zabbix is sometimes retarded and truncates long messages + # then complains about invalid (truncated) json + data = list(foreach_db(config, pgzabbix.discover.tables_discovery)) + data = to_zbx(data) + print(" - %s %s" % ("psql.table.discovery", data)) + + +def discover_all(config, cur): + discover_sr(cur) + discover_db(cur) + discover_tables(config) diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/cmd.py b/build/lib.linux-x86_64-2.7/pgzabbix/cmd.py new file mode 100644 index 0000000..ace807a --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/cmd.py @@ -0,0 +1,83 @@ +# vim: set nobomb: +import argparse +import psycopg2 +import pgzabbix +try: + import ConfigParser as configparser +except ImportError: + import configparser + + +def parseConfig(inifile): + config = configparser.SafeConfigParser() + config.readfp(inifile) + if not config.sections(): + print("No sections in %s. 
Exiting" % inifile) + exit(1) + + opt = {} + for item in ('host', 'password', 'dbname', 'user'): + opt[item] = config.get("postgres", item) + return opt + + +def get_connection(config): + conn_string = "host={host} dbname={dbname} user={user} password={password}" + conn = psycopg2.connect(conn_string.format(**config)) + conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT) + return conn + + +def commandline(): + parser = argparse.ArgumentParser( + prog="PgZabbix", + description="Fiddle with Postgres for Zabbix" + ) + parser.add_argument('--config', + nargs='?', + type=argparse.FileType('r'), + default='/etc/pgzabbix.ini' + ) + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument('--read', action='store_true', default=False) + group.add_argument('--tables', action='store_true', default=False) + group.add_argument('--discover', action='store_true', default=False) + group.add_argument('--discover_tables', action='store_true', default=False) + group.add_argument('--discover_db', action='store_true', default=False) + group.add_argument('--discover_sr', action='store_true', default=False) + args = parser.parse_args() + return args + + +def main(): + args = commandline() + config = parseConfig(args.config) + conn = get_connection(config) + cur = conn.cursor() + + if args.read: + pgzabbix.all_generic(cur) + pgzabbix.all_perdb(cur) + pgzabbix.all_sr(cur) + + if args.tables: + pgzabbix.tables_stat(config) + + if args.discover_db: + pgzabbix.discover_db(cur) + + if args.discover_sr: + pgzabbix.discover_sr(cur) + + if args.discover_tables: + pgzabbix.discover_tables(config) + + if args.discover: + pgzabbix.discover_all(config, cur) + + cur.close() + conn.close() + + +if __name__ == "__main__": + main() diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/database.py b/build/lib.linux-x86_64-2.7/pgzabbix/database.py new file mode 100644 index 0000000..4922f6e --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/database.py @@ -0,0 +1,149 @@ +""" +TODO + +# Needs to connect to the DB to read +UserParameter=pgsql.get.pg.size[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_userdb_funcs.sh pg.size "$2" "$3" "$4" "$5" + +# Needs to connect to the DB, and to get the table name +UserParameter=pgsql.get.pg.stat_table[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_tbl_funcs.sh pg.stat_table "$2" "$3" "$4" "$5" "$6" "$7" +""" + + +def psql_db_size(cur): + query = ("select datname, pg_database_size(datname) from pg_database " + " where datistemplate = 'f' and " + " has_database_privilege(datname, 'CONNECT')") + cur.execute(query) + for row in cur.fetchall(): + yield ("psql.db_size[%s]" % (row[0]), row[1]) + +def psql_db_garbage_ratio(cur): + return () +# cur.execute("select datname, pg_database_size(datname) from pg_database " +# " where datistemplate = 'f'") +# for row in cur.fetchall(): +# yield ("psql.db_size[{0}]".format(row[0]), row[1]) + + +def confl_tablespace(cur): + cur.execute("select datname, confl_tablespace from pg_stat_database_conflicts" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.confl_tablespace[%s]' % (row[0]), row[1]) + +def confl_lock(cur): + cur.execute("select datname, confl_lock from pg_stat_database_conflicts " + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.confl_lock[%s]' % (row[0]), row[1]) + +def confl_snapshot(cur): + cur.execute("select datname, 
confl_snapshot from pg_stat_database_conflicts" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.confl_snapshot[%s]' % (row[0]), row[1]) + +def confl_bufferpin(cur): + cur.execute("select datname, confl_bufferpin from pg_stat_database_conflicts" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.confl_bufferpin[%s]' % (row[0]), row[1]) + +def confl_deadlock(cur): + cur.execute("select datname, confl_deadlock from pg_stat_database_conflicts" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + + for row in cur.fetchall(): + yield ('psql.confl_deadlock[%s]' % (row[0]), row[1]) + +def db_tx_commited(cur): + cur.execute("select datname, xact_commit from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_tx_commited[%s]' % (row[0]), row[1]) + +def db_deadlocks(cur): + vers = cur.connection.server_version + if vers <= 90125: + # Old postgresql version + return + cur.execute("select datname, deadlocks from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + + for row in cur.fetchall(): + yield ('psql.db_deadlocks[%s]' % (row[0]), row[1]) + +def db_tx_rolledback(cur): + cur.execute("select datname, xact_rollback from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_tx_rolledback[%s]' % (row[0]), row[1]) + +def db_temp_bytes(cur): + vers = cur.connection.server_version + if vers <= 90125: + # Old postgresql version + return + cur.execute("select datname, temp_bytes from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_temp_bytes[%s]' %(row[0]), row[1]) + +def db_deleted(cur): + cur.execute("select datname, tup_deleted from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_deleted[%s]' % (row[0]), row[1]) + +def db_fetched(cur): + cur.execute("select datname, tup_fetched from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_fetched[%s]' % (row[0]), row[1]) + +def db_inserted(cur): + cur.execute("select datname, tup_inserted from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_inserted[%s]' % (row[0]), row[1]) + +def db_returned(cur): + cur.execute("select datname, tup_returned from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_returned[%s]' % (row[0]), row[1]) + +def db_updated(cur): + cur.execute("select datname, tup_updated from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.db_updated[%s]' % (row[0]), row[1]) + +def db_connections(cur): + cur.execute("select datname, numbackends from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): 
+ yield ('psql.db_connections[%s]' % (row[0]), row[1]) + +def db_cachehit_ratio(cur): + cur.execute("select datname, round(blks_hit * 100.0 / (blks_hit + greatest(blks_read, 1)), 2)" + " from pg_stat_database" + " inner join pg_database using (datname)" + " where pg_database.datistemplate=False;") + for row in cur.fetchall(): + yield ('psql.cachehit_ratio[%s]' % (row[0]), row[1]) diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/discover.py b/build/lib.linux-x86_64-2.7/pgzabbix/discover.py new file mode 100644 index 0000000..49e8b6d --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/discover.py @@ -0,0 +1,33 @@ +def list_databases(cur): + cur.execute("SELECT datname from pg_database where datistemplate = 'f'") + for row in cur.fetchall(): + yield row[0] + + +# UserParameter=db.list.discovery[*],"$1"/find_dbname.sh "$2" +def db_discovery(cur): + for database in list_databases(cur): + yield {"{#DBNAME}": database} + + +def tables_discovery(cur): + cur.execute("select current_database(), schemaname, tablename " + " from pg_tables " + " where schemaname not in ('pg_catalog','information_schema')") + for row in cur.fetchall(): + yield { + "{#DBNAME}": row[0], + "{#SCHEMANAME}": row[1], + "{#TABLENAME}": row[2], + } + + +""" + proc.num[postgres,,,wal receiver] + proc.num[postgres,,,wal sender] + pgsql.get.pg.sr.status[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF}] + sr.db.list.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}] + sr.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}] + pgsql.get.pg.stat_replication[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF},{#MODE}] + sr.status.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}] +""" diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/dome.py b/build/lib.linux-x86_64-2.7/pgzabbix/dome.py new file mode 100644 index 0000000..31e45cf --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/dome.py @@ -0,0 +1,27 @@ +""" +TODO + +# Needs to connect to the DB to read +UserParameter=pgsql.get.pg.size[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_userdb_funcs.sh pg.size "$2" "$3" "$4" "$5" + +# Needs to connect to the DB, and to get the table name +UserParameter=pgsql.get.pg.stat_table[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_tbl_funcs.sh pg.stat_table "$2" "$3" "$4" "$5" "$6" "$7" + +""" + +""" + proc.num[postgres,,,wal receiver] + proc.num[postgres,,,wal sender] + + pgsql.get.pg.sr.status[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF}] + sr.db.list.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}] + psql.confl_bufferpin[{#DBNAME}] + psql.confl_deadlock[{#DBNAME}] + psql.confl_lock[{#DBNAME}] + psql.confl_snapshot[{#DBNAME}] + psql.confl_tablespace[{#DBNAME}] + sr.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}] + pgsql.get.pg.stat_replication[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF},{#MODE}] + sr.status.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}] + +""" diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/generic.py b/build/lib.linux-x86_64-2.7/pgzabbix/generic.py new file mode 100644 index 0000000..be86363 --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/generic.py @@ -0,0 +1,191 @@ +""" +TODO + +# Needs to connect to the DB to read +UserParameter=pgsql.get.pg.size[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_userdb_funcs.sh pg.size "$2" "$3" "$4" "$5" + +# Needs to connect to the DB, and to get the table name +UserParameter=pgsql.get.pg.stat_table[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_tbl_funcs.sh pg.stat_table 
"$2" "$3" "$4" "$5" "$6" "$7" + +""" + + +def psql_running(cur): + """ Basic check """ + cur.execute("select 1") + for row in cur.fetchall(): + yield ("psql.running", row[0]) + + +def psql_tx_commited(cur): + cur.execute("select sum(xact_commit) from pg_stat_database") + for row in cur.fetchall(): + yield ("psql.tx_commited", row[0]) + + +def psql_tx_rolledback(cur): + cur.execute("select sum(xact_rollback) from pg_stat_database") + for row in cur.fetchall(): + yield ("psql.tx_rolledback", row[0]) + + +def psql_active_connections(cur): + vers = cur.connection.server_version + if vers <= 90125: + # Old postgresql version + cur.execute("select count(*) from pg_stat_activity where current_query <> ''") + else: + cur.execute("select count(*) from pg_stat_activity where state = 'active'") + for row in cur.fetchall(): + yield ("psql.active_connections", row[0]) + + +def psql_server_connections(cur): + cur.execute("select count(*) from pg_stat_activity") + for row in cur.fetchall(): + yield ("psql.server_connections", row[0]) + + +def psql_idle_connections(cur): + vers = cur.connection.server_version + if vers <= 90125: + # Old postgresql version + cur.execute("select count(*) from pg_stat_activity where current_query =''") + else: + cur.execute("select count(*) from pg_stat_activity where state = 'idle';") + for row in cur.fetchall(): + yield ("psql.idle_connections", row[0]) + + +def psql_idle_tx_connections(cur): + vers = cur.connection.server_version + if vers <= 90125: + # Old postgresql version + cur.execute("select count(*) from pg_stat_activity where current_query =' in transaction'") + else: + cur.execute("select count(*) from pg_stat_activity where state = 'idle in transaction'") + for row in cur.fetchall(): + yield ('psql.idle_tx_connections', row[0]) + + +def psql_locks_waiting(cur): + vers = cur.connection.server_version + if vers < 90600: + query = "select count(*) from pg_stat_activity where waiting = 'true'" + else: + query = "select count(*) from pg_stat_activity where wait_event_type in ('Lock', 'LWLock')" + + cur.execute(query) + for row in cur.fetchall(): + yield ("psql.locks_waiting", row[0]) + + +def psql_slow_dml_queries(cur, limit=123): + vers = cur.connection.server_version + if vers <= 90125: + query = ( + "select count(*) from pg_stat_activity where current_query not like '%'" + " and now() - query_start > '%d sec'::interval " + " and current_query ~* '^(insert|update|delete)'") % limit + else: + query = ("select count(*) from pg_stat_activity where state = 'active' " + " and now() - query_start > '%d sec'::interval " + " and query ~* '^(insert|update|delete)'") % limit + cur.execute(query) + for row in cur.fetchall(): + yield ("psql.slow_dml_queries", row[0]) + + +def psql_slow_queries(cur, limit=123): + vers = cur.connection.server_version + if vers <= 90125: + query = ( + "select count(*) from pg_stat_activity where current_query not like '%'" + " and now() - query_start > '%d sec'::interval") % limit + else: + query = ("select count(*) from pg_stat_activity where state = 'active' " + " and now() - query_start > '%d sec'::interval") % limit + cur.execute(query) + for row in cur.fetchall(): + yield ("psql.slow_queries", row[0]) + + +def psql_slow_select_queries(cur, limit=123): + vers = cur.connection.server_version + if vers <= 90125: + query = ( + "select count(*) from pg_stat_activity where current_query ilike 'select%'" + " and now() - query_start > '%d sec'::interval") % limit + else: + query = ("select count(*) from pg_stat_activity where state = 'active' " + " 
and now() - query_start > '%d sec'::interval " + " and query ilike 'select%'") % limit + cur.execute(query) + for row in cur.fetchall(): + yield ("psql.slow_select_queries", row[0]) + + +def psql_server_maxcon(cur): + cur.execute("select setting::int from pg_settings where name = 'max_connections'") + for row in cur.fetchall(): + yield ("psql.server_maxcon", row[0]) + + +def psql_buffers_alloc(cur): + cur.execute("select buffers_alloc from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.buffers_alloc", row[0]) + + +def psql_buffers_backend(cur): + cur.execute("select buffers_backend from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.buffers_backend", row[0]) + + +def psql_buffers_backend_fsync(cur): + cur.execute("select buffers_backend_fsync from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.buffers_backend_fsync", row[0]) + + +def psql_buffers_checkpoint(cur): + cur.execute("select buffers_checkpoint from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.buffers_checkpoint", row[0]) + + +def psql_buffers_clean(cur): + cur.execute("select buffers_clean from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.buffers_clean", row[0]) + + +def psql_checkpoints_req(cur): + cur.execute("select checkpoints_req from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.checkpoints_req", row[0]) + + +def psql_checkpoints_timed(cur): + cur.execute("select checkpoints_timed from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.checkpoints_timed", row[0]) + + +def psql_maxwritten_clean(cur): + cur.execute("select maxwritten_clean from pg_stat_bgwriter") + for row in cur.fetchall(): + yield ("psql.maxwritten_clean", row[0]) + + +def machine_is_primary(cur): + cur.execute("select (NOT(pg_is_in_recovery()))::int") + for row in cur.fetchall(): + yield ("psql.primary_server", row[0]) + + +def machine_is_standby(cur): + cur.execute("select pg_is_in_recovery()::int") + for row in cur.fetchall(): + yield ("psql.standby_server", row[0]) diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/pgstat.py b/build/lib.linux-x86_64-2.7/pgzabbix/pgstat.py new file mode 100644 index 0000000..2b312a2 --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/pgstat.py @@ -0,0 +1,52 @@ + +""" + pg.sr.status: + psql.block_query + pgsql.get.pg.sr.status [scriptdir,confdir,host,agentd_conf] + sr.db.list.discovery [ scriptdir, confdir] + + psql.confl_bufferpin[DBNAME] + psql.confl_deadlock[DBNAME] + psql.confl_lock[DBNAME] + psql.confl_snapshot[DBNAME] + psql.confl_tablespace[DBNAME] + + sr.discovery[ scriptdir, confdir] + pgsql.get.pg.stat_replication + sr.status.discovery[scriptdir, confdir] + psql.replay_diff[SRCLIENT] + psql.sync_priority[SRCLIENT] + psql.write_diff[SRCLIENT] + + + + psql.running[pgscripdir, confdir, last, ] + psql.standby_server[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]",last,0]psql.standby_server[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}] + psql.tx_commited + psql.table_analyze_count[{#DBNAME},{#SCHEMANAME},{#TABLENAME}] + psql.tx_rolledback + + +""" diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/replication.py b/build/lib.linux-x86_64-2.7/pgzabbix/replication.py new file mode 100644 index 0000000..5fb1c46 --- /dev/null +++ b/build/lib.linux-x86_64-2.7/pgzabbix/replication.py @@ -0,0 +1,65 @@ +def view_select(cur): + REPL_VIEWS = ('pg_stat_replication', 'pg_stat_repl') + exists = ("select exists (select 1 from information_schema.tables where" + " table_schema = 'public' and " + " table_name = 'pg_stat_repl')") + 
cur.execute(exists)
+    check = cur.fetchone()
+    return REPL_VIEWS[check[0]]
+
+
+def write_diff(cur):
+    vers = cur.connection.server_version
+    if vers <= 90124:
+        # Postgres 9.1 Doesn't support diffing the xlog locations
+        return
+    elif vers < 100000:
+        query = ("SELECT host(client_addr), "
+                 " pg_xlog_location_diff(sent_location, write_location) "
+                 " from {table}")
+    else:
+        query = ("SELECT host(client_addr), "
+                 " pg_wal_lsn_diff(sent_lsn, write_lsn) "
+                 " from {table}")
+
+    cur.execute(query.format(table=view_select(cur)))
+    for row in cur.fetchall():
+        yield ('psql.write_diff[%s]' % (row[0]), row[1])
+
+def replay_diff(cur):
+    vers = cur.connection.server_version
+    if vers <= 90124:
+        # Postgres 9.1 Doesn't support diffing the xlog locations
+        return
+    elif vers < 100000:
+        query = ("SELECT host(client_addr), "
+                 " pg_xlog_location_diff(sent_location, replay_location) "
+                 " from {table}")
+    else:
+        query = ("SELECT host(client_addr), "
+                 " pg_wal_lsn_diff(sent_lsn, replay_lsn) "
+                 " from {table}")
+
+    cur.execute(query.format(table=view_select(cur)))
+    for row in cur.fetchall():
+        yield ('psql.replay_diff[%s]' % (row[0]), row[1])
+
+def sync_priority(cur):
+    query = ("SELECT host(client_addr), "
+             " sync_priority "
+             " from {table}")
+
+    cur.execute(query.format(table=view_select(cur)))
+    for row in cur.fetchall():
+        yield ('psql.sync_priority[%s]' % (row[0]), row[1])
+
+def sr_discovery(cur):
+    query = ("SELECT client_addr, state from {table};")
+    cur.execute(query.format(table=view_select(cur)))
+    for row in cur.fetchall():
+        # pg_basebackup has no client_addr set when streaming
+        if row[0]:
+            yield {
+                "{#SRCLIENT}": row[0],
+                "{#MODE}": row[1],
+            }
diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/table.py b/build/lib.linux-x86_64-2.7/pgzabbix/table.py
new file mode 100644
index 0000000..82088b2
--- /dev/null
+++ b/build/lib.linux-x86_64-2.7/pgzabbix/table.py
@@ -0,0 +1,153 @@
+
+def psql_table_analyze_count(cur):
+    query = "select current_database(), schemaname, relname, analyze_count from pg_stat_user_tables"
+    out = "psql.table_analyze_count[{0},{1},{2}]"
+    cur.execute(query)
+    for row in cur.fetchall():
+        yield ('psql.table_analyze_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
+
+
+def psql_table_autoanalyze_count(cur):
+    query = "select current_database(), schemaname, relname, autoanalyze_count from pg_stat_user_tables"
+    out = "psql.table_autoanalyze_count[{0},{1},{2}]"
+    cur.execute(query)
+    for row in cur.fetchall():
+        yield ('psql.table_autoanalyze_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
+
+
+def psql_table_autovacuum_count(cur):
+    query = "select current_database(), schemaname, relname, autovacuum_count from pg_stat_user_tables"
+    out = "psql.table_autovacuum_count[{0},{1},{2}]"
+    cur.execute(query)
+    for row in cur.fetchall():
+        yield ('psql.table_autovacuum_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
+
+
+def psql_table_n_dead_tup(cur):
+    query = "select current_database(), schemaname, relname, n_dead_tup from pg_stat_user_tables"
+    out = "psql.table_n_dead_tup[{0},{1},{2}]"
+    cur.execute(query)
+    for row in cur.fetchall():
+        yield ('psql.table_n_dead_tup[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
+
+
+def psql_table_n_tup_del(cur):
+    query = "select current_database(), schemaname, relname, n_tup_del from pg_stat_user_tables"
+    out = "psql.table_n_tup_del[{0},{1},{2}]"
+    cur.execute(query)
+    for row in cur.fetchall():
+        yield ('psql.table_n_tup_del[%s,%s,%s]') % (row[0],row[1],row[2]),row[3]
+
+
+def psql_table_n_tup_hot_upd(cur):
+    query = "select
current_database(), schemaname, relname, n_tup_hot_upd from pg_stat_user_tables" + out = "psql.table_n_tup_hot_upd[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_n_tup_hot_upd[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_idx_scan(cur): + query = "select current_database(), schemaname, relname, coalesce(idx_scan, 0) from pg_stat_user_tables" + out = "psql.table_idx_scan[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_idx_scan[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_seq_tup_read(cur): + query = "select current_database(), schemaname, relname, coalesce(seq_tup_read, 0) from pg_stat_user_tables" + out = "psql.table_seq_tup_read[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_seq_tup_read[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_idx_tup_fetch(cur): + query = "select current_database(), schemaname, relname, coalesce(idx_tup_fetch,0) from pg_stat_user_tables" + out = "psql.table_idx_tup_fetch[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_idx_tup_fetch[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_idx_tup_ins(cur): + query = "select current_database(), schemaname, relname, n_tup_ins from pg_stat_user_tables" + out = "psql.table_n_tup_ins[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_n_tup_ins[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_n_live_tup(cur): + query = "select current_database(), schemaname, relname, n_live_tup from pg_stat_user_tables" + out = "psql.table_n_live_tup[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_n_live_tup[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_seq_scan(cur): + query = "select current_database(), schemaname, relname, seq_scan from pg_stat_user_tables" + out = "psql.table_seq_scan[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_seq_scan[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_n_tup_upd(cur): + query = "select current_database(), schemaname, relname, n_tup_upd from pg_stat_user_tables" + out = "psql.table_n_tup_upd[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_n_tup_upd[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_vacuum_count(cur): + query = "select current_database(), schemaname, relname, vacuum_count from pg_stat_user_tables" + out = "psql.table_vacuum_count[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_vacuum_count[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_total_size(cur): + query = "select current_database(), schemaname, relname, pg_total_relation_size(relid) from pg_stat_user_tables" + out = "psql.table_total_size[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_total_size[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_heap_cachehit_ratio(cur): + query = ("select current_database(), schemaname, relname, " + " round(heap_blks_hit * 100.0 / greatest(heap_blks_hit + heap_blks_read, 1), 2) " + " from pg_statio_user_tables") + out = "psql.table_heap_cachehit_ratio[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + if row[3] is None: + continue + yield ('psql.table_heap_cachehit_ratio[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_idx_cachehit_ratio(cur): + 
query = ("select current_database(), schemaname, relname, " + " round(idx_blks_hit * 100.0 / greatest(idx_blks_hit + idx_blks_read, 1), 2) " + " from pg_statio_user_tables;") + out = "psql.table_idx_cachehit_ratio[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + if row[3] is None: + continue + yield ('psql.table_idx_cachehit_ratio[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] + + +def psql_table_garbage_ratio(cur): + query = ("select current_database(), schemaname, relname, " + " round(n_dead_tup / greatest(n_live_tup + n_dead_tup , 1), 2) " + " from pg_stat_user_tables;") + out = "psql.table_garbage_ratio[{0},{1},{2}]" + cur.execute(query) + for row in cur.fetchall(): + yield ('psql.table_garbage_ratio[%s,%s,%s]') % (row[0],row[1],row[2]),row[3] From a7f184c522580d46e952cd83d7e5944f04f96585 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Piotr=20Gocza=C5=82?= Date: Thu, 16 Jan 2020 12:49:45 +0100 Subject: [PATCH 4/5] Fixed problem with format function --- build/lib.linux-x86_64-2.7/pgzabbix/generic.py | 18 +++++++++--------- pgzabbix/generic.py | 18 +++++++++--------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/generic.py b/build/lib.linux-x86_64-2.7/pgzabbix/generic.py index be86363..22e6988 100644 --- a/build/lib.linux-x86_64-2.7/pgzabbix/generic.py +++ b/build/lib.linux-x86_64-2.7/pgzabbix/generic.py @@ -85,12 +85,12 @@ def psql_slow_dml_queries(cur, limit=123): if vers <= 90125: query = ( "select count(*) from pg_stat_activity where current_query not like '%'" - " and now() - query_start > '%d sec'::interval " - " and current_query ~* '^(insert|update|delete)'") % limit + " and now() - query_start > '{0} sec'::interval " + " and current_query ~* '^(insert|update|delete)'").format(limit) else: query = ("select count(*) from pg_stat_activity where state = 'active' " - " and now() - query_start > '%d sec'::interval " - " and query ~* '^(insert|update|delete)'") % limit + " and now() - query_start > '{0} sec'::interval " + " and query ~* '^(insert|update|delete)'").format(limit) cur.execute(query) for row in cur.fetchall(): yield ("psql.slow_dml_queries", row[0]) @@ -101,10 +101,10 @@ def psql_slow_queries(cur, limit=123): if vers <= 90125: query = ( "select count(*) from pg_stat_activity where current_query not like '%'" - " and now() - query_start > '%d sec'::interval") % limit + " and now() - query_start > '{0} sec'::interval").format(limit) else: query = ("select count(*) from pg_stat_activity where state = 'active' " - " and now() - query_start > '%d sec'::interval") % limit + " and now() - query_start > '{0} sec'::interval").format(limit) cur.execute(query) for row in cur.fetchall(): yield ("psql.slow_queries", row[0]) @@ -115,11 +115,11 @@ def psql_slow_select_queries(cur, limit=123): if vers <= 90125: query = ( "select count(*) from pg_stat_activity where current_query ilike 'select%'" - " and now() - query_start > '%d sec'::interval") % limit + " and now() - query_start > '{0} sec'::interval").format(limit) else: query = ("select count(*) from pg_stat_activity where state = 'active' " - " and now() - query_start > '%d sec'::interval " - " and query ilike 'select%'") % limit + " and now() - query_start > '{0} sec'::interval " + " and query ilike 'select%'").format(limit) cur.execute(query) for row in cur.fetchall(): yield ("psql.slow_select_queries", row[0]) diff --git a/pgzabbix/generic.py b/pgzabbix/generic.py index be86363..22e6988 100644 --- a/pgzabbix/generic.py +++ b/pgzabbix/generic.py @@ -85,12 +85,12 @@ def 
psql_slow_dml_queries(cur, limit=123):
     if vers <= 90125:
         query = (
             "select count(*) from pg_stat_activity where current_query not like '%'"
-            " and now() - query_start > '%d sec'::interval "
-            " and current_query ~* '^(insert|update|delete)'") % limit
+            " and now() - query_start > '{0} sec'::interval "
+            " and current_query ~* '^(insert|update|delete)'").format(limit)
     else:
         query = ("select count(*) from pg_stat_activity where state = 'active' "
-                 " and now() - query_start > '%d sec'::interval "
-                 " and query ~* '^(insert|update|delete)'") % limit
+                 " and now() - query_start > '{0} sec'::interval "
+                 " and query ~* '^(insert|update|delete)'").format(limit)
     cur.execute(query)
     for row in cur.fetchall():
         yield ("psql.slow_dml_queries", row[0])
@@ -101,10 +101,10 @@ def psql_slow_queries(cur, limit=123):
     if vers <= 90125:
         query = (
             "select count(*) from pg_stat_activity where current_query not like '%'"
-            " and now() - query_start > '%d sec'::interval") % limit
+            " and now() - query_start > '{0} sec'::interval").format(limit)
     else:
         query = ("select count(*) from pg_stat_activity where state = 'active' "
-                 " and now() - query_start > '%d sec'::interval") % limit
+                 " and now() - query_start > '{0} sec'::interval").format(limit)
     cur.execute(query)
     for row in cur.fetchall():
         yield ("psql.slow_queries", row[0])
@@ -115,11 +115,11 @@ def psql_slow_select_queries(cur, limit=123):
     if vers <= 90125:
         query = (
             "select count(*) from pg_stat_activity where current_query ilike 'select%'"
-            " and now() - query_start > '%d sec'::interval") % limit
+            " and now() - query_start > '{0} sec'::interval").format(limit)
     else:
         query = ("select count(*) from pg_stat_activity where state = 'active' "
-                 " and now() - query_start > '%d sec'::interval "
-                 " and query ilike 'select%'") % limit
+                 " and now() - query_start > '{0} sec'::interval "
+                 " and query ilike 'select%'").format(limit)
     cur.execute(query)
     for row in cur.fetchall():
         yield ("psql.slow_select_queries", row[0])

From 1252e1121729fc444d609509b708b947f2d987e6 Mon Sep 17 00:00:00 2001
From: Piotr Goczal
Date: Thu, 10 Apr 2025 20:10:32 +0200
Subject: [PATCH 5/5] Fix for python 3.12 and new names for methods in configparser module

---
 README.md                                          |  2 +-
 build/{lib.linux-x86_64-2.7 => lib}/__main__.py    |  0
 .../pgzabbix/__init__.py                           |  0
 .../{lib.linux-x86_64-2.7 => lib}/pgzabbix/cmd.py  | 14 ++++++++++----
 .../pgzabbix/database.py                           |  0
 .../pgzabbix/discover.py                           |  0
 .../{lib.linux-x86_64-2.7 => lib}/pgzabbix/dome.py |  0
 .../pgzabbix/generic.py                            |  0
 .../pgzabbix/pgstat.py                             |  0
 .../pgzabbix/replication.py                        |  0
 .../pgzabbix/table.py                              |  0
 pgzabbix/cmd.py                                    | 14 ++++++++++----
 12 files changed, 21 insertions(+), 9 deletions(-)
 rename build/{lib.linux-x86_64-2.7 => lib}/__main__.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/__init__.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/cmd.py (90%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/database.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/discover.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/dome.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/generic.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/pgstat.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/replication.py (100%)
 rename build/{lib.linux-x86_64-2.7 => lib}/pgzabbix/table.py (100%)

diff --git a/README.md b/README.md
index a04ad5c..b1476af 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-PgZabbix (python 2.6 compatible version)
+PgZabbix (python 2.6, 3.X,
3.12 compatible version) Suck some stats out of Postgres, and inject it into Zabbix. Mostly based on pg_monz but not written in shell script. diff --git a/build/lib.linux-x86_64-2.7/__main__.py b/build/lib/__main__.py similarity index 100% rename from build/lib.linux-x86_64-2.7/__main__.py rename to build/lib/__main__.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/__init__.py b/build/lib/pgzabbix/__init__.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/__init__.py rename to build/lib/pgzabbix/__init__.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/cmd.py b/build/lib/pgzabbix/cmd.py similarity index 90% rename from build/lib.linux-x86_64-2.7/pgzabbix/cmd.py rename to build/lib/pgzabbix/cmd.py index ace807a..6ec1462 100644 --- a/build/lib.linux-x86_64-2.7/pgzabbix/cmd.py +++ b/build/lib/pgzabbix/cmd.py @@ -7,10 +7,17 @@ except ImportError: import configparser - def parseConfig(inifile): - config = configparser.SafeConfigParser() - config.readfp(inifile) + try: + config = configparser.SafeConfigParser() + except AttributeError: + config = configparser.ConfigParser() + + try: + config.read_file(inifile) + except AttributeError: + config.readfp(inifile) + if not config.sections(): print("No sections in %s. Exiting" % inifile) exit(1) @@ -20,7 +27,6 @@ def parseConfig(inifile): opt[item] = config.get("postgres", item) return opt - def get_connection(config): conn_string = "host={host} dbname={dbname} user={user} password={password}" conn = psycopg2.connect(conn_string.format(**config)) diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/database.py b/build/lib/pgzabbix/database.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/database.py rename to build/lib/pgzabbix/database.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/discover.py b/build/lib/pgzabbix/discover.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/discover.py rename to build/lib/pgzabbix/discover.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/dome.py b/build/lib/pgzabbix/dome.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/dome.py rename to build/lib/pgzabbix/dome.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/generic.py b/build/lib/pgzabbix/generic.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/generic.py rename to build/lib/pgzabbix/generic.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/pgstat.py b/build/lib/pgzabbix/pgstat.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/pgstat.py rename to build/lib/pgzabbix/pgstat.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/replication.py b/build/lib/pgzabbix/replication.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/replication.py rename to build/lib/pgzabbix/replication.py diff --git a/build/lib.linux-x86_64-2.7/pgzabbix/table.py b/build/lib/pgzabbix/table.py similarity index 100% rename from build/lib.linux-x86_64-2.7/pgzabbix/table.py rename to build/lib/pgzabbix/table.py diff --git a/pgzabbix/cmd.py b/pgzabbix/cmd.py index ace807a..6ec1462 100644 --- a/pgzabbix/cmd.py +++ b/pgzabbix/cmd.py @@ -7,10 +7,17 @@ except ImportError: import configparser - def parseConfig(inifile): - config = configparser.SafeConfigParser() - config.readfp(inifile) + try: + config = configparser.SafeConfigParser() + except AttributeError: + config = configparser.ConfigParser() + + try: + config.read_file(inifile) + except AttributeError: + config.readfp(inifile) + if not config.sections(): print("No sections 
in %s. Exiting" % inifile) exit(1) @@ -20,7 +27,6 @@ def parseConfig(inifile): opt[item] = config.get("postgres", item) return opt - def get_connection(config): conn_string = "host={host} dbname={dbname} user={user} password={password}" conn = psycopg2.connect(conn_string.format(**config))
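The parseConfig() changes above guard against two removals that landed in Python 3.12: the SafeConfigParser alias (deprecated since 3.2) and the readfp() method (replaced by read_file() in 3.2). Probing with try/except AttributeError keeps one code path working from Python 2.6 through 3.12. A condensed sketch of the same pattern (illustrative; the helper names are made up, only the configparser calls come from the patch):

    try:
        import ConfigParser as configparser   # Python 2 module name
    except ImportError:
        import configparser                   # Python 3 module name

    def make_parser():
        try:
            return configparser.SafeConfigParser()   # exists up to Python 3.11
        except AttributeError:
            return configparser.ConfigParser()       # 3.12+: the alias was removed

    def read_ini(parser, fileobj):
        try:
            parser.read_file(fileobj)   # Python 3.2+
        except AttributeError:
            parser.readfp(fileobj)      # Python 2.6/2.7 spelling
        return parser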