diff --git a/README.md b/README.md
index b1c3bda..b1476af 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,9 @@
-PgZabbix
+PgZabbix (Python 2.6 and 3.x, up to 3.12, compatible version)
Suck some stats out of Postgres, and inject it into Zabbix. Mostly based on
pg_monz but not written in shell script.
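+
+Run it from cron or a Zabbix agent UserParameter, for example (invocation
+illustrative -- packaging determines the exact entry point; the flags are
+defined in cmd.py):
+
+    python -m pgzabbix --config /etc/pgzabbix.ini --read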
diff --git a/build/lib/__main__.py b/build/lib/__main__.py
new file mode 100644
index 0000000..7c410ce
--- /dev/null
+++ b/build/lib/__main__.py
@@ -0,0 +1,4 @@
+import pgzabbix.cmd
+
+if __name__ == "__main__":
+ pgzabbix.cmd.main()
diff --git a/build/lib/pgzabbix/__init__.py b/build/lib/pgzabbix/__init__.py
new file mode 100644
index 0000000..820e814
--- /dev/null
+++ b/build/lib/pgzabbix/__init__.py
@@ -0,0 +1,173 @@
+import json
+import pgzabbix.generic
+import pgzabbix.discover
+import pgzabbix.database
+import pgzabbix.replication
+import pgzabbix.table
+import psycopg2
+
+
+def all_generic(cur):
+ for fun in (
+ pgzabbix.generic.psql_running,
+ pgzabbix.generic.machine_is_primary,
+ pgzabbix.generic.machine_is_standby,
+ pgzabbix.generic.psql_active_connections,
+ pgzabbix.generic.psql_buffers_alloc,
+ pgzabbix.generic.psql_buffers_backend,
+ pgzabbix.generic.psql_buffers_backend_fsync,
+ pgzabbix.generic.psql_buffers_checkpoint,
+ pgzabbix.generic.psql_buffers_clean,
+ pgzabbix.generic.psql_checkpoints_req,
+ pgzabbix.generic.psql_checkpoints_timed,
+ pgzabbix.generic.psql_idle_connections,
+ pgzabbix.generic.psql_idle_tx_connections,
+ pgzabbix.generic.psql_locks_waiting,
+ pgzabbix.generic.psql_maxwritten_clean,
+ pgzabbix.generic.psql_server_connections,
+ pgzabbix.generic.psql_server_maxcon,
+ pgzabbix.generic.psql_slow_dml_queries,
+ pgzabbix.generic.psql_slow_queries,
+ pgzabbix.generic.psql_slow_select_queries,
+ pgzabbix.generic.psql_tx_commited,
+ pgzabbix.generic.psql_tx_rolledback,
+ ):
+ for key, val in fun(cur):
+ print("- %s %s" % (key, val))
+
+
+def all_perdb(cur):
+ for fun in (
+ pgzabbix.database.psql_db_size,
+ pgzabbix.database.psql_db_garbage_ratio,
+ pgzabbix.database.confl_tablespace,
+ pgzabbix.database.confl_lock,
+ pgzabbix.database.confl_snapshot,
+ pgzabbix.database.confl_bufferpin,
+ pgzabbix.database.confl_deadlock,
+ pgzabbix.database.db_tx_commited,
+ pgzabbix.database.db_deadlocks,
+ pgzabbix.database.db_tx_rolledback,
+ pgzabbix.database.db_temp_bytes,
+ pgzabbix.database.db_deleted,
+ pgzabbix.database.db_fetched,
+ pgzabbix.database.db_inserted,
+ pgzabbix.database.db_returned,
+ pgzabbix.database.db_updated,
+ pgzabbix.database.db_connections,
+ pgzabbix.database.db_cachehit_ratio,
+ ):
+ for key, val in fun(cur):
+ print("- %s %s" % (key, val))
+
+
+def all_sr(cur):
+ for fun in (
+ pgzabbix.replication.write_diff,
+ pgzabbix.replication.replay_diff,
+ pgzabbix.replication.sync_priority,
+ ):
+ for key, val in fun(cur):
+ print("- %s %s" % (key, val))
+
+
+def current_tables(cur):
+ for fun in (
+ pgzabbix.table.psql_table_analyze_count,
+ pgzabbix.table.psql_table_autoanalyze_count,
+ pgzabbix.table.psql_table_autovacuum_count,
+ pgzabbix.table.psql_table_garbage_ratio,
+ pgzabbix.table.psql_table_heap_cachehit_ratio,
+ pgzabbix.table.psql_table_idx_cachehit_ratio,
+ pgzabbix.table.psql_table_idx_scan,
+ pgzabbix.table.psql_table_idx_tup_fetch,
+ pgzabbix.table.psql_table_idx_tup_ins,
+ pgzabbix.table.psql_table_n_dead_tup,
+ pgzabbix.table.psql_table_n_live_tup,
+ pgzabbix.table.psql_table_n_tup_del,
+ pgzabbix.table.psql_table_n_tup_hot_upd,
+ pgzabbix.table.psql_table_n_tup_upd,
+ pgzabbix.table.psql_table_seq_scan,
+ pgzabbix.table.psql_table_seq_tup_read,
+ pgzabbix.table.psql_table_total_size,
+ pgzabbix.table.psql_table_vacuum_count,
+ ):
+ for line in fun(cur):
+ yield line
+
+
+def to_zbx(thelist):
+ obj = {}
+ obj["data"] = list(thelist)
+
+ # Zabbix's JSON parser is sensitive to whitespace, so emitting the
+ # object with no separator whitespace is the easiest way to please it.
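+ # e.g. (hypothetical input): to_zbx([{"{#DBNAME}": "postgres"}]) returns
+ # '{"data":[{"{#DBNAME}":"postgres"}]}'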
+ return json.dumps(obj, separators=(",", ":"))
+
+
+def discover_sr(cur):
+ data = list(pgzabbix.replication.sr_discovery(cur))
+ data = to_zbx(data)
+ print(" - %s %s" % ("psql.sr.discovery", data))
+
+
+def discover_db(cur):
+ data = pgzabbix.discover.db_discovery(cur)
+ data = to_zbx(data)
+ print(" - %s %s" % ("psql.discovery", data))
+
+
+def list_databases_we_can_connect_to_and_fuck_off(cur):
+ query = ("select datname, pg_database_size(datname) from pg_database "
+ " where datistemplate = 'f' and "
+ " has_database_privilege(datname, 'CONNECT')")
+ cur.execute(query)
+ return [x[0] for x in cur]
+
+
+def foreach_db(config, perdb_function):
+ conn_string = "host={host} user={user} password={password} dbname={dbname}"
+ con = psycopg2.connect(conn_string.format(**config))
+ con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cur = con.cursor()
+
+ databases = list_databases_we_can_connect_to_and_fuck_off(cur)
+ cur.close()
+ con.close()
+
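+ # A fresh connection per database: table-level views such as
+ # pg_stat_user_tables only cover the database you are connected to.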
+ for db in databases:
+ config["dbname"] = db
+ con = psycopg2.connect(conn_string.format(**config))
+ con.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ cur = con.cursor()
+ # Explicit loop rather than "yield from" to keep Python 2 compatibility
+ for line in perdb_function(cur):
+ yield line
+ cur.close()
+ con.close()
+
+
+def tables_stat(config):
+ for key, val in foreach_db(config, current_tables):
+ print("- %s %s" % (key, val))
+
+
+def discover_tables(config):
+ """ This function is _special_ in the not quite retarded sense
+ Pay close attention to the fact that it doesn't take a connection, but
+ takes a configuration for connection options"""
+ # Note that Zabbix sometimes truncates long messages and then
+ # complains about the (truncated) JSON being invalid
+ data = list(foreach_db(config, pgzabbix.discover.tables_discovery))
+ data = to_zbx(data)
+ print(" - %s %s" % ("psql.table.discovery", data))
+
+
+def discover_all(config, cur):
+ discover_sr(cur)
+ discover_db(cur)
+ discover_tables(config)
diff --git a/build/lib/pgzabbix/cmd.py b/build/lib/pgzabbix/cmd.py
new file mode 100644
index 0000000..6ec1462
--- /dev/null
+++ b/build/lib/pgzabbix/cmd.py
@@ -0,0 +1,98 @@
+# vim: set nobomb:
+import argparse
+import psycopg2
+import pgzabbix
+try:
+ import ConfigParser as configparser
+except ImportError:
+ import configparser
+
+def parseConfig(inifile):
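+ # Python 3.12 removed SafeConfigParser (and readfp); older Pythons,
+ # including 2.x, still want them -- hence the AttributeError fallbacks.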
+ try:
+ config = configparser.SafeConfigParser()
+ except AttributeError:
+ config = configparser.ConfigParser()
+
+ try:
+ config.read_file(inifile)
+ except AttributeError:
+ config.readfp(inifile)
+
+ if not config.sections():
+ print("No sections in %s. Exiting" % inifile)
+ exit(1)
+
+ opt = {}
+ for item in ('host', 'password', 'dbname', 'user'):
+ opt[item] = config.get("postgres", item)
+ return opt
+
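+# A minimal /etc/pgzabbix.ini that parseConfig accepts (values illustrative):
+#   [postgres]
+#   host = localhost
+#   dbname = postgres
+#   user = zabbix
+#   password = secret
+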
+def get_connection(config):
+ conn_string = "host={host} dbname={dbname} user={user} password={password}"
+ conn = psycopg2.connect(conn_string.format(**config))
+ conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+ return conn
+
+
+def commandline():
+ parser = argparse.ArgumentParser(
+ prog="PgZabbix",
+ description="Fiddle with Postgres for Zabbix"
+ )
+ parser.add_argument('--config',
+ nargs='?',
+ type=argparse.FileType('r'),
+ default='/etc/pgzabbix.ini'
+ )
+ group = parser.add_mutually_exclusive_group(required=False)
+ group.add_argument('--read', action='store_true', default=False)
+ group.add_argument('--tables', action='store_true', default=False)
+ group.add_argument('--discover', action='store_true', default=False)
+ group.add_argument('--discover_tables', action='store_true', default=False)
+ group.add_argument('--discover_db', action='store_true', default=False)
+ group.add_argument('--discover_sr', action='store_true', default=False)
+ args = parser.parse_args()
+ return args
+
+
+def main():
+ args = commandline()
+ config = parseConfig(args.config)
+ conn = get_connection(config)
+ cur = conn.cursor()
+
+ if args.read:
+ pgzabbix.all_generic(cur)
+ pgzabbix.all_perdb(cur)
+ pgzabbix.all_sr(cur)
+
+ if args.tables:
+ pgzabbix.tables_stat(config)
+
+ if args.discover_db:
+ pgzabbix.discover_db(cur)
+
+ if args.discover_sr:
+ pgzabbix.discover_sr(cur)
+
+ if args.discover_tables:
+ pgzabbix.discover_tables(config)
+
+ if args.discover:
+ pgzabbix.discover_all(config, cur)
+
+ cur.close()
+ conn.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/build/lib/pgzabbix/database.py b/build/lib/pgzabbix/database.py
new file mode 100644
index 0000000..4922f6e
--- /dev/null
+++ b/build/lib/pgzabbix/database.py
@@ -0,0 +1,150 @@
+"""
+TODO
+
+# Needs to connect to the DB to read
+UserParameter=pgsql.get.pg.size[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_userdb_funcs.sh pg.size "$2" "$3" "$4" "$5"
+
+# Needs to connect to the DB, and to get the table name
+UserParameter=pgsql.get.pg.stat_table[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_tbl_funcs.sh pg.stat_table "$2" "$3" "$4" "$5" "$6" "$7"
+"""
+
+
+def psql_db_size(cur):
+ query = ("select datname, pg_database_size(datname) from pg_database "
+ " where datistemplate = 'f' and "
+ " has_database_privilege(datname, 'CONNECT')")
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.db_size[%s]" % (row[0]), row[1])
+
+def psql_db_garbage_ratio(cur):
+ return ()
+# cur.execute("select datname, pg_database_size(datname) from pg_database "
+# " where datistemplate = 'f'")
+# for row in cur.fetchall():
+# yield ("psql.db_size[{0}]".format(row[0]), row[1])
+
+
+def confl_tablespace(cur):
+ cur.execute("select datname, confl_tablespace from pg_stat_database_conflicts"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.confl_tablespace[%s]' % (row[0]), row[1])
+
+def confl_lock(cur):
+ cur.execute("select datname, confl_lock from pg_stat_database_conflicts "
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.confl_lock[%s]' % (row[0]), row[1])
+
+def confl_snapshot(cur):
+ cur.execute("select datname, confl_snapshot from pg_stat_database_conflicts"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.confl_snapshot[%s]' % (row[0]), row[1])
+
+def confl_bufferpin(cur):
+ cur.execute("select datname, confl_bufferpin from pg_stat_database_conflicts"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.confl_bufferpin[%s]' % (row[0]), row[1])
+
+def confl_deadlock(cur):
+ cur.execute("select datname, confl_deadlock from pg_stat_database_conflicts"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+
+ for row in cur.fetchall():
+ yield ('psql.confl_deadlock[%s]' % (row[0]), row[1])
+
+def db_tx_commited(cur):
+ cur.execute("select datname, xact_commit from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_tx_commited[%s]' % (row[0]), row[1])
+
+def db_deadlocks(cur):
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ # deadlocks was added to pg_stat_database in 9.2; skip on older servers
+ return
+ cur.execute("select datname, deadlocks from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+
+ for row in cur.fetchall():
+ yield ('psql.db_deadlocks[%s]' % (row[0]), row[1])
+
+def db_tx_rolledback(cur):
+ cur.execute("select datname, xact_rollback from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_tx_rolledback[%s]' % (row[0]), row[1])
+
+def db_temp_bytes(cur):
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ # temp_bytes was added to pg_stat_database in 9.2; skip on older servers
+ return
+ cur.execute("select datname, temp_bytes from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_temp_bytes[%s]' % (row[0]), row[1])
+
+def db_deleted(cur):
+ cur.execute("select datname, tup_deleted from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_deleted[%s]' % (row[0]), row[1])
+
+def db_fetched(cur):
+ cur.execute("select datname, tup_fetched from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_fetched[%s]' % (row[0]), row[1])
+
+def db_inserted(cur):
+ cur.execute("select datname, tup_inserted from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_inserted[%s]' % (row[0]), row[1])
+
+def db_returned(cur):
+ cur.execute("select datname, tup_returned from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_returned[%s]' % (row[0]), row[1])
+
+def db_updated(cur):
+ cur.execute("select datname, tup_updated from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_updated[%s]' % (row[0]), row[1])
+
+def db_connections(cur):
+ cur.execute("select datname, numbackends from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.db_connections[%s]' % (row[0]), row[1])
+
+def db_cachehit_ratio(cur):
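+ # greatest(blks_read, 1) keeps the denominator non-zero for unread DBs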
+ cur.execute("select datname, round(blks_hit * 100.0 / (blks_hit + greatest(blks_read, 1)), 2)"
+ " from pg_stat_database"
+ " inner join pg_database using (datname)"
+ " where pg_database.datistemplate=False;")
+ for row in cur.fetchall():
+ yield ('psql.cachehit_ratio[%s]' % (row[0]), row[1])
diff --git a/build/lib/pgzabbix/discover.py b/build/lib/pgzabbix/discover.py
new file mode 100644
index 0000000..49e8b6d
--- /dev/null
+++ b/build/lib/pgzabbix/discover.py
@@ -0,0 +1,33 @@
+def list_databases(cur):
+ cur.execute("SELECT datname from pg_database where datistemplate = 'f'")
+ for row in cur.fetchall():
+ yield row[0]
+
+
+# UserParameter=db.list.discovery[*],"$1"/find_dbname.sh "$2"
+def db_discovery(cur):
+ for database in list_databases(cur):
+ yield {"{#DBNAME}": database}
+
+
+def tables_discovery(cur):
+ cur.execute("select current_database(), schemaname, tablename "
+ " from pg_tables "
+ " where schemaname not in ('pg_catalog','information_schema')")
+ for row in cur.fetchall():
+ yield {
+ "{#DBNAME}": row[0],
+ "{#SCHEMANAME}": row[1],
+ "{#TABLENAME}": row[2],
+ }
+
+
+"""
+ proc.num[postgres,,,wal receiver]
+ proc.num[postgres,,,wal sender]
+ pgsql.get.pg.sr.status[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF}]
+ sr.db.list.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]
+ sr.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]
+ pgsql.get.pg.stat_replication[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF},{#MODE}]
+ sr.status.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]
+"""
diff --git a/build/lib/pgzabbix/dome.py b/build/lib/pgzabbix/dome.py
new file mode 100644
index 0000000..31e45cf
--- /dev/null
+++ b/build/lib/pgzabbix/dome.py
@@ -0,0 +1,27 @@
+"""
+TODO
+
+# Needs to connect to the DB to read
+UserParameter=pgsql.get.pg.size[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_userdb_funcs.sh pg.size "$2" "$3" "$4" "$5"
+
+# Needs to connect to the DB, and to get the table name
+UserParameter=pgsql.get.pg.stat_table[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_tbl_funcs.sh pg.stat_table "$2" "$3" "$4" "$5" "$6" "$7"
+
+"""
+
+"""
+ proc.num[postgres,,,wal receiver]
+ proc.num[postgres,,,wal sender]
+
+ pgsql.get.pg.sr.status[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF}]
+ sr.db.list.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]
+ psql.confl_bufferpin[{#DBNAME}]
+ psql.confl_deadlock[{#DBNAME}]
+ psql.confl_lock[{#DBNAME}]
+ psql.confl_snapshot[{#DBNAME}]
+ psql.confl_tablespace[{#DBNAME}]
+ sr.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]
+ pgsql.get.pg.stat_replication[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR},{HOST.HOST},{$ZABBIX_AGENTD_CONF},{#MODE}]
+ sr.status.discovery[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]
+
+"""
diff --git a/build/lib/pgzabbix/generic.py b/build/lib/pgzabbix/generic.py
new file mode 100644
index 0000000..22e6988
--- /dev/null
+++ b/build/lib/pgzabbix/generic.py
@@ -0,0 +1,194 @@
+"""
+TODO
+
+# Needs to connect to the DB to read
+UserParameter=pgsql.get.pg.size[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_userdb_funcs.sh pg.size "$2" "$3" "$4" "$5"
+
+# Needs to connect to the DB, and to get the table name
+UserParameter=pgsql.get.pg.stat_table[*-,-,hostname,-,dbname,schemaname, tablename],"$1"/pgsql_tbl_funcs.sh pg.stat_table "$2" "$3" "$4" "$5" "$6" "$7"
+
+"""
+
+
+def psql_running(cur):
+ """ Basic check """
+ cur.execute("select 1")
+ for row in cur.fetchall():
+ yield ("psql.running", row[0])
+
+
+def psql_tx_commited(cur):
+ cur.execute("select sum(xact_commit) from pg_stat_database")
+ for row in cur.fetchall():
+ yield ("psql.tx_commited", row[0])
+
+
+def psql_tx_rolledback(cur):
+ cur.execute("select sum(xact_rollback) from pg_stat_database")
+ for row in cur.fetchall():
+ yield ("psql.tx_rolledback", row[0])
+
+
+def psql_active_connections(cur):
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ # pre-9.2: no "state" column; non-idle backends lack the '<IDLE>' marker
+ cur.execute("select count(*) from pg_stat_activity where current_query <> '<IDLE>'")
+ else:
+ cur.execute("select count(*) from pg_stat_activity where state = 'active'")
+ for row in cur.fetchall():
+ yield ("psql.active_connections", row[0])
+
+
+def psql_server_connections(cur):
+ cur.execute("select count(*) from pg_stat_activity")
+ for row in cur.fetchall():
+ yield ("psql.server_connections", row[0])
+
+
+def psql_idle_connections(cur):
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ # pre-9.2: idle backends show the '<IDLE>' marker in current_query
+ cur.execute("select count(*) from pg_stat_activity where current_query = '<IDLE>'")
+ else:
+ cur.execute("select count(*) from pg_stat_activity where state = 'idle';")
+ for row in cur.fetchall():
+ yield ("psql.idle_connections", row[0])
+
+
+def psql_idle_tx_connections(cur):
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ # pre-9.2: the marker is '<IDLE> in transaction'
+ cur.execute("select count(*) from pg_stat_activity where current_query = '<IDLE> in transaction'")
+ else:
+ cur.execute("select count(*) from pg_stat_activity where state = 'idle in transaction'")
+ for row in cur.fetchall():
+ yield ('psql.idle_tx_connections', row[0])
+
+
+def psql_locks_waiting(cur):
+ vers = cur.connection.server_version
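+ # pg_stat_activity.waiting was replaced by wait_event_type/wait_event
+ # in PostgreSQL 9.6, hence the version switch below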
+ if vers < 90600:
+ query = "select count(*) from pg_stat_activity where waiting = 'true'"
+ else:
+ query = "select count(*) from pg_stat_activity where wait_event_type in ('Lock', 'LWLock')"
+
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.locks_waiting", row[0])
+
+
+def psql_slow_dml_queries(cur, limit=123):
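+ # "limit" is the slow-query threshold in seconds (123s by default here)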
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ query = (
+ "select count(*) from pg_stat_activity where current_query not like '%'"
+ " and now() - query_start > '{0} sec'::interval "
+ " and current_query ~* '^(insert|update|delete)'").format(limit)
+ else:
+ query = ("select count(*) from pg_stat_activity where state = 'active' "
+ " and now() - query_start > '{0} sec'::interval "
+ " and query ~* '^(insert|update|delete)'").format(limit)
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.slow_dml_queries", row[0])
+
+
+def psql_slow_queries(cur, limit=123):
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ query = (
+ "select count(*) from pg_stat_activity where current_query not like '%'"
+ " and now() - query_start > '{0} sec'::interval").format(limit)
+ else:
+ query = ("select count(*) from pg_stat_activity where state = 'active' "
+ " and now() - query_start > '{0} sec'::interval").format(limit)
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.slow_queries", row[0])
+
+
+def psql_slow_select_queries(cur, limit=123):
+ vers = cur.connection.server_version
+ if vers <= 90125:
+ query = (
+ "select count(*) from pg_stat_activity where current_query ilike 'select%'"
+ " and now() - query_start > '{0} sec'::interval").format(limit)
+ else:
+ query = ("select count(*) from pg_stat_activity where state = 'active' "
+ " and now() - query_start > '{0} sec'::interval "
+ " and query ilike 'select%'").format(limit)
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.slow_select_queries", row[0])
+
+
+def psql_server_maxcon(cur):
+ cur.execute("select setting::int from pg_settings where name = 'max_connections'")
+ for row in cur.fetchall():
+ yield ("psql.server_maxcon", row[0])
+
+
+def psql_buffers_alloc(cur):
+ cur.execute("select buffers_alloc from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.buffers_alloc", row[0])
+
+
+def psql_buffers_backend(cur):
+ cur.execute("select buffers_backend from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.buffers_backend", row[0])
+
+
+def psql_buffers_backend_fsync(cur):
+ cur.execute("select buffers_backend_fsync from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.buffers_backend_fsync", row[0])
+
+
+def psql_buffers_checkpoint(cur):
+ cur.execute("select buffers_checkpoint from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.buffers_checkpoint", row[0])
+
+
+def psql_buffers_clean(cur):
+ cur.execute("select buffers_clean from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.buffers_clean", row[0])
+
+
+def psql_checkpoints_req(cur):
+ cur.execute("select checkpoints_req from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.checkpoints_req", row[0])
+
+
+def psql_checkpoints_timed(cur):
+ cur.execute("select checkpoints_timed from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.checkpoints_timed", row[0])
+
+
+def psql_maxwritten_clean(cur):
+ cur.execute("select maxwritten_clean from pg_stat_bgwriter")
+ for row in cur.fetchall():
+ yield ("psql.maxwritten_clean", row[0])
+
+
+def machine_is_primary(cur):
+ cur.execute("select (NOT(pg_is_in_recovery()))::int")
+ for row in cur.fetchall():
+ yield ("psql.primary_server", row[0])
+
+
+def machine_is_standby(cur):
+ cur.execute("select pg_is_in_recovery()::int")
+ for row in cur.fetchall():
+ yield ("psql.standby_server", row[0])
diff --git a/build/lib/pgzabbix/pgstat.py b/build/lib/pgzabbix/pgstat.py
new file mode 100644
index 0000000..2b312a2
--- /dev/null
+++ b/build/lib/pgzabbix/pgstat.py
@@ -0,0 +1,52 @@
+
+"""
+ pg.sr.status:
+ psql.block_query
+ pgsql.get.pg.sr.status [scriptdir,confdir,host,agentd_conf]
+ sr.db.list.discovery [ scriptdir, confdir]
+
+ psql.confl_bufferpin[DBNAME]
+ psql.confl_deadlock[DBNAME]
+ psql.confl_lock[DBNAME]
+ psql.confl_snapshot[DBNAME]
+ psql.confl_tablespace[DBNAME]
+
+ sr.discovery[ scriptdir, confdir]
+ pgsql.get.pg.stat_replication
+ sr.status.discovery[scriptdir, confdir]
+ psql.replay_diff[SRCLIENT]
+ psql.sync_priority[SRCLIENT]
+ psql.write_diff[SRCLIENT]
+
+
+
+ psql.running[pgscriptdir, confdir, last, ]
+ psql.standby_server[{$PGSCRIPTDIR},{$PGSCRIPT_CONFDIR}]
+ psql.tx_commited
+ psql.table_analyze_count[{#DBNAME},{#SCHEMANAME},{#TABLENAME}]
+ psql.tx_rolledback
+
+
+"""
diff --git a/build/lib/pgzabbix/replication.py b/build/lib/pgzabbix/replication.py
new file mode 100644
index 0000000..5fb1c46
--- /dev/null
+++ b/build/lib/pgzabbix/replication.py
@@ -0,0 +1,68 @@
+def view_select(cur):
+ REPL_VIEWS = ('pg_stat_replication', 'pg_stat_repl')
+ exists = ("select exists (select 1 from information_schema.tables where"
+ " table_schema = 'public' and "
+ " table_name = 'pg_stat_repl')")
+ cur.execute(exists)
+ check = cur.fetchone()
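+ # fetchone() returns (bool,); True indexes as 1, picking the custom
+ # pg_stat_repl wrapper view when it exists, else pg_stat_replication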
+ return REPL_VIEWS[check[0]]
+
+
+def write_diff(cur):
+ vers = cur.connection.server_version
+ if vers <= 90124:
+ # Postgres 9.1 Doesn't support diffing the xlog locations
+ return
+ elif vers < 100000:
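+ # PostgreSQL 10 renamed xlog to wal, hence pg_wal_lsn_diff below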
+ query = ("SELECT host(client_addr), "
+ " pg_xlog_location_diff(sent_location, write_location) "
+ " from {table}")
+ else:
+ query = ("SELECT host(client_addr), "
+ " pg_wal_lsn_diff(sent_lsn, write_lsn) "
+ " from {table}")
+
+ cur.execute(query.format(table=view_select(cur)))
+ for row in cur.fetchall():
+ yield ('psql.write_diff[%s]' % (row[0]), row[1])
+
+def replay_diff(cur):
+ vers = cur.connection.server_version
+ if vers <= 90124:
+ # Postgres 9.1 Doesn't support diffing the xlog locations
+ return
+ elif vers < 100000:
+ query = ("SELECT host(client_addr), "
+ " pg_xlog_location_diff(sent_location, replay_location) "
+ " from {table}")
+ else:
+ query = ("SELECT host(client_addr), "
+ " pg_wal_lsn_diff(sent_lsn, replay_lsn) "
+ " from {table}")
+
+ cur.execute(query.format(table=view_select(cur)))
+ for row in cur.fetchall():
+ yield ('psql.replay_diff[%s]' % (row[0]), row[1])
+
+def sync_priority(cur):
+ query = ("SELECT host(client_addr), "
+ " sync_priority "
+ " from {table}")
+
+ cur.execute(query.format(table=view_select(cur)))
+ for row in cur.fetchall():
+ yield ('psql.sync_priority[%s]' % (row[0]), row[1])
+
+def sr_discovery(cur):
+ query = ("SELECT client_addr, state from {table};")
+ cur.execute(query.format(table=view_select(cur)))
+ for row in cur.fetchall():
+ # pg_basebackup has no client_addr set when streaming
+ if row[0]:
+ yield {
+ "{#SRCLIENT}": row[0],
+ "{#MODE}": row[1],
+ }
diff --git a/build/lib/pgzabbix/table.py b/build/lib/pgzabbix/table.py
new file mode 100644
index 0000000..82088b2
--- /dev/null
+++ b/build/lib/pgzabbix/table.py
@@ -0,0 +1,155 @@
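+# Each generator below yields (zabbix_item_key, value) pairs, one per row
+# of the pg_stat/pg_statio user-table views.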
+
+def psql_table_analyze_count(cur):
+ query = "select current_database(), schemaname, relname, analyze_count from pg_stat_user_tables"
+ out = "psql.table_analyze_count[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_analyze_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_autoanalyze_count(cur):
+ query = "select current_database(), schemaname, relname, autoanalyze_count from pg_stat_user_tables"
+ out = "psql.table_autoanalyze_count[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_autoanalyze_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_autovacuum_count(cur):
+ query = "select current_database(), schemaname, relname, autovacuum_count from pg_stat_user_tables"
+ out = "psql.table_autovacuum_count[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_autovacuum_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_n_dead_tup(cur):
+ query = "select current_database(), schemaname, relname, n_dead_tup from pg_stat_user_tables"
+ out = "psql.table_n_dead_tup[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_n_dead_tup[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_n_tup_del(cur):
+ query = "select current_database(), schemaname, relname, n_tup_del from pg_stat_user_tables"
+ out = "psql.table_n_tup_del[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_n_tup_del[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_n_tup_hot_upd(cur):
+ query = "select current_database(), schemaname, relname, n_tup_hot_upd from pg_stat_user_tables"
+ out = "psql.table_n_tup_hot_upd[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_n_tup_hot_upd[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_idx_scan(cur):
+ query = "select current_database(), schemaname, relname, coalesce(idx_scan, 0) from pg_stat_user_tables"
+ out = "psql.table_idx_scan[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_idx_scan[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_seq_tup_read(cur):
+ query = "select current_database(), schemaname, relname, coalesce(seq_tup_read, 0) from pg_stat_user_tables"
+ out = "psql.table_seq_tup_read[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_seq_tup_read[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_idx_tup_fetch(cur):
+ query = "select current_database(), schemaname, relname, coalesce(idx_tup_fetch,0) from pg_stat_user_tables"
+ out = "psql.table_idx_tup_fetch[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_idx_tup_fetch[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_idx_tup_ins(cur):
+ query = "select current_database(), schemaname, relname, n_tup_ins from pg_stat_user_tables"
+ out = "psql.table_n_tup_ins[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_n_tup_ins[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_n_live_tup(cur):
+ query = "select current_database(), schemaname, relname, n_live_tup from pg_stat_user_tables"
+ out = "psql.table_n_live_tup[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_n_live_tup[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_seq_scan(cur):
+ query = "select current_database(), schemaname, relname, seq_scan from pg_stat_user_tables"
+ out = "psql.table_seq_scan[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_seq_scan[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_n_tup_upd(cur):
+ query = "select current_database(), schemaname, relname, n_tup_upd from pg_stat_user_tables"
+ out = "psql.table_n_tup_upd[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_n_tup_upd[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_vacuum_count(cur):
+ query = "select current_database(), schemaname, relname, vacuum_count from pg_stat_user_tables"
+ out = "psql.table_vacuum_count[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_vacuum_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_total_size(cur):
+ query = "select current_database(), schemaname, relname, pg_total_relation_size(relid) from pg_stat_user_tables"
+ out = "psql.table_total_size[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_total_size[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_heap_cachehit_ratio(cur):
+ query = ("select current_database(), schemaname, relname, "
+ " round(heap_blks_hit * 100.0 / greatest(heap_blks_hit + heap_blks_read, 1), 2) "
+ " from pg_statio_user_tables")
+ out = "psql.table_heap_cachehit_ratio[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ if row[3] is None:
+ continue
+ yield ("psql.table_heap_cachehit_ratio[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_idx_cachehit_ratio(cur):
+ query = ("select current_database(), schemaname, relname, "
+ " round(idx_blks_hit * 100.0 / greatest(idx_blks_hit + idx_blks_read, 1), 2) "
+ " from pg_statio_user_tables;")
+ out = "psql.table_idx_cachehit_ratio[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ if row[3] is None:
+ continue
+ yield ("psql.table_idx_cachehit_ratio[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
+
+
+def psql_table_garbage_ratio(cur):
+ query = ("select current_database(), schemaname, relname, "
+ " round(n_dead_tup / greatest(n_live_tup + n_dead_tup , 1), 2) "
+ " from pg_stat_user_tables;")
+ out = "psql.table_garbage_ratio[{0},{1},{2}]"
+ cur.execute(query)
+ for row in cur.fetchall():
+ yield ("psql.table_garbage_ratio[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
diff --git a/pgzabbix/__init__.py b/pgzabbix/__init__.py
index 227be80..820e814 100644
--- a/pgzabbix/__init__.py
+++ b/pgzabbix/__init__.py
@@ -33,7 +33,7 @@ def all_generic(cur):
pgzabbix.generic.psql_tx_rolledback,
):
for key, val in fun(cur):
- print("- {0} {1}".format(key, val))
+ print("- %s %s" % (key, val))
def all_perdb(cur):
@@ -58,7 +58,7 @@ def all_perdb(cur):
pgzabbix.database.db_cachehit_ratio,
):
for key, val in fun(cur):
- print("- {0} {1}".format(key, val))
+ print("- %s %s" % (key, val))
def all_sr(cur):
@@ -68,7 +68,7 @@ def all_sr(cur):
pgzabbix.replication.sync_priority,
):
for key, val in fun(cur):
- print("- {0} {1}".format(key, val))
+ print("- %s %s" % (key, val))
def current_tables(cur):
@@ -108,13 +108,13 @@ def to_zbx(thelist):
def discover_sr(cur):
data = list(pgzabbix.replication.sr_discovery(cur))
data = to_zbx(data)
- print(" - {0} {1}".format("psql.sr.discovery", data))
+ print(" - %s %s" % ("psql.sr.discovery", data))
def discover_db(cur):
data = pgzabbix.discover.db_discovery(cur)
data = to_zbx(data)
- print(" - {0} {1}".format("psql.discovery", data))
+ print(" - %s %s" % ("psql.discovery", data))
def list_databases_we_can_connect_to_and_fuck_off(cur):
@@ -149,7 +149,7 @@ def foreach_db(config, perdb_function):
def tables_stat(config):
for key, val in foreach_db(config, current_tables):
- print("- {0} {1}".format(key, val))
+ print("- %s %s" % (key, val))
def discover_tables(config):
@@ -160,7 +160,7 @@ def discover_tables(config):
# then complains about invalid (truncated) json
data = list(foreach_db(config, pgzabbix.discover.tables_discovery))
data = to_zbx(data)
- print(" - {0} {1}".format("psql.table.discovery", data))
+ print(" - %s %s" % ("psql.table.discovery", data))
def discover_all(config, cur):
diff --git a/pgzabbix/cmd.py b/pgzabbix/cmd.py
index ace807a..6ec1462 100644
--- a/pgzabbix/cmd.py
+++ b/pgzabbix/cmd.py
@@ -7,10 +7,17 @@
except ImportError:
import configparser
-
def parseConfig(inifile):
- config = configparser.SafeConfigParser()
- config.readfp(inifile)
+ try:
+ config = configparser.SafeConfigParser()
+ except AttributeError:
+ config = configparser.ConfigParser()
+
+ try:
+ config.read_file(inifile)
+ except AttributeError:
+ config.readfp(inifile)
+
if not config.sections():
print("No sections in %s. Exiting" % inifile)
exit(1)
@@ -20,7 +27,6 @@ def parseConfig(inifile):
opt[item] = config.get("postgres", item)
return opt
-
def get_connection(config):
conn_string = "host={host} dbname={dbname} user={user} password={password}"
conn = psycopg2.connect(conn_string.format(**config))
diff --git a/pgzabbix/database.py b/pgzabbix/database.py
index 0620414..4922f6e 100644
--- a/pgzabbix/database.py
+++ b/pgzabbix/database.py
@@ -15,8 +15,7 @@ def psql_db_size(cur):
" has_database_privilege(datname, 'CONNECT')")
cur.execute(query)
for row in cur.fetchall():
- yield ("psql.db_size[{0}]".format(row[0]), row[1])
-
+ yield ("psql.db_size[%s]" % (row[0]), row[1])
def psql_db_garbage_ratio(cur):
return ()
@@ -31,32 +30,28 @@ def confl_tablespace(cur):
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.confl_tablespace[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.confl_tablespace[%s]' % (row[0]), row[1])
def confl_lock(cur):
cur.execute("select datname, confl_lock from pg_stat_database_conflicts "
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.confl_lock[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.confl_lock[%s]' % (row[0]), row[1])
def confl_snapshot(cur):
cur.execute("select datname, confl_snapshot from pg_stat_database_conflicts"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.confl_snapshot[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.confl_snapshot[%s]' % (row[0]), row[1])
def confl_bufferpin(cur):
cur.execute("select datname, confl_bufferpin from pg_stat_database_conflicts"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.confl_bufferpin[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.confl_bufferpin[%s]' % (row[0]), row[1])
def confl_deadlock(cur):
cur.execute("select datname, confl_deadlock from pg_stat_database_conflicts"
@@ -64,16 +59,14 @@ def confl_deadlock(cur):
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.confl_deadlock[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.confl_deadlock[%s]' % (row[0]), row[1])
def db_tx_commited(cur):
cur.execute("select datname, xact_commit from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_tx_commited[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_tx_commited[%s]' % (row[0]), row[1])
def db_deadlocks(cur):
vers = cur.connection.server_version
@@ -85,16 +78,14 @@ def db_deadlocks(cur):
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_deadlocks[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_deadlocks[%s]' % (row[0]), row[1])
def db_tx_rolledback(cur):
cur.execute("select datname, xact_rollback from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_tx_rolledback[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_tx_rolledback[%s]' % (row[0]), row[1])
def db_temp_bytes(cur):
vers = cur.connection.server_version
@@ -105,56 +96,49 @@ def db_temp_bytes(cur):
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_temp_bytes[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_temp_bytes[%s]' % (row[0]), row[1])
def db_deleted(cur):
cur.execute("select datname, tup_deleted from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_deleted[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_deleted[%s]' % (row[0]), row[1])
def db_fetched(cur):
cur.execute("select datname, tup_fetched from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_fetched[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_fetched[%s]' % (row[0]), row[1])
def db_inserted(cur):
cur.execute("select datname, tup_inserted from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_inserted[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_inserted[%s]' % (row[0]), row[1])
def db_returned(cur):
cur.execute("select datname, tup_returned from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_returned[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_returned[%s]' % (row[0]), row[1])
def db_updated(cur):
cur.execute("select datname, tup_updated from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_updated[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_updated[%s]' % (row[0]), row[1])
def db_connections(cur):
cur.execute("select datname, numbackends from pg_stat_database"
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.db_connections[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.db_connections[%s]' % (row[0]), row[1])
def db_cachehit_ratio(cur):
cur.execute("select datname, round(blks_hit * 100.0 / (blks_hit + greatest(blks_read, 1)), 2)"
@@ -162,4 +146,4 @@ def db_cachehit_ratio(cur):
" inner join pg_database using (datname)"
" where pg_database.datistemplate=False;")
for row in cur.fetchall():
- yield ('psql.cachehit_ratio[{0}]'.format(row[0]), row[1])
+ yield ('psql.cachehit_ratio[%s]' % (row[0]), row[1])
diff --git a/pgzabbix/replication.py b/pgzabbix/replication.py
index b8b4a06..5fb1c46 100644
--- a/pgzabbix/replication.py
+++ b/pgzabbix/replication.py
@@ -24,8 +24,7 @@ def write_diff(cur):
cur.execute(query.format(table=view_select(cur)))
for row in cur.fetchall():
- yield ('psql.write_diff[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.write_diff[%s]' % (row[0]), row[1])
def replay_diff(cur):
vers = cur.connection.server_version
@@ -43,8 +42,7 @@ def replay_diff(cur):
cur.execute(query.format(table=view_select(cur)))
for row in cur.fetchall():
- yield ('psql.replay_diff[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.replay_diff[%s]' % (row[0]), row[1])
def sync_priority(cur):
query = ("SELECT host(client_addr), "
@@ -53,8 +51,7 @@ def sync_priority(cur):
cur.execute(query.format(table=view_select(cur)))
for row in cur.fetchall():
- yield ('psql.sync_priority[{0}]'.format(row[0]), row[1])
-
+ yield ('psql.sync_priority[%s]' % (row[0]), row[1])
def sr_discovery(cur):
query = ("SELECT client_addr, state from {table};")
diff --git a/pgzabbix/table.py b/pgzabbix/table.py
index 7803a40..82088b2 100644
--- a/pgzabbix/table.py
+++ b/pgzabbix/table.py
@@ -4,7 +4,7 @@ def psql_table_analyze_count(cur):
out = "psql.table_analyze_count[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_analyze_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_autoanalyze_count(cur):
@@ -12,7 +12,7 @@ def psql_table_autoanalyze_count(cur):
out = "psql.table_autoanalyze_count[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_autoanalyze_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_autovacuum_count(cur):
@@ -20,7 +20,7 @@ def psql_table_autovacuum_count(cur):
out = "psql.table_autovacuum_count[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_autovacuum_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_n_dead_tup(cur):
@@ -28,7 +28,7 @@ def psql_table_n_dead_tup(cur):
out = "psql.table_n_dead_tup[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_n_dead_tup[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_n_tup_del(cur):
@@ -36,7 +36,7 @@ def psql_table_n_tup_del(cur):
out = "psql.table_n_tup_del[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_n_tup_del[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_n_tup_hot_upd(cur):
@@ -44,7 +44,7 @@ def psql_table_n_tup_hot_upd(cur):
out = "psql.table_n_tup_hot_upd[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_n_tup_hot_upd[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_idx_scan(cur):
@@ -52,7 +52,7 @@ def psql_table_idx_scan(cur):
out = "psql.table_idx_scan[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_idx_scan[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_seq_tup_read(cur):
@@ -60,7 +60,7 @@ def psql_table_seq_tup_read(cur):
out = "psql.table_seq_tup_read[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_seq_tup_read[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_idx_tup_fetch(cur):
@@ -68,7 +68,7 @@ def psql_table_idx_tup_fetch(cur):
out = "psql.table_idx_tup_fetch[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_idx_tup_fetch[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_idx_tup_ins(cur):
@@ -76,7 +76,7 @@ def psql_table_idx_tup_ins(cur):
out = "psql.table_n_tup_ins[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_n_tup_ins[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_n_live_tup(cur):
@@ -84,7 +84,7 @@ def psql_table_n_live_tup(cur):
out = "psql.table_n_live_tup[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_n_live_tup[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_seq_scan(cur):
@@ -92,7 +92,7 @@ def psql_table_seq_scan(cur):
out = "psql.table_seq_scan[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_seq_scan[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_n_tup_upd(cur):
@@ -100,7 +100,7 @@ def psql_table_n_tup_upd(cur):
out = "psql.table_n_tup_upd[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_n_tup_upd[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_vacuum_count(cur):
@@ -108,7 +108,7 @@ def psql_table_vacuum_count(cur):
out = "psql.table_vacuum_count[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_vacuum_count[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_total_size(cur):
@@ -116,7 +116,7 @@ def psql_table_total_size(cur):
out = "psql.table_total_size[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_total_size[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_heap_cachehit_ratio(cur):
@@ -128,7 +128,7 @@ def psql_table_heap_cachehit_ratio(cur):
for row in cur.fetchall():
if row[3] is None:
continue
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_heap_cachehit_ratio[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_idx_cachehit_ratio(cur):
@@ -140,7 +140,7 @@ def psql_table_idx_cachehit_ratio(cur):
for row in cur.fetchall():
if row[3] is None:
continue
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_idx_cachehit_ratio[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])
def psql_table_garbage_ratio(cur):
@@ -150,4 +150,4 @@ def psql_table_garbage_ratio(cur):
out = "psql.table_garbage_ratio[{0},{1},{2}]"
cur.execute(query)
for row in cur.fetchall():
- yield out.format(*row[:3]), row[3]
+ yield ("psql.table_garbage_ratio[%s,%s,%s]" % (row[0], row[1], row[2]), row[3])