# -*- coding: utf-8 -*-
#
# PgManager - manage database connections
#
# Requires: Python 3.2, psycopg2
#
# Part of pgtoolkit
# http://hg.devl.cz/pgtoolkit
#
# Copyright (c) 2010, 2011 Radek Brich <radek.brich@devl.cz>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Postgres database connection manager
PgManager wraps psycopg2 connect function, adding following features:
* Manage database connection parameters - link connection parameters
to an unique identifier, retrieve connection object by this identifier
* Connection pooling - connections with same identifier are pooled and reused
* Easy query using the with statement - retrieve cursor directly by connection
identifier, don't worry about connections
* Dict rows - cursor has additional methods like fetchall_dict(), which
returns dict row instead of ordinary list-like row

Example:

    import pgmanager

    pgm = pgmanager.get_instance()
    pgm.create_conn(hostaddr='127.0.0.1', dbname='postgres')

    with pgm.cursor() as curs:
        curs.execute('SELECT now() AS now')
        row = curs.fetchone_dict()
        print(row.now)

First, we obtain the PgManager instance. This is like calling PgManager()
directly, except that the instance is global: getting the instance in another
module gives us the same object, with all the defined connections etc.

Next, we create a connection named 'default' (the name can be left out, as here).

The with statement obtains a connection (actually connecting to the database only
when needed), then returns a cursor for that connection. At the end of the with
statement, the connection is returned to the pool or closed (depending on the
number of connections in the pool and on the keep_open parameter).

The row returned by fetchone_dict() is a special dict object which can be accessed
using item or attribute access, that is row['now'] or row.now.
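
Named connections can be registered alongside the default one. This is only an
illustrative sketch; the connection name 'stats', host and database below are
made-up values:

    pgm.create_conn(name='stats', keep_open=2, host='127.0.0.1', dbname='statsdb')

    with pgm.cursor('stats') as curs:
        curs.execute('SELECT count(*) AS cnt FROM pg_stat_activity')
        print(curs.fetchone_dict().cnt)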
"""
from contextlib import contextmanager
from collections import OrderedDict
import logging
import threading
import multiprocessing
import select
import socket
import psycopg2
import psycopg2.extensions
from psycopg2 import DatabaseError, IntegrityError, OperationalError
log_sql = logging.getLogger("pgmanager_sql")
log_notices = logging.getLogger("pgmanager_notices")
class PgManagerError(Exception):
pass
class ConnectionInfo:
def __init__(self, dsn, isolation_level=None, keep_alive=True,
init_statement=None, keep_open=1):
self.dsn = dsn
self.isolation_level = isolation_level
self.keep_alive = keep_alive
self.init_statement = init_statement
self.keep_open = keep_open
class RowDict(OrderedDict):
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(key)
class Cursor(psycopg2.extensions.cursor):
def execute(self, query, args=None):
try:
return super(Cursor, self).execute(query, args)
finally:
self._log_query()
def callproc(self, procname, args=None):
try:
return super(Cursor, self).callproc(procname, args)
finally:
self._log_query()
def row_dict(self, row, lstrip=None):
adjustname = lambda a: a
if lstrip:
adjustname = lambda a: a.lstrip(lstrip)
return RowDict(zip([adjustname(desc[0]) for desc in self.description], row))
def fetchone_dict(self, lstrip=None):
'''Return one row as OrderedDict'''
row = super(Cursor, self).fetchone()
if row is None:
return None
return self.row_dict(row, lstrip)
def fetchall_dict(self, lstrip=None):
'''Return all rows as OrderedDict'''
rows = super(Cursor, self).fetchall()
return [self.row_dict(row, lstrip) for row in rows]
def fetchone_adapted(self, lstrip=None):
'''Like fetchone_dict() but values are quoted for direct inclusion in SQL query.
This is useful when you need to generate SQL script from data returned
by the query. Use mogrify() for simple cases.
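
        For example, to dump rows as an SQL script (the table and column names
        here are made-up, illustrative values):

            curs.execute('SELECT id, name FROM item')
            row = curs.fetchone_adapted()
            if row is not None:
                print('INSERT INTO item (id, name) VALUES (%s, %s);'
                      % (row.id, row.name))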
'''
row = super(Cursor, self).fetchone()
if row is None:
return None
return self.row_dict([self.mogrify('%s', [x]).decode('utf8') for x in row], lstrip)
def fetchall_adapted(self, lstrip=None):
'''Like fetchall_dict() but values are quoted for direct inclusion in SQL query.'''
rows = super(Cursor, self).fetchall()
return [self.row_dict([self.mogrify('%s', [x]).decode('utf8') for x in row], lstrip) for row in rows]
def _log_query(self):
if self.query:
name = self.connection.name if hasattr(self.connection, 'name') else '-'
log_sql.info('[%s] %s' % (name, self.query.decode('utf8')))
class Connection(psycopg2.extensions.connection):
def cursor(self, name=None):
if name is None:
return super(Connection, self).cursor(cursor_factory=Cursor)
else:
return super(Connection, self).cursor(name, cursor_factory=Cursor)
def keep_alive(self):
'''Set socket to keepalive mode. Must be called before any query.'''
sock = socket.fromfd(self.fileno(), socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
try:
            # Maximum keep-alive probes before assuming the connection is lost
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)
# Interval (in seconds) between keep-alive probes
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 2)
            # Maximum idle time (in seconds) before starting to send keep-alive probes
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 10)
except socket.error:
pass
class PgManager:
def __init__(self):
self.conn_known = {} # available connections
        self.conn_pool = {} # active connections
self.lock = threading.Lock() # mutual exclusion for threads
self.pid = multiprocessing.current_process().pid # forking check
def __del__(self):
for conn in tuple(self.conn_known.keys()):
self.destroy_conn(conn)
def create_conn(self, name='default', keep_open=1, isolation_level=None, keep_alive=True, dsn=None, **kw):
'''Create named connection.
name -- name for connection (default is "default")
keep_open -- how many connections will be kept open in pool (more connections will still be created,
but they will be closed by put_conn)
isolation_level -- "autocommit", "read_committed", "serializable" or None for driver default
keep_alive -- set socket to keepalive mode
dsn -- string with connection parameters (dsn means Data Source Name)
        Instead of dsn, the same parameters can be passed as keyword args.
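
        Example (connection parameters are illustrative values):

            pgm.create_conn(name='main', keep_open=2,
                            dsn='host=127.0.0.1 dbname=mydb user=myuser')
            # the same parameters given as keyword args
            # (each connection name may be registered only once):
            pgm.create_conn(name='readonly',
                            host='127.0.0.1', dbname='mydb', user='myuser')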
'''
if name in self.conn_known:
raise PgManagerError('Connection name "%s" already registered.' % name)
if dsn is None:
dsn = ' '.join([x[0]+'='+str(x[1]) for x in kw.items() if x[1] is not None])
isolation_level = self._normalize_isolation_level(isolation_level)
ci = ConnectionInfo(dsn, isolation_level, keep_alive, keep_open=keep_open)
self.conn_known[name] = ci
self.conn_pool[name] = []
def close_conn(self, name='default'):
'''Close all connections of given name.
Connection credentials are still saved.
'''
while len(self.conn_pool[name]):
conn = self.conn_pool[name].pop()
conn.close()
def destroy_conn(self, name='default'):
'''Destroy connection.
Counterpart of create_conn.
'''
        if name not in self.conn_known:
raise PgManagerError('Connection name "%s" not registered.' % name)
self.close_conn(name)
del self.conn_known[name]
del self.conn_pool[name]
def get_conn(self, name='default'):
'''Get connection of name 'name' from pool.'''
self._check_fork()
self.lock.acquire()
try:
            if name not in self.conn_known:
raise PgManagerError("Connection name '%s' not registered." % name)
conn = None
while len(self.conn_pool[name]) and conn is None:
conn = self.conn_pool[name].pop()
if conn.closed:
conn = None
if conn is None:
ci = self.conn_known[name]
conn = self._connect(ci)
# add our name to connection instance (this is then logged with SQL queries)
conn.name = name
finally:
self.lock.release()
return conn
def put_conn(self, conn, name='default'):
'''Put connection back to pool.
        The name must be the same as the one used for get_conn(),
        otherwise things will break.
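
        A minimal sketch of the manual pattern (the cursor() context manager
        defined below does this bookkeeping for you):

            conn = pgm.get_conn('default')
            try:
                curs = conn.cursor()
                curs.execute('SELECT 1')
                curs.close()
            finally:
                pgm.put_conn(conn, 'default')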
'''
self.lock.acquire()
try:
            if name not in self.conn_known:
raise PgManagerError("Connection name '%s' not registered." % name)
if len(self.conn_pool[name]) >= self.conn_known[name].keep_open:
conn.close()
return
if conn.get_transaction_status() == psycopg2.extensions.TRANSACTION_STATUS_UNKNOWN:
conn.close()
return
# connection returned to the pool must not be in transaction
if conn.get_transaction_status() != psycopg2.extensions.TRANSACTION_STATUS_IDLE:
try:
conn.rollback()
except OperationalError:
if not conn.closed:
conn.close()
return
self.conn_pool[name].append(conn)
finally:
self.lock.release()
@contextmanager
def cursor(self, name='default'):
'''Cursor context.
Uses any connection of name 'name' from pool
and returns cursor for that connection.
'''
        conn = self.get_conn(name)
        curs = None
        try:
            curs = conn.cursor()
            yield curs
        finally:
            # close the cursor if it was created, then return the connection
            if curs is not None:
                curs.close()
            self.log_notices(conn)
            self.put_conn(conn, name)
def log_notices(self, conn):
for notice in conn.notices:
log_notices.info(notice.rstrip())
conn.notices[:] = []
def wait_for_notify(self, name='default', timeout=5):
'''Wait for asynchronous notifies, return the last one.
Returns None on timeout.
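
        An illustrative sketch (channel 'jobs', host and dbname are made-up
        values; an autocommit connection is used so the LISTEN is not rolled
        back when the connection returns to the pool, and notify.payload
        requires psycopg2 2.3+):

            pgm.create_conn(name='listener', isolation_level='autocommit',
                            host='127.0.0.1', dbname='mydb')
            with pgm.cursor('listener') as curs:
                curs.execute('LISTEN jobs')
            notify = pgm.wait_for_notify('listener', timeout=10)
            if notify is not None:
                print(notify.channel, notify.payload)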
'''
conn = self.get_conn(name)
try:
            # if there is any residual notify, return it
            # (that should not break anything)
if conn.notifies:
return conn.notifies.pop()
if select.select([conn], [], [], timeout) == ([], [], []):
# timeout
return None
else:
conn.poll()
# return just the last notify (we do not care for older ones)
if conn.notifies:
return conn.notifies.pop()
return None
finally:
# clean notifies
while conn.notifies:
conn.notifies.pop()
self.put_conn(conn, name)
def _connect(self, ci):
conn = psycopg2.connect(ci.dsn, connection_factory=Connection)
if ci.keep_alive:
conn.keep_alive()
        if ci.isolation_level is not None:
conn.set_isolation_level(ci.isolation_level)
if ci.init_statement:
curs = conn.cursor()
curs.execute(ci.init_statement)
curs.close()
return conn
def _normalize_isolation_level(self, level):
        if isinstance(level, str):
if level.lower() == 'autocommit':
return psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
if level.lower() == 'read_committed':
return psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
if level.lower() == 'serializable':
return psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE
            raise PgManagerError('Unknown isolation level name: "%s"' % level)
return level
def _check_fork(self):
'''Check if process was forked (PID has changed).
If it was, clean parent's connections.
New connections are created for children.
Known connection credentials are inherited, but not shared.
'''
if self.pid == multiprocessing.current_process().pid:
# PID has not changed
return
# update saved PID
self.pid = multiprocessing.current_process().pid
# reinitialize lock
self.lock = threading.Lock()
# clean parent's connections
for name in self.conn_pool:
self.conn_pool[name] = []
@classmethod
def get_instance(cls):
if not hasattr(cls, '_instance'):
cls._instance = cls()
return cls._instance
def get_instance():
return PgManager.get_instance()