Move batchquery, listtables tools under pgtool.
--- a/batchquery.py Wed Jul 09 18:03:54 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,69 +0,0 @@
-#!/usr/bin/env python3
-
-from pydbkit import toolbase
-
-
-class BatchQueryTool(toolbase.SimpleTool):
- def __init__(self):
- toolbase.SimpleTool.__init__(self, name='batchquery', desc='Run a query using columns from CSV file as arguments.')
- self.parser.add_argument('--query', dest='query', type=str, help='Query to run. Use %%s for arguments, or %%(name)s for named arguments (see --header).')
- self.parser.add_argument('--file', dest='file', type=str, help='CSV file with data to use as arguments.')
- self.parser.add_argument('--init', dest='init', type=str, help='Query which initialize database session (eg. temporary function).')
- self.parser.add_argument('--output', dest='output', type=str, help='File name for results.')
- self.parser.add_argument('--outputfunc', dest='outputfunc', type=str, help='Python function which will format results (format_row(args, rows)).')
- self.parser.add_argument('--header', dest='header', action='store_true', help='First line of CSV is header with names for columns. These name can be used in query.')
-
- def _split_line(self, line):
- return [x.strip() for x in line.split(',')]
-
- def main(self):
- results = []
- # load query from file
- with open(self.args.query, 'r', encoding='utf8') as f:
- query = f.read()
- # connect DB
- with self.pgm.cursor('target') as curs:
- # run init query
- if self.args.init:
- with open(self.args.init, 'r', encoding='utf8') as f:
- curs.execute(f.read(), [])
- # read CSV file
- with open(self.args.file, 'r', encoding='utf8') as f:
- # read header
- names = None
- if self.args.header:
- line = f.readline()
- names = self._split_line(line)
- # read and process lines
- for line in f:
- args = self._split_line(line)
- if names:
- args = dict(zip(names, args))
- curs.execute(query, args)
- rows = curs.fetchall()
- results.append((args, rows))
- curs.connection.commit()
- # write results to output file
- if self.args.output:
- format_row = None
- if self.args.outputfunc:
- with open(self.args.outputfunc, 'r', encoding='utf8') as f:
- d = dict()
- exec(f.read(), d)
- format_row = d['format_row']
-
- with open(self.args.output, 'w', encoding='utf8') as f:
- for args, rows in results:
- if format_row:
- f.write(format_row(args, rows))
- else:
- f.write(repr(args))
- f.write(' -> ')
- f.write(repr(rows))
- f.write('\n')
-
-
-tool = BatchQueryTool()
-tool.setup()
-tool.main()
-
--- a/listtables.py Wed Jul 09 18:03:54 2014 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-#!/usr/bin/env python3
-
-from pydbkit import pgbrowser, toolbase
-
-
-class ListTablesTool(toolbase.SimpleTool):
- def __init__(self):
- toolbase.SimpleTool.__init__(self, name='listtables', desc='List tables in database.')
- self.parser.add_argument('-o', dest='options', type=str, nargs='*', help='Filter by options (eg. -o autovacuum_enabled=false).')
-
- def main(self):
- browser = pgbrowser.PgBrowser(self.pgm.get_conn('target'))
-
- # scan all tables from all shemas, remember names and sizes
- tables = []
- for schema in browser.schemas.values():
- for table in schema.tables.values():
- for option in self.args.options:
- if option in table.options:
- tables.append(table)
-
- # print result
- if len(tables):
- print('Found %d tables:' % len(tables))
- else:
- print('No table meets the conditions.')
- for table in tables:
- table_name = '%s.%s' % (table.schema.name, table.name)
- print(' ', table_name)
-
-
-tool = ListTablesTool()
-tool.setup()
-tool.main()
-
--- a/pydbkit/tools/__init__.py Wed Jul 09 18:03:54 2014 +0200
+++ b/pydbkit/tools/__init__.py Wed Jul 09 18:04:11 2014 +0200
@@ -1,3 +1,3 @@
-__all__ = ['analyzeall', 'batchcopy', 'bigtables', 'listdepends',
- 'listserial', 'longqueries', 'loopquery',
+__all__ = ['analyzeall', 'batchcopy', 'batchquery', 'bigtables', 'listdepends',
+ 'listserial', 'listtables', 'longqueries', 'loopquery',
'runquery', 'schemadiff', 'tablediff', 'tablesync']
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/pydbkit/tools/batchquery.py Wed Jul 09 18:04:11 2014 +0200
@@ -0,0 +1,73 @@
+from pydbkit.toolbase import SimpleTool
+
+
+class BatchQueryTool(SimpleTool):
+
+ """
+ Run a query using columns from CSV file as arguments.
+ """
+
+ def __init__(self):
+ SimpleTool.__init__(self, name='batchquery')
+
+ def specify_args(self):
+ SimpleTool.specify_args(self)
+ self.parser.add_argument('--query', dest='query', type=str, help='Query to run. Use %%s for arguments, or %%(name)s for named arguments (see --header).')
+ self.parser.add_argument('--file', dest='file', type=str, help='CSV file with data to use as arguments.')
+        self.parser.add_argument('--init', dest='init', type=str, help='Query which initializes the database session (eg. temporary function).')
+ self.parser.add_argument('--output', dest='output', type=str, help='File name for results.')
+ self.parser.add_argument('--outputfunc', dest='outputfunc', type=str, help='Python function which will format results (format_row(args, rows)).')
+        self.parser.add_argument('--header', dest='header', action='store_true', help='First line of CSV is header with names for columns. These names can be used in query.')
+
+ def _split_line(self, line):
+ return [x.strip() for x in line.split(',')]
+
+ def main(self):
+ results = []
+ # load query from file
+ with open(self.args.query, 'r', encoding='utf8') as f:
+ query = f.read()
+ # connect DB
+ with self.pgm.cursor('target') as curs:
+ # run init query
+ if self.args.init:
+ with open(self.args.init, 'r', encoding='utf8') as f:
+ curs.execute(f.read(), [])
+ # read CSV file
+ with open(self.args.file, 'r', encoding='utf8') as f:
+ # read header
+ names = None
+ if self.args.header:
+ line = f.readline()
+ names = self._split_line(line)
+ # read and process lines
+ for line in f:
+ args = self._split_line(line)
+ if names:
+ args = dict(zip(names, args))
+ curs.execute(query, args)
+ rows = curs.fetchall()
+ results.append((args, rows))
+ curs.connection.commit()
+ # write results to output file
+ if self.args.output:
+ format_row = None
+ if self.args.outputfunc:
+ with open(self.args.outputfunc, 'r', encoding='utf8') as f:
+ d = dict()
+ exec(f.read(), d)
+ format_row = d['format_row']
+
+ with open(self.args.output, 'w', encoding='utf8') as f:
+ for args, rows in results:
+ if format_row:
+ f.write(format_row(args, rows))
+ else:
+ f.write(repr(args))
+ f.write(' -> ')
+ f.write(repr(rows))
+ f.write('\n')
+
+
+cls = BatchQueryTool
+
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/pydbkit/tools/listtables.py Wed Jul 09 18:04:11 2014 +0200
@@ -0,0 +1,44 @@
+from pydbkit.toolbase import SimpleTool
+from pydbkit import pgbrowser
+
+
+class ListTablesTool(SimpleTool):
+
+ """
+ List tables in database.
+
+ Allows filtering by combination of conditions.
+ (Currently only by options.)
+
+ """
+
+ def __init__(self):
+ SimpleTool.__init__(self, name='listtables')
+
+ def specify_args(self):
+ SimpleTool.specify_args(self)
+ self.parser.add_argument('-o', dest='options', type=str, nargs='*', help='Filter by options (eg. -o autovacuum_enabled=false).')
+
+ def main(self):
+ browser = pgbrowser.PgBrowser(self.pgm.get_conn('target'))
+
+        # scan all tables from all schemas, remember names and sizes
+ tables = []
+ for schema in browser.schemas.values():
+ for table in schema.tables.values():
+ for option in self.args.options:
+ if option in table.options:
+ tables.append(table)
+
+ # print result
+ if len(tables):
+ print('Found %d tables:' % len(tables))
+ else:
+ print('No table meets the conditions.')
+ for table in tables:
+ table_name = '%s.%s' % (table.schema.name, table.name)
+ print(' ', table_name)
+
+
+cls = ListTablesTool
+
--- a/pydbkit/tools/runquery.py Wed Jul 09 18:03:54 2014 +0200
+++ b/pydbkit/tools/runquery.py Wed Jul 09 18:04:11 2014 +0200
@@ -1,4 +1,4 @@
-from pydbkit.toolbase import SimpleTool
+from pydbkit.toolbase import ToolBase
import logging.handlers
import time